| Column | Type | Length (min–max) |
| --- | --- | --- |
| in_source_id | string | 13–58 |
| issue | string | 3–241k |
| before_files | list | 0–3 |
| after_files | list | 0–3 |
| pr_diff | string | 109–107M |
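Each row pairs a GitHub issue with the source files before and after the fix and the corresponding PR diff. As a hedged illustration of how rows with this schema might be consumed, the sketch below uses the Hugging Face `datasets` library; the dataset identifier is a placeholder, and the field access assumes the `path`/`content` keys visible in the rows below.

```python
# Minimal sketch of loading and inspecting a dataset with the schema above.
# "example-org/issue-fix-pairs" is a placeholder identifier, not a real dataset name.
from datasets import load_dataset

ds = load_dataset("example-org/issue-fix-pairs", split="train")

for row in ds.select(range(2)):
    print(row["in_source_id"])              # e.g. "netbox-community__netbox-14828"
    print(row["issue"][:200])               # free-text issue report (Markdown)
    for f in row["before_files"]:           # 0-3 files as they looked before the fix
        print(f["path"], len(f["content"]))
    print(row["pr_diff"].splitlines()[0])   # first line of the unified diff
```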
netbox-community__netbox-14828
Only one event rule triggers for a content type

### Deployment Type
Self-hosted

### NetBox Version
v3.7.0

### Python Version
3.11

### Steps to Reproduce
Setup:
1. Create a webhook: Name = Test, URL = http://127.0.0.1:9000
2. Create event rule 1:
   - Name = Rule 1
   - Content types = Prefix
   - select Updates
   - Condition = `{ "and": [{"attr": "status.value", "value": "deprecated"}]}`
   - Action type = Webhook
   - Webhook = Test
3. Create event rule 2:
   - Name = Rule 2
   - Content types = Prefix
   - select Updates
   - Condition = `{ "and": [{"attr": "status.value", "value": "active"}]}`
   - Action type = Webhook
   - Webhook = Test
4. Start webhook receiver (`python manage.py webhook_receiver`), or observe the webhooks happen in some other way (Sorry, couldn't figure out the correct condition syntax without using the "and" operator)

Demo:
5. Create a prefix, like 10.1.2.0/24, status = Active (the defaults)
6. Edit the prefix: change its status to **Deprecated**
7. Edit the prefix again: change its status to **Active**

### Expected Behavior
Webhook is run **twice**: first when prefix status was changed to **Deprecated** (step 6), second when changed to **Active** again (step 7).

### Observed Behavior
Webhook is run **only once**, that's in step 6, but not in step 7. Additionally: If Rule 1 is disabled, and steps 6 and 7 are executed again, now the webhook is run in step 7. Looks like only the first enabled event rule is run for a specific object type.
[ { "content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import timezone\nfrom django.utils.module_loading import import_string\nfrom django_rq import get_queue\n\nfrom core.models import Job\nfrom netbox.config import get_config\nfrom netbox.constants import RQ_QUEUE_DEFAULT\nfrom netbox.registry import registry\nfrom utilities.api import get_serializer_for_model\nfrom utilities.rqworker import get_rq_retry\nfrom utilities.utils import serialize_object\nfrom .choices import *\nfrom .models import EventRule, ScriptModule\n\nlogger = logging.getLogger('netbox.events_processor')\n\n\ndef serialize_for_event(instance):\n \"\"\"\n Return a serialized representation of the given instance suitable for use in a queued event.\n \"\"\"\n serializer_class = get_serializer_for_model(instance.__class__)\n serializer_context = {\n 'request': None,\n }\n serializer = serializer_class(instance, context=serializer_context)\n\n return serializer.data\n\n\ndef get_snapshots(instance, action):\n snapshots = {\n 'prechange': getattr(instance, '_prechange_snapshot', None),\n 'postchange': None,\n }\n if action != ObjectChangeActionChoices.ACTION_DELETE:\n # Use model's serialize_object() method if defined; fall back to serialize_object() utility function\n if hasattr(instance, 'serialize_object'):\n snapshots['postchange'] = instance.serialize_object()\n else:\n snapshots['postchange'] = serialize_object(instance)\n\n return snapshots\n\n\ndef enqueue_object(queue, instance, user, request_id, action):\n \"\"\"\n Enqueue a serialized representation of a created/updated/deleted object for the processing of\n events once the request has completed.\n \"\"\"\n # Determine whether this type of object supports event rules\n app_label = instance._meta.app_label\n model_name = instance._meta.model_name\n if model_name not in registry['model_features']['event_rules'].get(app_label, []):\n return\n\n queue.append({\n 'content_type': ContentType.objects.get_for_model(instance),\n 'object_id': instance.pk,\n 'event': action,\n 'data': serialize_for_event(instance),\n 'snapshots': get_snapshots(instance, action),\n 'username': user.username,\n 'request_id': request_id\n })\n\n\ndef process_event_rules(event_rules, model_name, event, data, username, snapshots=None, request_id=None):\n try:\n user = get_user_model().objects.get(username=username)\n except ObjectDoesNotExist:\n user = None\n\n for event_rule in event_rules:\n\n # Evaluate event rule conditions (if any)\n if not event_rule.eval_conditions(data):\n return\n\n # Webhooks\n if event_rule.action_type == EventRuleActionChoices.WEBHOOK:\n\n # Select the appropriate RQ queue\n queue_name = get_config().QUEUE_MAPPINGS.get('webhook', RQ_QUEUE_DEFAULT)\n rq_queue = get_queue(queue_name)\n\n # Compile the task parameters\n params = {\n \"event_rule\": event_rule,\n \"model_name\": model_name,\n \"event\": event,\n \"data\": data,\n \"snapshots\": snapshots,\n \"timestamp\": timezone.now().isoformat(),\n \"username\": username,\n \"retry\": get_rq_retry()\n }\n if snapshots:\n params[\"snapshots\"] = snapshots\n if request_id:\n params[\"request_id\"] = request_id\n\n # Enqueue the task\n rq_queue.enqueue(\n \"extras.webhooks.send_webhook\",\n **params\n )\n\n # Scripts\n elif event_rule.action_type == EventRuleActionChoices.SCRIPT:\n # Resolve the script from action parameters\n 
script_module = event_rule.action_object\n script_name = event_rule.action_parameters['script_name']\n script = script_module.scripts[script_name]()\n\n # Enqueue a Job to record the script's execution\n Job.enqueue(\n \"extras.scripts.run_script\",\n instance=script_module,\n name=script.class_name,\n user=user,\n data=data\n )\n\n else:\n raise ValueError(f\"Unknown action type for an event rule: {event_rule.action_type}\")\n\n\ndef process_event_queue(events):\n \"\"\"\n Flush a list of object representation to RQ for EventRule processing.\n \"\"\"\n events_cache = {\n 'type_create': {},\n 'type_update': {},\n 'type_delete': {},\n }\n\n for data in events:\n action_flag = {\n ObjectChangeActionChoices.ACTION_CREATE: 'type_create',\n ObjectChangeActionChoices.ACTION_UPDATE: 'type_update',\n ObjectChangeActionChoices.ACTION_DELETE: 'type_delete',\n }[data['event']]\n content_type = data['content_type']\n\n # Cache applicable Event Rules\n if content_type not in events_cache[action_flag]:\n events_cache[action_flag][content_type] = EventRule.objects.filter(\n **{action_flag: True},\n content_types=content_type,\n enabled=True\n )\n event_rules = events_cache[action_flag][content_type]\n\n process_event_rules(\n event_rules, content_type.model, data['event'], data['data'], data['username'],\n snapshots=data['snapshots'], request_id=data['request_id']\n )\n\n\ndef flush_events(queue):\n \"\"\"\n Flush a list of object representation to RQ for webhook processing.\n \"\"\"\n if queue:\n for name in settings.EVENTS_PIPELINE:\n try:\n func = import_string(name)\n func(queue)\n except Exception as e:\n logger.error(f\"Cannot import events pipeline {name} error: {e}\")\n", "path": "netbox/extras/events.py" } ]
[ { "content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import timezone\nfrom django.utils.module_loading import import_string\nfrom django_rq import get_queue\n\nfrom core.models import Job\nfrom netbox.config import get_config\nfrom netbox.constants import RQ_QUEUE_DEFAULT\nfrom netbox.registry import registry\nfrom utilities.api import get_serializer_for_model\nfrom utilities.rqworker import get_rq_retry\nfrom utilities.utils import serialize_object\nfrom .choices import *\nfrom .models import EventRule, ScriptModule\n\nlogger = logging.getLogger('netbox.events_processor')\n\n\ndef serialize_for_event(instance):\n \"\"\"\n Return a serialized representation of the given instance suitable for use in a queued event.\n \"\"\"\n serializer_class = get_serializer_for_model(instance.__class__)\n serializer_context = {\n 'request': None,\n }\n serializer = serializer_class(instance, context=serializer_context)\n\n return serializer.data\n\n\ndef get_snapshots(instance, action):\n snapshots = {\n 'prechange': getattr(instance, '_prechange_snapshot', None),\n 'postchange': None,\n }\n if action != ObjectChangeActionChoices.ACTION_DELETE:\n # Use model's serialize_object() method if defined; fall back to serialize_object() utility function\n if hasattr(instance, 'serialize_object'):\n snapshots['postchange'] = instance.serialize_object()\n else:\n snapshots['postchange'] = serialize_object(instance)\n\n return snapshots\n\n\ndef enqueue_object(queue, instance, user, request_id, action):\n \"\"\"\n Enqueue a serialized representation of a created/updated/deleted object for the processing of\n events once the request has completed.\n \"\"\"\n # Determine whether this type of object supports event rules\n app_label = instance._meta.app_label\n model_name = instance._meta.model_name\n if model_name not in registry['model_features']['event_rules'].get(app_label, []):\n return\n\n queue.append({\n 'content_type': ContentType.objects.get_for_model(instance),\n 'object_id': instance.pk,\n 'event': action,\n 'data': serialize_for_event(instance),\n 'snapshots': get_snapshots(instance, action),\n 'username': user.username,\n 'request_id': request_id\n })\n\n\ndef process_event_rules(event_rules, model_name, event, data, username, snapshots=None, request_id=None):\n try:\n user = get_user_model().objects.get(username=username)\n except ObjectDoesNotExist:\n user = None\n\n for event_rule in event_rules:\n\n # Evaluate event rule conditions (if any)\n if not event_rule.eval_conditions(data):\n continue\n\n # Webhooks\n if event_rule.action_type == EventRuleActionChoices.WEBHOOK:\n\n # Select the appropriate RQ queue\n queue_name = get_config().QUEUE_MAPPINGS.get('webhook', RQ_QUEUE_DEFAULT)\n rq_queue = get_queue(queue_name)\n\n # Compile the task parameters\n params = {\n \"event_rule\": event_rule,\n \"model_name\": model_name,\n \"event\": event,\n \"data\": data,\n \"snapshots\": snapshots,\n \"timestamp\": timezone.now().isoformat(),\n \"username\": username,\n \"retry\": get_rq_retry()\n }\n if snapshots:\n params[\"snapshots\"] = snapshots\n if request_id:\n params[\"request_id\"] = request_id\n\n # Enqueue the task\n rq_queue.enqueue(\n \"extras.webhooks.send_webhook\",\n **params\n )\n\n # Scripts\n elif event_rule.action_type == EventRuleActionChoices.SCRIPT:\n # Resolve the script from action parameters\n 
script_module = event_rule.action_object\n script_name = event_rule.action_parameters['script_name']\n script = script_module.scripts[script_name]()\n\n # Enqueue a Job to record the script's execution\n Job.enqueue(\n \"extras.scripts.run_script\",\n instance=script_module,\n name=script.class_name,\n user=user,\n data=data\n )\n\n else:\n raise ValueError(f\"Unknown action type for an event rule: {event_rule.action_type}\")\n\n\ndef process_event_queue(events):\n \"\"\"\n Flush a list of object representation to RQ for EventRule processing.\n \"\"\"\n events_cache = {\n 'type_create': {},\n 'type_update': {},\n 'type_delete': {},\n }\n\n for data in events:\n action_flag = {\n ObjectChangeActionChoices.ACTION_CREATE: 'type_create',\n ObjectChangeActionChoices.ACTION_UPDATE: 'type_update',\n ObjectChangeActionChoices.ACTION_DELETE: 'type_delete',\n }[data['event']]\n content_type = data['content_type']\n\n # Cache applicable Event Rules\n if content_type not in events_cache[action_flag]:\n events_cache[action_flag][content_type] = EventRule.objects.filter(\n **{action_flag: True},\n content_types=content_type,\n enabled=True\n )\n event_rules = events_cache[action_flag][content_type]\n\n process_event_rules(\n event_rules, content_type.model, data['event'], data['data'], data['username'],\n snapshots=data['snapshots'], request_id=data['request_id']\n )\n\n\ndef flush_events(queue):\n \"\"\"\n Flush a list of object representation to RQ for webhook processing.\n \"\"\"\n if queue:\n for name in settings.EVENTS_PIPELINE:\n try:\n func = import_string(name)\n func(queue)\n except Exception as e:\n logger.error(f\"Cannot import events pipeline {name} error: {e}\")\n", "path": "netbox/extras/events.py" } ]
diff --git a/netbox/extras/events.py b/netbox/extras/events.py index 6d0654929fc..90cca83cd07 100644 --- a/netbox/extras/events.py +++ b/netbox/extras/events.py @@ -81,7 +81,7 @@ def process_event_rules(event_rules, model_name, event, data, username, snapshot # Evaluate event rule conditions (if any) if not event_rule.eval_conditions(data): - return + continue # Webhooks if event_rule.action_type == EventRuleActionChoices.WEBHOOK:
deeppavlov__DeepPavlov-79
What is "'Chainer' object has no attribute 'infer' 2018-03-04 14:09:23,638 (util.py:64 WorkerThread2) ERROR - TeleBot: "AttributeError occurred, args=("'Chainer' object has no attribute 'infer'",) Traceback (most recent call last): File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run task(*args, **kwargs) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference pred = model.infer(context) AttributeError: 'Chainer' object has no attribute 'infer' " 2018-03-04 14:09:23.638 ERROR in 'TeleBot'['util'] at line 64: AttributeError occurred, args=("'Chainer' object has no attribute 'infer'",) Traceback (most recent call last): File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run task(*args, **kwargs) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference pred = model.infer(context) AttributeError: 'Chainer' object has no attribute 'infer' Traceback (most recent call last): File "deep.py", line 60, in <module> main() File "deep.py", line 56, in main interact_model_by_telegram(pipeline_config_path, token) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 58, in interact_model_by_telegram init_bot_for_model(token, model) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 52, in init_bot_for_model bot.polling() File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py", line 264, in polling self.__threaded_polling(none_stop, interval, timeout) File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py", line 288, in __threaded_polling self.worker_pool.raise_exceptions() File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 107, in raise_exceptions six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2]) File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/six.py", line 693, in reraise raise value File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run task(*args, **kwargs) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference pred = model.infer(context) AttributeError: 'Chainer' object has no attribute 'infer' Telegram interface bug Alexander Seliverstov, [04.03.18 15:20] /start jhfirufoiug_bot, [04.03.18 15:20] Welcome to DeepPavlov inference bot! Alexander Seliverstov, [04.03.18 15:20] Hi jhfirufoiug_bot, [04.03.18 15:20] ['Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?'] Alexander Seliverstov, [04.03.18 15:20] I want cheap russian food jhfirufoiug_bot, [04.03.18 15:20] ['Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. 
You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?', 'Hello, welcome to the Cambridge restaurant system. You can ask for restaurants by area, price range or food type. How may I help you?']
[ { "content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model.infer(context)\n reply_message = str(pred)\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py" } ]
[ { "content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model([context])\n reply_message = str(pred[0])\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py" } ]
diff --git a/README.md b/README.md index 300f903e81..85db4b47c3 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ # <center>DeepPavlov</center> ### *We are in a really early Alpha release. You have to be ready for hard adventures.* +### *If you have updated to version 0.0.2 - please re-download all pretrained models* An open-source conversational AI library, built on TensorFlow and Keras, and designed for * NLP and dialog systems research * implementation and evaluation of complex conversational systems @@ -21,7 +22,7 @@ and AI-application developers with: | Component | Description | | --------- | ----------- | -| [Slot filling component](deeppavlov/models/ner/README.md) | is based on neural Named Entity Recognition network and fuzzy Levenshtein search to extract normalized slot values from the text. The NER network component reproduces architecture from the paper [Application of a Hybrid Bi-LSTM-CRF model to the task of Russian Named Entity Recognition](https://arxiv.org/pdf/1709.09686.pdf), which is inspired by LSTM+CRF architecture from https://arxiv.org/pdf/1603.01360.pdf. | +| [Slot filling and NER componenst](deeppavlov/models/ner/README.md) | Based on neural Named Entity Recognition network and fuzzy Levenshtein search to extract normalized slot values from the text. The NER component reproduces architecture from the paper [Application of a Hybrid Bi-LSTM-CRF model to the task of Russian Named Entity Recognition](https://arxiv.org/pdf/1709.09686.pdf), which is inspired by Bi-LSTM+CRF architecture from https://arxiv.org/pdf/1603.01360.pdf. | | [Intent classification component](deeppavlov/models/classifiers/intents/README.md) | Based on shallow-and-wide Convolutional Neural Network architecture from [Kim Y. Convolutional neural networks for sentence classification – 2014](https://arxiv.org/pdf/1408.5882). The model allows multilabel classification of sentences. | | [Automatic spelling correction component](deeppavlov/models/spellers/error_model/README.md) | Based on [An Improved Error Model for Noisy Channel Spelling Correction by Eric Brill and Robert C. Moore](http://www.aclweb.org/anthology/P00-1037) and uses statistics based error model, a static dictionary and an ARPA language model to correct spelling errors. | | **Skill** | | @@ -37,7 +38,7 @@ View video demo of deploy goal-oriented bot and slot-filling model with Telegram * Run goal-oriented bot with Telegram interface: ``` - python deep.py interactbot configs/go_bot/config.json -t <TELEGRAM_TOKEN> + python deep.py interactbot configs/go_bot/gobot_dstc2.json -t <TELEGRAM_TOKEN> ``` * Run goal-oriented bot with console interface: ``` diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py index 3841847438..cf86ee8947 100644 --- a/telegram_utils/telegram_ui.py +++ b/telegram_utils/telegram_ui.py @@ -45,8 +45,8 @@ def handle_inference(message): chat_id = message.chat.id context = message.text - pred = model.infer(context) - reply_message = str(pred) + pred = model([context]) + reply_message = str(pred[0]) bot.send_message(chat_id, reply_message) bot.polling()
lutris__lutris-559
Lutris shortcuts broken

See: https://forums.lutris.net/t/desktop-shortcut-not-work-for-any-game/456
[ { "content": "import os\nimport re\nimport concurrent.futures\nfrom urllib.parse import urlparse, parse_qsl\n\nfrom lutris import settings\nfrom lutris import api\nfrom lutris.util.log import logger\nfrom lutris.util.http import Request\n\nBANNER = \"banner\"\nICON = \"icon\"\n\n\ndef get_icon_path(game, icon_type):\n if icon_type == BANNER:\n return os.path.join(settings.BANNER_PATH, \"%s.jpg\" % game)\n if icon_type == ICON:\n return os.path.join(settings.ICON_PATH, \"lutris_%s.png\" % game)\n\n\ndef has_icon(game, icon_type):\n if icon_type == BANNER:\n icon_path = get_icon_path(game, BANNER)\n return os.path.exists(icon_path)\n elif icon_type == ICON:\n icon_path = get_icon_path(game, ICON)\n return os.path.exists(icon_path)\n\n\ndef fetch_icons(game_slugs, callback=None):\n no_banners = [slug for slug in game_slugs if not has_icon(slug, BANNER)]\n no_icons = [slug for slug in game_slugs if not has_icon(slug, ICON)]\n\n # Remove duplicate slugs\n missing_media_slugs = list(set(no_banners) | set(no_icons))\n if not missing_media_slugs:\n return\n\n response = api.get_games(game_slugs=missing_media_slugs)\n if not response:\n logger.warning('Unable to get games from API')\n return\n results = response['results']\n while response.get('next'):\n page_match = re.search(r'page=(\\d+)', response['next'])\n if page_match:\n page = page_match.group(1)\n else:\n logger.error(\"No page found in %s\", response['next'])\n break\n response = api.get_games(game_slugs=missing_media_slugs, page=page)\n if not response:\n logger.warning(\"Unable to get response for page %s\", page)\n break\n else:\n results += response.get('results', [])\n\n banner_downloads = []\n icon_downloads = []\n updated_slugs = []\n for game in results:\n if game['slug'] in no_banners:\n banner_url = game['banner_url']\n if banner_url:\n dest_path = get_icon_path(game['slug'], BANNER)\n banner_downloads.append((game['banner_url'], dest_path))\n updated_slugs.append(game['slug'])\n if game['slug'] in no_icons:\n icon_url = game['icon_url']\n if icon_url:\n dest_path = get_icon_path(game['slug'], ICON)\n icon_downloads.append((game['icon_url'], dest_path))\n updated_slugs.append(game['slug'])\n\n updated_slugs = list(set(updated_slugs)) # Deduplicate slugs\n\n downloads = banner_downloads + icon_downloads\n with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:\n for url, dest_path in downloads:\n executor.submit(download_media, url, dest_path)\n\n if updated_slugs and callback:\n callback(updated_slugs)\n\n\ndef download_media(url, dest, overwrite=False):\n if os.path.exists(dest):\n if overwrite:\n os.remove(dest)\n else:\n return\n request = Request(url).get()\n request.write_to_file(dest)\n\n\ndef parse_installer_url(url):\n \"\"\"\n Parses `lutris:` urls, extracting any info necessary to install or run a game.\n \"\"\"\n try:\n parsed_url = urlparse(url, scheme=\"lutris\")\n except:\n return False\n if parsed_url.scheme != \"lutris\":\n return False\n game_slug = parsed_url.path\n if not game_slug:\n return False\n revision = None\n if parsed_url.query:\n query = dict(parse_qsl(parsed_url.query))\n revision = query.get('revision')\n return {\n 'game_slug': game_slug,\n 'revision': revision\n }\n", "path": "lutris/util/resources.py" } ]
[ { "content": "import os\nimport re\nimport concurrent.futures\nfrom urllib.parse import urlparse, parse_qsl\n\nfrom lutris import settings\nfrom lutris import api\nfrom lutris.util.log import logger\nfrom lutris.util.http import Request\n\nBANNER = \"banner\"\nICON = \"icon\"\n\n\ndef get_icon_path(game, icon_type):\n if icon_type == BANNER:\n return os.path.join(settings.BANNER_PATH, \"%s.jpg\" % game)\n if icon_type == ICON:\n return os.path.join(settings.ICON_PATH, \"lutris_%s.png\" % game)\n\n\ndef has_icon(game, icon_type):\n if icon_type == BANNER:\n icon_path = get_icon_path(game, BANNER)\n return os.path.exists(icon_path)\n elif icon_type == ICON:\n icon_path = get_icon_path(game, ICON)\n return os.path.exists(icon_path)\n\n\ndef fetch_icons(game_slugs, callback=None):\n no_banners = [slug for slug in game_slugs if not has_icon(slug, BANNER)]\n no_icons = [slug for slug in game_slugs if not has_icon(slug, ICON)]\n\n # Remove duplicate slugs\n missing_media_slugs = list(set(no_banners) | set(no_icons))\n if not missing_media_slugs:\n return\n\n response = api.get_games(game_slugs=missing_media_slugs)\n if not response:\n logger.warning('Unable to get games from API')\n return\n results = response['results']\n while response.get('next'):\n page_match = re.search(r'page=(\\d+)', response['next'])\n if page_match:\n page = page_match.group(1)\n else:\n logger.error(\"No page found in %s\", response['next'])\n break\n response = api.get_games(game_slugs=missing_media_slugs, page=page)\n if not response:\n logger.warning(\"Unable to get response for page %s\", page)\n break\n else:\n results += response.get('results', [])\n\n banner_downloads = []\n icon_downloads = []\n updated_slugs = []\n for game in results:\n if game['slug'] in no_banners:\n banner_url = game['banner_url']\n if banner_url:\n dest_path = get_icon_path(game['slug'], BANNER)\n banner_downloads.append((game['banner_url'], dest_path))\n updated_slugs.append(game['slug'])\n if game['slug'] in no_icons:\n icon_url = game['icon_url']\n if icon_url:\n dest_path = get_icon_path(game['slug'], ICON)\n icon_downloads.append((game['icon_url'], dest_path))\n updated_slugs.append(game['slug'])\n\n updated_slugs = list(set(updated_slugs)) # Deduplicate slugs\n\n downloads = banner_downloads + icon_downloads\n with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:\n for url, dest_path in downloads:\n executor.submit(download_media, url, dest_path)\n\n if updated_slugs and callback:\n callback(updated_slugs)\n\n\ndef download_media(url, dest, overwrite=False):\n if os.path.exists(dest):\n if overwrite:\n os.remove(dest)\n else:\n return\n request = Request(url).get()\n request.write_to_file(dest)\n\n\ndef parse_installer_url(url):\n \"\"\"\n Parses `lutris:` urls, extracting any info necessary to install or run a game.\n \"\"\"\n try:\n parsed_url = urlparse(url, scheme=\"lutris\")\n except:\n return False\n if parsed_url.scheme != \"lutris\":\n return False\n game_slug = parsed_url.path\n if not game_slug:\n return False\n if game_slug.startswith('lutris:'):\n game_slug = game_slug[7:]\n revision = None\n if parsed_url.query:\n query = dict(parse_qsl(parsed_url.query))\n revision = query.get('revision')\n return {\n 'game_slug': game_slug,\n 'revision': revision\n }\n", "path": "lutris/util/resources.py" } ]
diff --git a/lutris/util/resources.py b/lutris/util/resources.py index 52530544ce..65eb9e40e2 100644 --- a/lutris/util/resources.py +++ b/lutris/util/resources.py @@ -107,6 +107,8 @@ def parse_installer_url(url): game_slug = parsed_url.path if not game_slug: return False + if game_slug.startswith('lutris:'): + game_slug = game_slug[7:] revision = None if parsed_url.query: query = dict(parse_qsl(parsed_url.query))
xonsh__xonsh-1890
No output from scp command

While running scp in xonsh, the progress does not show up: https://asciinema.org/a/322p80uvb0pjyaic2e51iqmhq

I'm using version 3f45378
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Module for caching command & alias names as well as for predicting whether\na command will be able to be run in the background.\n\nA background predictor is a function that accepect a single argument list\nand returns whethere or not the process can be run in the background (returns\nTrue) or must be run the foreground (returns False).\n\"\"\"\nimport os\nimport builtins\nimport argparse\nimport collections\nimport collections.abc as cabc\n\nfrom xonsh.platform import ON_WINDOWS, pathbasename\nfrom xonsh.tools import executables_in\nfrom xonsh.lazyasd import lazyobject\n\n\nclass CommandsCache(cabc.Mapping):\n \"\"\"A lazy cache representing the commands available on the file system.\n The keys are the command names and the values a tuple of (loc, has_alias)\n where loc is either a str pointing to the executable on the file system or\n None (if no executable exists) and has_alias is a boolean flag for whether\n the command has an alias.\n \"\"\"\n\n def __init__(self):\n self._cmds_cache = {}\n self._path_checksum = None\n self._alias_checksum = None\n self._path_mtime = -1\n self.threadable_predictors = default_threadable_predictors()\n\n def __contains__(self, key):\n _ = self.all_commands\n return self.lazyin(key)\n\n def __iter__(self):\n for cmd, (path, is_alias) in self.all_commands.items():\n if ON_WINDOWS and path is not None:\n # All comand keys are stored in uppercase on Windows.\n # This ensures the original command name is returned.\n cmd = pathbasename(path)\n yield cmd\n\n def __len__(self):\n return len(self.all_commands)\n\n def __getitem__(self, key):\n _ = self.all_commands\n return self.lazyget(key)\n\n def is_empty(self):\n \"\"\"Returns whether the cache is populated or not.\"\"\"\n return len(self._cmds_cache) == 0\n\n @staticmethod\n def get_possible_names(name):\n \"\"\"Generates the possible `PATHEXT` extension variants of a given executable\n name on Windows as a list, conserving the ordering in `PATHEXT`.\n Returns a list as `name` being the only item in it on other platforms.\"\"\"\n if ON_WINDOWS:\n pathext = builtins.__xonsh_env__.get('PATHEXT')\n name = name.upper()\n return [\n name + ext\n for ext in ([''] + pathext)\n ]\n else:\n return [name]\n\n @property\n def all_commands(self):\n paths = builtins.__xonsh_env__.get('PATH', [])\n pathset = frozenset(x for x in paths if os.path.isdir(x))\n # did PATH change?\n path_hash = hash(pathset)\n cache_valid = path_hash == self._path_checksum\n self._path_checksum = path_hash\n # did aliases change?\n alss = getattr(builtins, 'aliases', set())\n al_hash = hash(frozenset(alss))\n cache_valid = cache_valid and al_hash == self._alias_checksum\n self._alias_checksum = al_hash\n # did the contents of any directory in PATH change?\n max_mtime = 0\n for path in pathset:\n mtime = os.stat(path).st_mtime\n if mtime > max_mtime:\n max_mtime = mtime\n cache_valid = cache_valid and (max_mtime <= self._path_mtime)\n self._path_mtime = max_mtime\n if cache_valid:\n return self._cmds_cache\n allcmds = {}\n for path in reversed(paths):\n # iterate backwards so that entries at the front of PATH overwrite\n # entries at the back.\n for cmd in executables_in(path):\n key = cmd.upper() if ON_WINDOWS else cmd\n allcmds[key] = (os.path.join(path, cmd), cmd in alss)\n for cmd in alss:\n if cmd not in allcmds:\n key = cmd.upper() if ON_WINDOWS else cmd\n allcmds[key] = (cmd, True)\n self._cmds_cache = allcmds\n return allcmds\n\n def cached_name(self, name):\n \"\"\"Returns the name that 
would appear in the cache, if it was exists.\"\"\"\n if name is None:\n return None\n cached = pathbasename(name)\n if ON_WINDOWS:\n keys = self.get_possible_names(cached)\n cached = next((k for k in keys if k in self._cmds_cache), None)\n return cached\n\n def lazyin(self, key):\n \"\"\"Checks if the value is in the current cache without the potential to\n update the cache. It just says whether the value is known *now*. This\n may not reflect precisely what is on the $PATH.\n \"\"\"\n return self.cached_name(key) in self._cmds_cache\n\n def lazyiter(self):\n \"\"\"Returns an iterator over the current cache contents without the\n potential to update the cache. This may not reflect what is on the\n $PATH.\n \"\"\"\n return iter(self._cmds_cache)\n\n def lazylen(self):\n \"\"\"Returns the length of the current cache contents without the\n potential to update the cache. This may not reflect precisely\n what is on the $PATH.\n \"\"\"\n return len(self._cmds_cache)\n\n def lazyget(self, key, default=None):\n \"\"\"A lazy value getter.\"\"\"\n return self._cmds_cache.get(self.cached_name(key), default)\n\n def locate_binary(self, name):\n \"\"\"Locates an executable on the file system using the cache.\"\"\"\n # make sure the cache is up to date by accessing the property\n _ = self.all_commands\n return self.lazy_locate_binary(name)\n\n def lazy_locate_binary(self, name):\n \"\"\"Locates an executable in the cache, without checking its validity.\"\"\"\n possibilities = self.get_possible_names(name)\n if ON_WINDOWS:\n # Windows users expect to be able to execute files in the same\n # directory without `./`\n local_bin = next((fn for fn in possibilities if os.path.isfile(fn)),\n None)\n if local_bin:\n return os.path.abspath(local_bin)\n cached = next((cmd for cmd in possibilities if cmd in self._cmds_cache),\n None)\n if cached:\n (path, is_alias) = self._cmds_cache[cached]\n return path if not is_alias else None\n elif os.path.isfile(name) and name != pathbasename(name):\n return name\n\n def predict_threadable(self, cmd):\n \"\"\"Predicts whether a command list is able to be run on a background\n thread, rather than the main thread.\n \"\"\"\n name = self.cached_name(cmd[0])\n if ON_WINDOWS:\n # On all names (keys) are stored in upper case so instead\n # we get the original cmd or alias name\n path, _ = self.lazyget(name, (None, None))\n if path is None:\n return True\n else:\n name = pathbasename(path)\n predictor = self.threadable_predictors[name]\n return predictor(cmd[1:])\n\n#\n# Background Predictors\n#\n\n\ndef predict_true(args):\n \"\"\"Always say the process is threadable.\"\"\"\n return True\n\n\ndef predict_false(args):\n \"\"\"Never say the process is threadable.\"\"\"\n return False\n\n\n@lazyobject\ndef SHELL_PREDICTOR_PARSER():\n p = argparse.ArgumentParser('shell', add_help=False)\n p.add_argument('-c', nargs='?', default=None)\n p.add_argument('filename', nargs='?', default=None)\n return p\n\n\ndef predict_shell(args):\n \"\"\"Precict the backgroundability of the normal shell interface, which\n comes down to whether it is being run in subproc mode.\n \"\"\"\n ns, _ = SHELL_PREDICTOR_PARSER.parse_known_args(args)\n if ns.c is None and ns.filename is None:\n pred = False\n else:\n pred = True\n return pred\n\n\n@lazyobject\ndef HELP_VER_PREDICTOR_PARSER():\n p = argparse.ArgumentParser('cmd', add_help=False)\n p.add_argument('-h', '--help', dest='help',\n action='store_true', default=None)\n p.add_argument('-v', '-V', '--version', dest='version',\n action='store_true', 
default=None)\n return p\n\n\ndef predict_help_ver(args):\n \"\"\"Precict the backgroundability of commands that have help & version\n switches: -h, --help, -v, -V, --version. If either of these options is\n present, the command is assumed to print to stdout normally and is therefore\n threadable. Otherwise, the command is assumed to not be threadable.\n This is useful for commands, like top, that normally enter alternate mode\n but may not in certain circumstances.\n \"\"\"\n ns, _ = HELP_VER_PREDICTOR_PARSER.parse_known_args(args)\n pred = ns.help is not None or ns.version is not None\n return pred\n\n\ndef default_threadable_predictors():\n \"\"\"Generates a new defaultdict for known threadable predictors.\n The default is to predict true.\n \"\"\"\n # alphabetical, for what it is worth.\n predictors = {\n 'bash': predict_shell,\n 'csh': predict_shell,\n 'clear': predict_false,\n 'clear.exe': predict_false,\n 'cls': predict_false,\n 'cmd': predict_shell,\n 'fish': predict_shell,\n 'htop': predict_help_ver,\n 'ksh': predict_shell,\n 'less': predict_help_ver,\n 'man': predict_help_ver,\n 'more': predict_help_ver,\n 'sh': predict_shell,\n 'ssh': predict_false,\n 'startx': predict_false,\n 'sudo': predict_help_ver,\n 'tcsh': predict_shell,\n 'top': predict_help_ver,\n 'vi': predict_false,\n 'vim': predict_false,\n 'vimpager': predict_help_ver,\n 'xo': predict_help_ver,\n 'xonsh': predict_shell,\n 'zsh': predict_shell,\n }\n return collections.defaultdict(lambda: predict_true, predictors)\n", "path": "xonsh/commands_cache.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Module for caching command & alias names as well as for predicting whether\na command will be able to be run in the background.\n\nA background predictor is a function that accepect a single argument list\nand returns whethere or not the process can be run in the background (returns\nTrue) or must be run the foreground (returns False).\n\"\"\"\nimport os\nimport builtins\nimport argparse\nimport collections\nimport collections.abc as cabc\n\nfrom xonsh.platform import ON_WINDOWS, pathbasename\nfrom xonsh.tools import executables_in\nfrom xonsh.lazyasd import lazyobject\n\n\nclass CommandsCache(cabc.Mapping):\n \"\"\"A lazy cache representing the commands available on the file system.\n The keys are the command names and the values a tuple of (loc, has_alias)\n where loc is either a str pointing to the executable on the file system or\n None (if no executable exists) and has_alias is a boolean flag for whether\n the command has an alias.\n \"\"\"\n\n def __init__(self):\n self._cmds_cache = {}\n self._path_checksum = None\n self._alias_checksum = None\n self._path_mtime = -1\n self.threadable_predictors = default_threadable_predictors()\n\n def __contains__(self, key):\n _ = self.all_commands\n return self.lazyin(key)\n\n def __iter__(self):\n for cmd, (path, is_alias) in self.all_commands.items():\n if ON_WINDOWS and path is not None:\n # All comand keys are stored in uppercase on Windows.\n # This ensures the original command name is returned.\n cmd = pathbasename(path)\n yield cmd\n\n def __len__(self):\n return len(self.all_commands)\n\n def __getitem__(self, key):\n _ = self.all_commands\n return self.lazyget(key)\n\n def is_empty(self):\n \"\"\"Returns whether the cache is populated or not.\"\"\"\n return len(self._cmds_cache) == 0\n\n @staticmethod\n def get_possible_names(name):\n \"\"\"Generates the possible `PATHEXT` extension variants of a given executable\n name on Windows as a list, conserving the ordering in `PATHEXT`.\n Returns a list as `name` being the only item in it on other platforms.\"\"\"\n if ON_WINDOWS:\n pathext = builtins.__xonsh_env__.get('PATHEXT')\n name = name.upper()\n return [\n name + ext\n for ext in ([''] + pathext)\n ]\n else:\n return [name]\n\n @property\n def all_commands(self):\n paths = builtins.__xonsh_env__.get('PATH', [])\n pathset = frozenset(x for x in paths if os.path.isdir(x))\n # did PATH change?\n path_hash = hash(pathset)\n cache_valid = path_hash == self._path_checksum\n self._path_checksum = path_hash\n # did aliases change?\n alss = getattr(builtins, 'aliases', set())\n al_hash = hash(frozenset(alss))\n cache_valid = cache_valid and al_hash == self._alias_checksum\n self._alias_checksum = al_hash\n # did the contents of any directory in PATH change?\n max_mtime = 0\n for path in pathset:\n mtime = os.stat(path).st_mtime\n if mtime > max_mtime:\n max_mtime = mtime\n cache_valid = cache_valid and (max_mtime <= self._path_mtime)\n self._path_mtime = max_mtime\n if cache_valid:\n return self._cmds_cache\n allcmds = {}\n for path in reversed(paths):\n # iterate backwards so that entries at the front of PATH overwrite\n # entries at the back.\n for cmd in executables_in(path):\n key = cmd.upper() if ON_WINDOWS else cmd\n allcmds[key] = (os.path.join(path, cmd), cmd in alss)\n for cmd in alss:\n if cmd not in allcmds:\n key = cmd.upper() if ON_WINDOWS else cmd\n allcmds[key] = (cmd, True)\n self._cmds_cache = allcmds\n return allcmds\n\n def cached_name(self, name):\n \"\"\"Returns the name that 
would appear in the cache, if it was exists.\"\"\"\n if name is None:\n return None\n cached = pathbasename(name)\n if ON_WINDOWS:\n keys = self.get_possible_names(cached)\n cached = next((k for k in keys if k in self._cmds_cache), None)\n return cached\n\n def lazyin(self, key):\n \"\"\"Checks if the value is in the current cache without the potential to\n update the cache. It just says whether the value is known *now*. This\n may not reflect precisely what is on the $PATH.\n \"\"\"\n return self.cached_name(key) in self._cmds_cache\n\n def lazyiter(self):\n \"\"\"Returns an iterator over the current cache contents without the\n potential to update the cache. This may not reflect what is on the\n $PATH.\n \"\"\"\n return iter(self._cmds_cache)\n\n def lazylen(self):\n \"\"\"Returns the length of the current cache contents without the\n potential to update the cache. This may not reflect precisely\n what is on the $PATH.\n \"\"\"\n return len(self._cmds_cache)\n\n def lazyget(self, key, default=None):\n \"\"\"A lazy value getter.\"\"\"\n return self._cmds_cache.get(self.cached_name(key), default)\n\n def locate_binary(self, name):\n \"\"\"Locates an executable on the file system using the cache.\"\"\"\n # make sure the cache is up to date by accessing the property\n _ = self.all_commands\n return self.lazy_locate_binary(name)\n\n def lazy_locate_binary(self, name):\n \"\"\"Locates an executable in the cache, without checking its validity.\"\"\"\n possibilities = self.get_possible_names(name)\n if ON_WINDOWS:\n # Windows users expect to be able to execute files in the same\n # directory without `./`\n local_bin = next((fn for fn in possibilities if os.path.isfile(fn)),\n None)\n if local_bin:\n return os.path.abspath(local_bin)\n cached = next((cmd for cmd in possibilities if cmd in self._cmds_cache),\n None)\n if cached:\n (path, is_alias) = self._cmds_cache[cached]\n return path if not is_alias else None\n elif os.path.isfile(name) and name != pathbasename(name):\n return name\n\n def predict_threadable(self, cmd):\n \"\"\"Predicts whether a command list is able to be run on a background\n thread, rather than the main thread.\n \"\"\"\n name = self.cached_name(cmd[0])\n if ON_WINDOWS:\n # On all names (keys) are stored in upper case so instead\n # we get the original cmd or alias name\n path, _ = self.lazyget(name, (None, None))\n if path is None:\n return True\n else:\n name = pathbasename(path)\n predictor = self.threadable_predictors[name]\n return predictor(cmd[1:])\n\n#\n# Background Predictors\n#\n\n\ndef predict_true(args):\n \"\"\"Always say the process is threadable.\"\"\"\n return True\n\n\ndef predict_false(args):\n \"\"\"Never say the process is threadable.\"\"\"\n return False\n\n\n@lazyobject\ndef SHELL_PREDICTOR_PARSER():\n p = argparse.ArgumentParser('shell', add_help=False)\n p.add_argument('-c', nargs='?', default=None)\n p.add_argument('filename', nargs='?', default=None)\n return p\n\n\ndef predict_shell(args):\n \"\"\"Precict the backgroundability of the normal shell interface, which\n comes down to whether it is being run in subproc mode.\n \"\"\"\n ns, _ = SHELL_PREDICTOR_PARSER.parse_known_args(args)\n if ns.c is None and ns.filename is None:\n pred = False\n else:\n pred = True\n return pred\n\n\n@lazyobject\ndef HELP_VER_PREDICTOR_PARSER():\n p = argparse.ArgumentParser('cmd', add_help=False)\n p.add_argument('-h', '--help', dest='help',\n action='store_true', default=None)\n p.add_argument('-v', '-V', '--version', dest='version',\n action='store_true', 
default=None)\n return p\n\n\ndef predict_help_ver(args):\n \"\"\"Precict the backgroundability of commands that have help & version\n switches: -h, --help, -v, -V, --version. If either of these options is\n present, the command is assumed to print to stdout normally and is therefore\n threadable. Otherwise, the command is assumed to not be threadable.\n This is useful for commands, like top, that normally enter alternate mode\n but may not in certain circumstances.\n \"\"\"\n ns, _ = HELP_VER_PREDICTOR_PARSER.parse_known_args(args)\n pred = ns.help is not None or ns.version is not None\n return pred\n\n\ndef default_threadable_predictors():\n \"\"\"Generates a new defaultdict for known threadable predictors.\n The default is to predict true.\n \"\"\"\n # alphabetical, for what it is worth.\n predictors = {\n 'bash': predict_shell,\n 'csh': predict_shell,\n 'clear': predict_false,\n 'clear.exe': predict_false,\n 'cls': predict_false,\n 'cmd': predict_shell,\n 'fish': predict_shell,\n 'htop': predict_help_ver,\n 'ksh': predict_shell,\n 'less': predict_help_ver,\n 'man': predict_help_ver,\n 'more': predict_help_ver,\n 'scp': predict_false,\n 'sh': predict_shell,\n 'ssh': predict_false,\n 'startx': predict_false,\n 'sudo': predict_help_ver,\n 'tcsh': predict_shell,\n 'top': predict_help_ver,\n 'vi': predict_false,\n 'vim': predict_false,\n 'vimpager': predict_help_ver,\n 'xo': predict_help_ver,\n 'xonsh': predict_shell,\n 'zsh': predict_shell,\n }\n return collections.defaultdict(lambda: predict_true, predictors)\n", "path": "xonsh/commands_cache.py" } ]
diff --git a/news/scp_predict_false.rst b/news/scp_predict_false.rst new file mode 100644 index 0000000000..ac4523f110 --- /dev/null +++ b/news/scp_predict_false.rst @@ -0,0 +1,13 @@ +**Added:** None + +**Changed:** None + +**Deprecated:** None + +**Removed:** None + +**Fixed:** + +* Fix ``scp`` progress not being outputted + +**Security:** None diff --git a/xonsh/commands_cache.py b/xonsh/commands_cache.py index 92176c23a6..0dee3febdf 100644 --- a/xonsh/commands_cache.py +++ b/xonsh/commands_cache.py @@ -258,6 +258,7 @@ def default_threadable_predictors(): 'less': predict_help_ver, 'man': predict_help_ver, 'more': predict_help_ver, + 'scp': predict_false, 'sh': predict_shell, 'ssh': predict_false, 'startx': predict_false,
geopandas__geopandas-785
VIS/PERF: only flatten geometries when there are actually multi-geometries

Currently we always loop through all geometries in `_flatten_multi_geoms` when plotting, even when there are no Multi-Point/LineString/Polygons. Checking in advance whether any multi-geometries are present, and thus whether flattening is needed at all, would improve the performance of plotting many single geometries.
[ { "content": "from __future__ import print_function\nfrom distutils.version import LooseVersion\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\ndef _flatten_multi_geoms(geoms, colors=None):\n \"\"\"\n Returns Series like geoms and colors, except that any Multi geometries\n are split into their components and colors are repeated for all component\n in the same Multi geometry. Maintains 1:1 matching of geometry to color.\n Passing `color` is optional, and when no `color` is passed a list of None\n values is returned as `component_colors`.\n\n \"Colors\" are treated opaquely and so can actually contain any values.\n\n Returns\n -------\n\n components : list of geometry\n\n component_colors : list of whatever type `colors` contains\n \"\"\"\n if colors is None:\n colors = [None] * len(geoms)\n\n components, component_colors = [], []\n\n # precondition, so zip can't short-circuit\n assert len(geoms) == len(colors)\n for geom, color in zip(geoms, colors):\n if geom.type.startswith('Multi'):\n for poly in geom:\n components.append(poly)\n # repeat same color for all components\n component_colors.append(color)\n else:\n components.append(geom)\n component_colors.append(color)\n\n return components, component_colors\n\n\ndef plot_polygon_collection(ax, geoms, values=None, color=None,\n cmap=None, vmin=None, vmax=None, **kwargs):\n \"\"\"\n Plots a collection of Polygon and MultiPolygon geometries to `ax`\n\n Parameters\n ----------\n\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n\n geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n Otherwise follows `color` / `facecolor` kwargs.\n\n edgecolor : single color or sequence of `N` colors\n Color for the edge of the polygons\n\n facecolor : single color or sequence of `N` colors\n Color to fill the polygons. 
Cannot be used together with `values`.\n\n color : single color or sequence of `N` colors\n Sets both `edgecolor` and `facecolor`\n\n **kwargs\n Additional keyword arguments passed to the collection\n\n Returns\n -------\n\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n\n try:\n from descartes.patch import PolygonPatch\n except ImportError:\n raise ImportError(\"The descartes package is required\"\n \" for plotting polygons in geopandas.\")\n from matplotlib.collections import PatchCollection\n\n geoms, values = _flatten_multi_geoms(geoms, values)\n if None in values:\n values = None\n\n # PatchCollection does not accept some kwargs.\n if 'markersize' in kwargs:\n del kwargs['markersize']\n\n # color=None overwrites specified facecolor/edgecolor with default color\n if color is not None:\n kwargs['color'] = color\n\n collection = PatchCollection([PolygonPatch(poly) for poly in geoms],\n **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\ndef plot_linestring_collection(ax, geoms, values=None, color=None,\n cmap=None, vmin=None, vmax=None, **kwargs):\n \"\"\"\n Plots a collection of LineString and MultiLineString geometries to `ax`\n\n Parameters\n ----------\n\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n\n geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be\n mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n\n color : single color or sequence of `N` colors\n Cannot be used together with `values`.\n\n Returns\n -------\n\n collection : matplotlib.collections.Collection that was plotted\n\n \"\"\"\n from matplotlib.collections import LineCollection\n\n geoms, values = _flatten_multi_geoms(geoms, values)\n if None in values:\n values = None\n\n # LineCollection does not accept some kwargs.\n if 'markersize' in kwargs:\n del kwargs['markersize']\n\n # color=None gives black instead of default color cycle\n if color is not None:\n kwargs['color'] = color\n\n segments = [np.array(linestring)[:, :2] for linestring in geoms]\n collection = LineCollection(segments, **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\ndef plot_point_collection(ax, geoms, values=None, color=None,\n cmap=None, vmin=None, vmax=None,\n marker='o', markersize=None, **kwargs):\n \"\"\"\n Plots a collection of Point and MultiPoint geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : sequence of `N` Points or MultiPoints\n\n values : a sequence of `N` values, optional\n Values mapped to colors using vmin, vmax, and cmap.\n Cannot be specified together with `color`.\n markersize : scalar or array-like, optional\n Size of the markers. 
Note that under the hood ``scatter`` is\n used, so the specified value will be proportional to the\n area of the marker (size in points^2).\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n if values is not None and color is not None:\n raise ValueError(\"Can only specify one of 'values' and 'color' kwargs\")\n\n geoms, values = _flatten_multi_geoms(geoms, values)\n if None in values:\n values = None\n x = [p.x for p in geoms]\n y = [p.y for p in geoms]\n\n # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None\n if values is not None:\n kwargs['c'] = values\n if markersize is not None:\n kwargs['s'] = markersize\n\n collection = ax.scatter(x, y, color=color, vmin=vmin, vmax=vmax, cmap=cmap,\n marker=marker, **kwargs)\n return collection\n\n\ndef plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):\n \"\"\"\n Plot a GeoSeries.\n\n Generate a plot of a GeoSeries geometry with matplotlib.\n\n Parameters\n ----------\n s : Series\n The GeoSeries to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib. Any\n colormap will work, but categorical colormaps are\n generally recommended. Examples of useful discrete\n colormaps include:\n\n tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2\n\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n figsize : pair of floats (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n ax is given explicitly, figsize is ignored.\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n \"\"\"\n if 'colormap' in style_kwds:\n warnings.warn(\"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\", FutureWarning)\n cmap = style_kwds.pop('colormap')\n if 'axes' in style_kwds:\n warnings.warn(\"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\", FutureWarning)\n ax = style_kwds.pop('axes')\n\n import matplotlib.pyplot as plt\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n\n if s.empty:\n warnings.warn(\"The GeoSeries you are attempting to plot is \"\n \"empty. Nothing has been displayed.\", UserWarning)\n return ax\n\n # if cmap is specified, create range of colors based on cmap\n values = None\n if cmap is not None:\n values = np.arange(len(s))\n if hasattr(cmap, 'N'):\n values = values % cmap.N\n style_kwds['vmin'] = style_kwds.get('vmin', values.min())\n style_kwds['vmax'] = style_kwds.get('vmax', values.max())\n\n geom_types = s.geometry.type\n poly_idx = np.asarray((geom_types == 'Polygon')\n | (geom_types == 'MultiPolygon'))\n line_idx = np.asarray((geom_types == 'LineString')\n | (geom_types == 'MultiLineString'))\n point_idx = np.asarray((geom_types == 'Point')\n | (geom_types == 'MultiPoint'))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = s.geometry[poly_idx]\n\n if not polys.empty:\n # color overrides both face and edgecolor. 
As we want people to be\n # able to use edgecolor as well, pass color to facecolor\n facecolor = style_kwds.pop('facecolor', None)\n if color is not None:\n facecolor = color\n values_ = values[poly_idx] if cmap else None\n plot_polygon_collection(ax, polys, values_, facecolor=facecolor,\n cmap=cmap, **style_kwds)\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = s.geometry[line_idx]\n if not lines.empty:\n values_ = values[line_idx] if cmap else None\n plot_linestring_collection(ax, lines, values_, color=color, cmap=cmap,\n **style_kwds)\n\n # plot all Points in the same collection\n points = s.geometry[point_idx]\n if not points.empty:\n values_ = values[point_idx] if cmap else None\n plot_point_collection(ax, points, values_, color=color, cmap=cmap,\n **style_kwds)\n\n plt.draw()\n return ax\n\n\ndef plot_dataframe(df, column=None, cmap=None, color=None, ax=None,\n categorical=False, legend=False, scheme=None, k=5,\n vmin=None, vmax=None, markersize=None, figsize=None,\n legend_kwds=None, **style_kwds):\n \"\"\"\n Plot a GeoDataFrame.\n\n Generate a plot of a GeoDataFrame with matplotlib. If a\n column is specified, the plot coloring will be based on values\n in that column.\n\n Parameters\n ----------\n df : GeoDataFrame\n The GeoDataFrame to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, np.array, or pd.Series to be plotted.\n If np.array or pd.Series are used then it must have same length as\n dataframe. Values are used to color the plot. Ignored if `color` is\n also set.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib.\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n categorical : bool (default False)\n If False, cmap will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default False)\n Plot a legend. Ignored if no `column` is given, or if `color` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires PySAL).\n A pysal.esda.mapclassify.Map_Classifier object will be used\n under the hood. Supported schemes: 'Equal_interval', 'Quantiles',\n 'Fisher_Jenks'\n k : int (default 5)\n Number of classes (ignored if scheme is None)\n vmin : None or float (default None)\n Minimum value of cmap. If None, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of cmap. If None, the maximum data value\n in the column to be plotted is used.\n markersize : str or float or sequence (default None)\n Only applies to point geometries within a frame.\n If a str, will use the values in the column of the frame specified\n by markersize to set the size of markers. Otherwise can be a value\n to apply to all points, or a sequence of the same length as the\n number of points.\n figsize : tuple of integers (default None)\n Size of the resulting matplotlib.figure.Figure. 
If the argument\n axes is given explicitly, figsize is ignored.\n legend_kwds : dict (default None)\n Keyword arguments to pass to ax.legend()\n\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n\n \"\"\"\n if 'colormap' in style_kwds:\n warnings.warn(\"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\", FutureWarning)\n cmap = style_kwds.pop('colormap')\n if 'axes' in style_kwds:\n warnings.warn(\"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\", FutureWarning)\n ax = style_kwds.pop('axes')\n if column is not None and color is not None:\n warnings.warn(\"Only specify one of 'column' or 'color'. Using \"\n \"'color'.\", UserWarning)\n column = None\n\n import matplotlib\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n\n if df.empty:\n warnings.warn(\"The GeoDataFrame you are attempting to plot is \"\n \"empty. Nothing has been displayed.\", UserWarning)\n return ax\n\n if isinstance(markersize, str):\n markersize = df[markersize].values\n\n if column is None:\n return plot_series(df.geometry, cmap=cmap, color=color, ax=ax,\n figsize=figsize, markersize=markersize,\n **style_kwds)\n\n # To accept pd.Series and np.arrays as column\n if isinstance(column, (np.ndarray, pd.Series)):\n if column.shape[0] != df.shape[0]:\n raise ValueError(\"The dataframe and given column have different \"\n \"number of rows.\")\n else:\n values = np.asarray(column)\n else:\n values = np.asarray(df[column])\n\n if values.dtype is np.dtype('O'):\n categorical = True\n\n # Define `values` as a Series\n if categorical:\n if cmap is None:\n if LooseVersion(matplotlib.__version__) >= '2.0.1':\n cmap = 'tab10'\n elif LooseVersion(matplotlib.__version__) >= '2.0.0':\n # Erroneous name.\n cmap = 'Vega10'\n else:\n cmap = 'Set1'\n categories = list(set(values))\n categories.sort()\n valuemap = dict((k, v) for (v, k) in enumerate(categories))\n values = np.array([valuemap[k] for k in values])\n\n if scheme is not None:\n binning = __pysal_choro(values, scheme, k=k)\n # set categorical to True for creating the legend\n categorical = True\n binedges = [values.min()] + binning.bins.tolist()\n categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])\n for i in range(len(binedges)-1)]\n values = np.array(binning.yb)\n\n mn = values.min() if vmin is None else vmin\n mx = values.max() if vmax is None else vmax\n\n geom_types = df.geometry.type\n poly_idx = np.asarray((geom_types == 'Polygon')\n | (geom_types == 'MultiPolygon'))\n line_idx = np.asarray((geom_types == 'LineString')\n | (geom_types == 'MultiLineString'))\n point_idx = np.asarray((geom_types == 'Point')\n | (geom_types == 'MultiPoint'))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = df.geometry[poly_idx]\n if not polys.empty:\n plot_polygon_collection(ax, polys, values[poly_idx],\n vmin=mn, vmax=mx, cmap=cmap, **style_kwds)\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = df.geometry[line_idx]\n if not lines.empty:\n plot_linestring_collection(ax, lines, values[line_idx],\n vmin=mn, vmax=mx, cmap=cmap, **style_kwds)\n\n # plot all Points in the same collection\n points = df.geometry[point_idx]\n if not points.empty:\n if isinstance(markersize, np.ndarray):\n 
markersize = markersize[point_idx]\n plot_point_collection(ax, points, values[point_idx], vmin=mn, vmax=mx,\n markersize=markersize, cmap=cmap,\n **style_kwds)\n\n if legend and not color:\n from matplotlib.lines import Line2D\n from matplotlib.colors import Normalize\n from matplotlib import cm\n\n norm = Normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n if categorical:\n patches = []\n for value, cat in enumerate(categories):\n patches.append(\n Line2D([0], [0], linestyle=\"none\", marker=\"o\",\n alpha=style_kwds.get('alpha', 1), markersize=10,\n markerfacecolor=n_cmap.to_rgba(value)))\n if legend_kwds is None:\n legend_kwds = {}\n legend_kwds.setdefault('numpoints', 1)\n legend_kwds.setdefault('loc', 'best')\n ax.legend(patches, categories, **legend_kwds)\n else:\n n_cmap.set_array([])\n ax.get_figure().colorbar(n_cmap, ax=ax)\n\n plt.draw()\n return ax\n\n\ndef __pysal_choro(values, scheme, k=5):\n \"\"\"\n Wrapper for choropleth schemes from PySAL for use with plot_dataframe\n\n Parameters\n ----------\n values\n Series to be plotted\n scheme : str\n One of pysal.esda.mapclassify classification schemes\n Options are 'Equal_interval', 'Quantiles', 'Fisher_Jenks'\n k : int\n number of classes (2 <= k <=9)\n\n Returns\n -------\n binning\n Binning objects that holds the Series with values replaced with\n class identifier and the bins.\n\n \"\"\"\n try:\n from pysal.esda.mapclassify import (\n Quantiles, Equal_Interval, Fisher_Jenks)\n schemes = {}\n schemes['equal_interval'] = Equal_Interval\n schemes['quantiles'] = Quantiles\n schemes['fisher_jenks'] = Fisher_Jenks\n scheme = scheme.lower()\n if scheme not in schemes:\n raise ValueError(\"Invalid scheme. Scheme must be in the\"\n \" set: %r\" % schemes.keys())\n binning = schemes[scheme](values, k)\n return binning\n except ImportError:\n raise ImportError(\"PySAL is required to use the 'scheme' keyword\")\n", "path": "geopandas/plotting.py" } ]
[ { "content": "from __future__ import print_function\nfrom distutils.version import LooseVersion\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\ndef _flatten_multi_geoms(geoms, colors=None):\n \"\"\"\n Returns Series like geoms and colors, except that any Multi geometries\n are split into their components and colors are repeated for all component\n in the same Multi geometry. Maintains 1:1 matching of geometry to color.\n Passing `color` is optional, and when no `color` is passed a list of None\n values is returned as `component_colors`.\n\n \"Colors\" are treated opaquely and so can actually contain any values.\n\n Returns\n -------\n\n components : list of geometry\n\n component_colors : list of whatever type `colors` contains\n \"\"\"\n if colors is None:\n colors = [None] * len(geoms)\n\n components, component_colors = [], []\n \n if not geoms.geom_type.str.startswith('Multi').any():\n return geoms, colors\n\n # precondition, so zip can't short-circuit\n assert len(geoms) == len(colors)\n for geom, color in zip(geoms, colors):\n if geom.type.startswith('Multi'):\n for poly in geom:\n components.append(poly)\n # repeat same color for all components\n component_colors.append(color)\n else:\n components.append(geom)\n component_colors.append(color)\n\n return components, component_colors\n\n\ndef plot_polygon_collection(ax, geoms, values=None, color=None,\n cmap=None, vmin=None, vmax=None, **kwargs):\n \"\"\"\n Plots a collection of Polygon and MultiPolygon geometries to `ax`\n\n Parameters\n ----------\n\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n\n geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n Otherwise follows `color` / `facecolor` kwargs.\n\n edgecolor : single color or sequence of `N` colors\n Color for the edge of the polygons\n\n facecolor : single color or sequence of `N` colors\n Color to fill the polygons. 
Cannot be used together with `values`.\n\n color : single color or sequence of `N` colors\n Sets both `edgecolor` and `facecolor`\n\n **kwargs\n Additional keyword arguments passed to the collection\n\n Returns\n -------\n\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n\n try:\n from descartes.patch import PolygonPatch\n except ImportError:\n raise ImportError(\"The descartes package is required\"\n \" for plotting polygons in geopandas.\")\n from matplotlib.collections import PatchCollection\n\n geoms, values = _flatten_multi_geoms(geoms, values)\n if None in values:\n values = None\n\n # PatchCollection does not accept some kwargs.\n if 'markersize' in kwargs:\n del kwargs['markersize']\n\n # color=None overwrites specified facecolor/edgecolor with default color\n if color is not None:\n kwargs['color'] = color\n\n collection = PatchCollection([PolygonPatch(poly) for poly in geoms],\n **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\ndef plot_linestring_collection(ax, geoms, values=None, color=None,\n cmap=None, vmin=None, vmax=None, **kwargs):\n \"\"\"\n Plots a collection of LineString and MultiLineString geometries to `ax`\n\n Parameters\n ----------\n\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n\n geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be\n mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n\n color : single color or sequence of `N` colors\n Cannot be used together with `values`.\n\n Returns\n -------\n\n collection : matplotlib.collections.Collection that was plotted\n\n \"\"\"\n from matplotlib.collections import LineCollection\n\n geoms, values = _flatten_multi_geoms(geoms, values)\n if None in values:\n values = None\n\n # LineCollection does not accept some kwargs.\n if 'markersize' in kwargs:\n del kwargs['markersize']\n\n # color=None gives black instead of default color cycle\n if color is not None:\n kwargs['color'] = color\n\n segments = [np.array(linestring)[:, :2] for linestring in geoms]\n collection = LineCollection(segments, **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\ndef plot_point_collection(ax, geoms, values=None, color=None,\n cmap=None, vmin=None, vmax=None,\n marker='o', markersize=None, **kwargs):\n \"\"\"\n Plots a collection of Point and MultiPoint geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : sequence of `N` Points or MultiPoints\n\n values : a sequence of `N` values, optional\n Values mapped to colors using vmin, vmax, and cmap.\n Cannot be specified together with `color`.\n markersize : scalar or array-like, optional\n Size of the markers. 
Note that under the hood ``scatter`` is\n used, so the specified value will be proportional to the\n area of the marker (size in points^2).\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n if values is not None and color is not None:\n raise ValueError(\"Can only specify one of 'values' and 'color' kwargs\")\n\n geoms, values = _flatten_multi_geoms(geoms, values)\n if None in values:\n values = None\n x = [p.x for p in geoms]\n y = [p.y for p in geoms]\n\n # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None\n if values is not None:\n kwargs['c'] = values\n if markersize is not None:\n kwargs['s'] = markersize\n\n collection = ax.scatter(x, y, color=color, vmin=vmin, vmax=vmax, cmap=cmap,\n marker=marker, **kwargs)\n return collection\n\n\ndef plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):\n \"\"\"\n Plot a GeoSeries.\n\n Generate a plot of a GeoSeries geometry with matplotlib.\n\n Parameters\n ----------\n s : Series\n The GeoSeries to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib. Any\n colormap will work, but categorical colormaps are\n generally recommended. Examples of useful discrete\n colormaps include:\n\n tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2\n\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n figsize : pair of floats (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n ax is given explicitly, figsize is ignored.\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n \"\"\"\n if 'colormap' in style_kwds:\n warnings.warn(\"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\", FutureWarning)\n cmap = style_kwds.pop('colormap')\n if 'axes' in style_kwds:\n warnings.warn(\"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\", FutureWarning)\n ax = style_kwds.pop('axes')\n\n import matplotlib.pyplot as plt\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n\n if s.empty:\n warnings.warn(\"The GeoSeries you are attempting to plot is \"\n \"empty. Nothing has been displayed.\", UserWarning)\n return ax\n\n # if cmap is specified, create range of colors based on cmap\n values = None\n if cmap is not None:\n values = np.arange(len(s))\n if hasattr(cmap, 'N'):\n values = values % cmap.N\n style_kwds['vmin'] = style_kwds.get('vmin', values.min())\n style_kwds['vmax'] = style_kwds.get('vmax', values.max())\n\n geom_types = s.geometry.type\n poly_idx = np.asarray((geom_types == 'Polygon')\n | (geom_types == 'MultiPolygon'))\n line_idx = np.asarray((geom_types == 'LineString')\n | (geom_types == 'MultiLineString'))\n point_idx = np.asarray((geom_types == 'Point')\n | (geom_types == 'MultiPoint'))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = s.geometry[poly_idx]\n\n if not polys.empty:\n # color overrides both face and edgecolor. 
As we want people to be\n # able to use edgecolor as well, pass color to facecolor\n facecolor = style_kwds.pop('facecolor', None)\n if color is not None:\n facecolor = color\n values_ = values[poly_idx] if cmap else None\n plot_polygon_collection(ax, polys, values_, facecolor=facecolor,\n cmap=cmap, **style_kwds)\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = s.geometry[line_idx]\n if not lines.empty:\n values_ = values[line_idx] if cmap else None\n plot_linestring_collection(ax, lines, values_, color=color, cmap=cmap,\n **style_kwds)\n\n # plot all Points in the same collection\n points = s.geometry[point_idx]\n if not points.empty:\n values_ = values[point_idx] if cmap else None\n plot_point_collection(ax, points, values_, color=color, cmap=cmap,\n **style_kwds)\n\n plt.draw()\n return ax\n\n\ndef plot_dataframe(df, column=None, cmap=None, color=None, ax=None,\n categorical=False, legend=False, scheme=None, k=5,\n vmin=None, vmax=None, markersize=None, figsize=None,\n legend_kwds=None, **style_kwds):\n \"\"\"\n Plot a GeoDataFrame.\n\n Generate a plot of a GeoDataFrame with matplotlib. If a\n column is specified, the plot coloring will be based on values\n in that column.\n\n Parameters\n ----------\n df : GeoDataFrame\n The GeoDataFrame to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, np.array, or pd.Series to be plotted.\n If np.array or pd.Series are used then it must have same length as\n dataframe. Values are used to color the plot. Ignored if `color` is\n also set.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib.\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n categorical : bool (default False)\n If False, cmap will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default False)\n Plot a legend. Ignored if no `column` is given, or if `color` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires PySAL).\n A pysal.esda.mapclassify.Map_Classifier object will be used\n under the hood. Supported schemes: 'Equal_interval', 'Quantiles',\n 'Fisher_Jenks'\n k : int (default 5)\n Number of classes (ignored if scheme is None)\n vmin : None or float (default None)\n Minimum value of cmap. If None, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of cmap. If None, the maximum data value\n in the column to be plotted is used.\n markersize : str or float or sequence (default None)\n Only applies to point geometries within a frame.\n If a str, will use the values in the column of the frame specified\n by markersize to set the size of markers. Otherwise can be a value\n to apply to all points, or a sequence of the same length as the\n number of points.\n figsize : tuple of integers (default None)\n Size of the resulting matplotlib.figure.Figure. 
If the argument\n axes is given explicitly, figsize is ignored.\n legend_kwds : dict (default None)\n Keyword arguments to pass to ax.legend()\n\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n\n \"\"\"\n if 'colormap' in style_kwds:\n warnings.warn(\"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\", FutureWarning)\n cmap = style_kwds.pop('colormap')\n if 'axes' in style_kwds:\n warnings.warn(\"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\", FutureWarning)\n ax = style_kwds.pop('axes')\n if column is not None and color is not None:\n warnings.warn(\"Only specify one of 'column' or 'color'. Using \"\n \"'color'.\", UserWarning)\n column = None\n\n import matplotlib\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n\n if df.empty:\n warnings.warn(\"The GeoDataFrame you are attempting to plot is \"\n \"empty. Nothing has been displayed.\", UserWarning)\n return ax\n\n if isinstance(markersize, str):\n markersize = df[markersize].values\n\n if column is None:\n return plot_series(df.geometry, cmap=cmap, color=color, ax=ax,\n figsize=figsize, markersize=markersize,\n **style_kwds)\n\n # To accept pd.Series and np.arrays as column\n if isinstance(column, (np.ndarray, pd.Series)):\n if column.shape[0] != df.shape[0]:\n raise ValueError(\"The dataframe and given column have different \"\n \"number of rows.\")\n else:\n values = np.asarray(column)\n else:\n values = np.asarray(df[column])\n\n if values.dtype is np.dtype('O'):\n categorical = True\n\n # Define `values` as a Series\n if categorical:\n if cmap is None:\n if LooseVersion(matplotlib.__version__) >= '2.0.1':\n cmap = 'tab10'\n elif LooseVersion(matplotlib.__version__) >= '2.0.0':\n # Erroneous name.\n cmap = 'Vega10'\n else:\n cmap = 'Set1'\n categories = list(set(values))\n categories.sort()\n valuemap = dict((k, v) for (v, k) in enumerate(categories))\n values = np.array([valuemap[k] for k in values])\n\n if scheme is not None:\n binning = __pysal_choro(values, scheme, k=k)\n # set categorical to True for creating the legend\n categorical = True\n binedges = [values.min()] + binning.bins.tolist()\n categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])\n for i in range(len(binedges)-1)]\n values = np.array(binning.yb)\n\n mn = values.min() if vmin is None else vmin\n mx = values.max() if vmax is None else vmax\n\n geom_types = df.geometry.type\n poly_idx = np.asarray((geom_types == 'Polygon')\n | (geom_types == 'MultiPolygon'))\n line_idx = np.asarray((geom_types == 'LineString')\n | (geom_types == 'MultiLineString'))\n point_idx = np.asarray((geom_types == 'Point')\n | (geom_types == 'MultiPoint'))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = df.geometry[poly_idx]\n if not polys.empty:\n plot_polygon_collection(ax, polys, values[poly_idx],\n vmin=mn, vmax=mx, cmap=cmap, **style_kwds)\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = df.geometry[line_idx]\n if not lines.empty:\n plot_linestring_collection(ax, lines, values[line_idx],\n vmin=mn, vmax=mx, cmap=cmap, **style_kwds)\n\n # plot all Points in the same collection\n points = df.geometry[point_idx]\n if not points.empty:\n if isinstance(markersize, np.ndarray):\n 
markersize = markersize[point_idx]\n plot_point_collection(ax, points, values[point_idx], vmin=mn, vmax=mx,\n markersize=markersize, cmap=cmap,\n **style_kwds)\n\n if legend and not color:\n from matplotlib.lines import Line2D\n from matplotlib.colors import Normalize\n from matplotlib import cm\n\n norm = Normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n if categorical:\n patches = []\n for value, cat in enumerate(categories):\n patches.append(\n Line2D([0], [0], linestyle=\"none\", marker=\"o\",\n alpha=style_kwds.get('alpha', 1), markersize=10,\n markerfacecolor=n_cmap.to_rgba(value)))\n if legend_kwds is None:\n legend_kwds = {}\n legend_kwds.setdefault('numpoints', 1)\n legend_kwds.setdefault('loc', 'best')\n ax.legend(patches, categories, **legend_kwds)\n else:\n n_cmap.set_array([])\n ax.get_figure().colorbar(n_cmap, ax=ax)\n\n plt.draw()\n return ax\n\n\ndef __pysal_choro(values, scheme, k=5):\n \"\"\"\n Wrapper for choropleth schemes from PySAL for use with plot_dataframe\n\n Parameters\n ----------\n values\n Series to be plotted\n scheme : str\n One of pysal.esda.mapclassify classification schemes\n Options are 'Equal_interval', 'Quantiles', 'Fisher_Jenks'\n k : int\n number of classes (2 <= k <=9)\n\n Returns\n -------\n binning\n Binning objects that holds the Series with values replaced with\n class identifier and the bins.\n\n \"\"\"\n try:\n from pysal.esda.mapclassify import (\n Quantiles, Equal_Interval, Fisher_Jenks)\n schemes = {}\n schemes['equal_interval'] = Equal_Interval\n schemes['quantiles'] = Quantiles\n schemes['fisher_jenks'] = Fisher_Jenks\n scheme = scheme.lower()\n if scheme not in schemes:\n raise ValueError(\"Invalid scheme. Scheme must be in the\"\n \" set: %r\" % schemes.keys())\n binning = schemes[scheme](values, k)\n return binning\n except ImportError:\n raise ImportError(\"PySAL is required to use the 'scheme' keyword\")\n", "path": "geopandas/plotting.py" } ]
diff --git a/geopandas/plotting.py b/geopandas/plotting.py index becf65540f..02763e67f5 100644 --- a/geopandas/plotting.py +++ b/geopandas/plotting.py @@ -26,6 +26,9 @@ def _flatten_multi_geoms(geoms, colors=None): colors = [None] * len(geoms) components, component_colors = [], [] + + if not geoms.geom_type.str.startswith('Multi').any(): + return geoms, colors # precondition, so zip can't short-circuit assert len(geoms) == len(colors)
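For reference, the patch above short-circuits `_flatten_multi_geoms` when the input holds no Multi geometries, so plain Point/LineString/Polygon series skip the per-geometry Python loop. A minimal sketch of that check in isolation (the two small GeoSeries here are illustrative only, not part of the patch):

```python
import geopandas
from shapely.geometry import MultiPoint, Point

# No Multi* geometries: the new early return hands the series back unchanged.
simple = geopandas.GeoSeries([Point(0, 0), Point(1, 1)])
print(simple.geom_type.str.startswith('Multi').any())  # False

# A MultiPoint is present: flattening into components still happens as before.
mixed = geopandas.GeoSeries([Point(0, 0), MultiPoint([(1, 1), (2, 2)])])
print(mixed.geom_type.str.startswith('Multi').any())   # True
```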
buildbot__buildbot-3106
Tarball on pypi lacks secrets/providers subdirectory

https://pypi.python.org/pypi/buildbot/0.9.5

The tarball provided there lacks `buildbot/secrets/providers` (the repository's `master/buildbot/secrets/providers`); it does contain `buildbot/secrets` itself, though. Hence the unit tests won't run completely.

```
]$ ls -al buildbot/secrets/
total 9
drwxr-xr-x  2 502 staff    5 Mar 20 14:54 .
drwxr-xr-x 24 502 staff   41 Mar 20 14:54 ..
-rw-r--r--  1 502 staff  705 Mar 20 11:28 __init__.py
-rw-r--r--  1 502 staff 1621 Mar 20 11:28 manager.py
-rw-r--r--  1 502 staff 1713 Mar 20 11:28 secret.py
```
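The root cause is visible in the `master/setup.py` shown below: the setuptools packages are enumerated by hand, and `buildbot.secrets.providers` was never added, so the sdist ships `buildbot.secrets` without its subpackage. A minimal sketch of the needed addition (an excerpt, not the full packages list; it corresponds to the one line the patched copy of setup.py further down adds):

```python
# Excerpt of the 'packages' entry in master/setup.py: listing buildbot.secrets
# does not implicitly include its subpackages, so the providers package must
# be named explicitly or it is left out of the sdist.
setup_args = {
    'packages': [
        "buildbot",
        "buildbot.secrets",
        "buildbot.secrets.providers",  # missing from the 0.9.5 tarball
        # ... remaining packages unchanged
    ],
}
```

An alternative (not used here, given the explicit list) would be setuptools' `find_packages()`, which would have picked up the new subpackage automatically.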
[ { "content": "#!/usr/bin/env python\n#\n# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\"\"\"\nStandard setup script.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport pkg_resources\nimport sys\nfrom distutils.command.install_data import install_data\nfrom distutils.command.sdist import sdist\nfrom distutils.version import LooseVersion\n\nfrom setuptools import setup\n\nfrom buildbot import version\n\nif \"bdist_wheel\" in sys.argv:\n BUILDING_WHEEL = True\nelse:\n BUILDING_WHEEL = False\n\n\ndef include(d, e):\n \"\"\"Generate a pair of (directory, file-list) for installation.\n\n 'd' -- A directory\n 'e' -- A glob pattern\"\"\"\n\n return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)])\n\n\ndef include_statics(d):\n r = []\n for root, ds, fs in os.walk(d):\n r.append((root, [os.path.join(root, f) for f in fs]))\n return r\n\n\nclass install_data_twisted(install_data):\n\n \"\"\"make sure data files are installed in package.\n this is evil.\n copied from Twisted/setup.py.\n \"\"\"\n\n def finalize_options(self):\n self.set_undefined_options('install',\n ('install_lib', 'install_dir'),\n )\n install_data.finalize_options(self)\n\n def run(self):\n install_data.run(self)\n # ensure there's a buildbot/VERSION file\n fn = os.path.join(self.install_dir, 'buildbot', 'VERSION')\n open(fn, 'w').write(version)\n self.outfiles.append(fn)\n\n\nclass our_sdist(sdist):\n\n def make_release_tree(self, base_dir, files):\n sdist.make_release_tree(self, base_dir, files)\n\n # ensure there's a buildbot/VERSION file\n fn = os.path.join(base_dir, 'buildbot', 'VERSION')\n open(fn, 'w').write(version)\n\n # ensure that NEWS has a copy of the latest release notes, with the\n # proper version substituted\n src_fn = os.path.join('docs', 'relnotes/index.rst')\n with open(src_fn) as f:\n src = f.read()\n src = src.replace('|version|', version)\n dst_fn = os.path.join(base_dir, 'NEWS')\n with open(dst_fn, 'w') as f:\n f.write(src)\n\n\ndef define_plugin_entry(name, module_name):\n \"\"\"\n helper to produce lines suitable for setup.py's entry_points\n \"\"\"\n if isinstance(name, tuple):\n entry, name = name\n else:\n entry = name\n return '%s = %s:%s' % (entry, module_name, name)\n\n\ndef concat_dicts(*dicts):\n result = dict()\n for d in dicts:\n result.update(d)\n return result\n\n\ndef define_plugin_entries(groups):\n \"\"\"\n helper to all groups for plugins\n \"\"\"\n result = dict()\n\n for group, modules in groups:\n tempo = []\n for module_name, names in modules:\n tempo.extend([define_plugin_entry(name, module_name)\n for name in names])\n result[group] = tempo\n\n return result\n\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as long_d_f:\n long_description = long_d_f.read()\n\nsetup_args = {\n 'name': \"buildbot\",\n 'version': 
version,\n 'description': \"The Continuous Integration Framework\",\n 'long_description': long_description,\n 'author': \"Brian Warner\",\n 'author_email': \"[email protected]\",\n 'maintainer': \"Dustin J. Mitchell\",\n 'maintainer_email': \"[email protected]\",\n 'url': \"http://buildbot.net/\",\n 'license': \"GNU GPL\",\n 'classifiers': [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: No Input/Output (Daemon)',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Software Development :: Testing',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n\n 'packages': [\n \"buildbot\",\n \"buildbot.buildslave\",\n \"buildbot.worker\",\n \"buildbot.worker.protocols\",\n \"buildbot.changes\",\n \"buildbot.clients\",\n \"buildbot.data\",\n \"buildbot.db\",\n \"buildbot.db.migrate.versions\",\n \"buildbot.db.types\",\n \"buildbot.monkeypatches\",\n \"buildbot.mq\",\n \"buildbot.plugins\",\n \"buildbot.process\",\n \"buildbot.process.users\",\n \"buildbot.reporters\",\n \"buildbot.schedulers\",\n \"buildbot.scripts\",\n \"buildbot.secrets\",\n \"buildbot.statistics\",\n \"buildbot.statistics.storage_backends\",\n \"buildbot.status\",\n \"buildbot.steps\",\n \"buildbot.steps.package\",\n \"buildbot.steps.package.deb\",\n \"buildbot.steps.package.rpm\",\n \"buildbot.steps.source\",\n \"buildbot.util\",\n \"buildbot.wamp\",\n \"buildbot.www\",\n \"buildbot.www.hooks\",\n \"buildbot.www.authz\",\n ] + ([] if BUILDING_WHEEL else [ # skip tests for wheels (save 50% of the archive)\n \"buildbot.test\",\n \"buildbot.test.util\",\n \"buildbot.test.fake\",\n \"buildbot.test.fuzz\",\n \"buildbot.test.integration\",\n \"buildbot.test.regressions\",\n \"buildbot.test.unit\",\n ]),\n 'data_files': [\n (\"buildbot\", [\n \"buildbot/buildbot.png\",\n ]),\n include(\"buildbot/reporters/templates\", \"*.txt\"),\n (\"buildbot/db/migrate\", [\n \"buildbot/db/migrate/migrate.cfg\",\n ]),\n include(\"buildbot/db/migrate/versions\", \"*.py\"),\n (\"buildbot/scripts\", [\n \"buildbot/scripts/sample.cfg\",\n \"buildbot/scripts/buildbot_tac.tmpl\",\n ]),\n include(\"buildbot/spec\", \"*.raml\"),\n include(\"buildbot/spec/types\", \"*.raml\"),\n include(\"buildbot/test/unit/test_templates_dir\", \"*.html\"),\n include(\"buildbot/test/unit/test_templates_dir/plugin\", \"*.*\"),\n ] + include_statics(\"buildbot/www/static\"),\n 'cmdclass': {'install_data': install_data_twisted,\n 'sdist': our_sdist},\n 'entry_points': concat_dicts(define_plugin_entries([\n ('buildbot.changes', [\n ('buildbot.changes.mail', [\n 'MaildirSource', 'CVSMaildirSource',\n 'SVNCommitEmailMaildirSource',\n 'BzrLaunchpadEmailMaildirSource']),\n ('buildbot.changes.bitbucket', ['BitbucketPullrequestPoller']),\n ('buildbot.changes.github', ['GitHubPullrequestPoller']),\n ('buildbot.changes.bonsaipoller', ['BonsaiPoller']),\n ('buildbot.changes.gerritchangesource', ['GerritChangeSource']),\n ('buildbot.changes.gitpoller', ['GitPoller']),\n ('buildbot.changes.hgpoller', ['HgPoller']),\n ('buildbot.changes.p4poller', ['P4Source']),\n ('buildbot.changes.pb', ['PBChangeSource']),\n ('buildbot.changes.svnpoller', ['SVNPoller'])\n ]),\n ('buildbot.schedulers', [\n ('buildbot.schedulers.basic', [\n 'SingleBranchScheduler', 
'AnyBranchScheduler']),\n ('buildbot.schedulers.dependent', ['Dependent']),\n ('buildbot.schedulers.triggerable', ['Triggerable']),\n ('buildbot.schedulers.forcesched', ['ForceScheduler']),\n ('buildbot.schedulers.timed', [\n 'Periodic', 'Nightly', 'NightlyTriggerable']),\n ('buildbot.schedulers.trysched', [\n 'Try_Jobdir', 'Try_Userpass'])\n ]),\n ('buildbot.worker', [\n ('buildbot.worker.base', ['Worker']),\n ('buildbot.worker.ec2', ['EC2LatentWorker']),\n ('buildbot.worker.libvirt', ['LibVirtWorker']),\n ('buildbot.worker.openstack', ['OpenStackLatentWorker']),\n ('buildbot.worker.docker', ['DockerLatentWorker']),\n ('buildbot.worker.hyper', ['HyperLatentWorker']),\n ('buildbot.worker.local', ['LocalWorker']),\n ]),\n ('buildbot.steps', [\n ('buildbot.process.buildstep', ['BuildStep']),\n ('buildbot.steps.cmake', ['CMake']),\n ('buildbot.steps.cppcheck', ['Cppcheck']),\n ('buildbot.steps.http', [\n 'HTTPStep', 'POST', 'GET', 'PUT', 'DELETE', 'HEAD',\n 'OPTIONS']),\n ('buildbot.steps.master', [\n 'MasterShellCommand', 'SetProperty', 'SetProperties', 'LogRenderable']),\n ('buildbot.steps.maxq', ['MaxQ']),\n ('buildbot.steps.mswin', ['Robocopy']),\n ('buildbot.steps.mtrlogobserver', ['MTR']),\n ('buildbot.steps.package.deb.lintian', ['DebLintian']),\n ('buildbot.steps.package.deb.pbuilder', [\n 'DebPbuilder', 'DebCowbuilder', 'UbuPbuilder',\n 'UbuCowbuilder']),\n ('buildbot.steps.package.rpm.mock', [\n 'Mock', 'MockBuildSRPM', 'MockRebuild']),\n ('buildbot.steps.package.rpm.rpmbuild', ['RpmBuild']),\n ('buildbot.steps.package.rpm.rpmlint', ['RpmLint']),\n ('buildbot.steps.package.rpm.rpmspec', ['RpmSpec']),\n ('buildbot.steps.python', [\n 'BuildEPYDoc', 'PyFlakes', 'PyLint', 'Sphinx']),\n ('buildbot.steps.python_twisted', [\n 'HLint', 'Trial', 'RemovePYCs']),\n ('buildbot.steps.shell', [\n 'ShellCommand', 'TreeSize', 'SetPropertyFromCommand',\n 'Configure', 'WarningCountingShellCommand', 'Compile',\n 'Test', 'PerlModuleTest']),\n ('buildbot.steps.shellsequence', ['ShellSequence']),\n ('buildbot.steps.source.bzr', ['Bzr']),\n ('buildbot.steps.source.cvs', ['CVS']),\n ('buildbot.steps.source.darcs', ['Darcs']),\n ('buildbot.steps.source.gerrit', ['Gerrit']),\n ('buildbot.steps.source.git', ['Git']),\n ('buildbot.steps.source.github', ['GitHub']),\n ('buildbot.steps.source.mercurial', ['Mercurial']),\n ('buildbot.steps.source.mtn', ['Monotone']),\n ('buildbot.steps.source.p4', ['P4']),\n ('buildbot.steps.source.repo', ['Repo']),\n ('buildbot.steps.source.svn', ['SVN']),\n ('buildbot.steps.subunit', ['SubunitShellCommand']),\n ('buildbot.steps.transfer', [\n 'FileUpload', 'DirectoryUpload', 'MultipleFileUpload',\n 'FileDownload', 'StringDownload', 'JSONStringDownload',\n 'JSONPropertiesDownload']),\n ('buildbot.steps.trigger', ['Trigger']),\n ('buildbot.steps.vstudio', [\n 'VC6', 'VC7', 'VS2003', 'VC8', 'VS2005', 'VCExpress9', 'VC9',\n 'VS2008', 'VC10', 'VS2010', 'VC11', 'VS2012', 'VC12', 'VS2013',\n 'VC14', 'VS2015', 'MsBuild4', 'MsBuild', 'MsBuild12', 'MsBuild14']),\n ('buildbot.steps.worker', [\n 'SetPropertiesFromEnv', 'FileExists', 'CopyDirectory',\n 'RemoveDirectory', 'MakeDirectory']),\n ]),\n ('buildbot.reporters', [\n ('buildbot.reporters.mail', ['MailNotifier']),\n ('buildbot.reporters.message', ['MessageFormatter']),\n ('buildbot.reporters.gerrit', ['GerritStatusPush']),\n ('buildbot.reporters.gerrit_verify_status',\n ['GerritVerifyStatusPush']),\n ('buildbot.reporters.http', ['HttpStatusPush']),\n ('buildbot.reporters.github', ['GitHubStatusPush', 'GitHubCommentPush']),\n 
('buildbot.reporters.gitlab', ['GitLabStatusPush']),\n ('buildbot.reporters.stash', ['StashStatusPush']),\n ('buildbot.reporters.bitbucket', ['BitbucketStatusPush']),\n ('buildbot.reporters.irc', ['IRC']),\n ]),\n ('buildbot.util', [\n # Connection seems to be a way too generic name, though\n ('buildbot.worker.libvirt', ['Connection']),\n ('buildbot.changes.filter', ['ChangeFilter']),\n ('buildbot.changes.gerritchangesource', ['GerritChangeFilter']),\n ('buildbot.changes.svnpoller', [\n ('svn.split_file_projects_branches',\n 'split_file_projects_branches'),\n ('svn.split_file_branches', 'split_file_branches'),\n ('svn.split_file_alwaystrunk', 'split_file_alwaystrunk')]),\n ('buildbot.config', ['BuilderConfig']),\n ('buildbot.locks', [\n 'MasterLock',\n 'WorkerLock',\n ]),\n ('buildbot.manhole', [\n 'AuthorizedKeysManhole', 'PasswordManhole', 'TelnetManhole']),\n ('buildbot.process.builder', [\n 'enforceChosenWorker',\n ]),\n ('buildbot.process.factory', [\n 'BuildFactory', 'GNUAutoconf', 'CPAN', 'Distutils', 'Trial',\n 'BasicBuildFactory', 'QuickBuildFactory', 'BasicSVN']),\n ('buildbot.process.logobserver', ['LogLineObserver']),\n ('buildbot.process.properties', [\n 'FlattenList', 'Interpolate', 'Property', 'Transform',\n 'WithProperties', 'renderer']),\n ('buildbot.process.properties', [\n 'CommandlineUserManager']),\n ('buildbot.revlinks', ['RevlinkMatch']),\n ('buildbot.reporters.utils', ['URLForBuild']),\n ('buildbot.schedulers.forcesched', [\n 'AnyPropertyParameter', 'BooleanParameter',\n 'ChoiceStringParameter',\n 'CodebaseParameter', 'FixedParameter', 'InheritBuildParameter',\n 'IntParameter', 'NestedParameter', 'ParameterGroup',\n 'StringParameter', 'TextParameter', 'UserNameParameter',\n 'WorkerChoiceParameter',\n ]),\n ('buildbot.process.results', [\n 'Results', 'SUCCESS', 'WARNINGS', 'FAILURE', 'SKIPPED',\n 'EXCEPTION', 'RETRY', 'CANCELLED']),\n ('buildbot.steps.mtrlogobserver', ['EqConnectionPool']),\n ('buildbot.steps.source.repo', [\n ('repo.DownloadsFromChangeSource',\n 'RepoDownloadsFromChangeSource'),\n ('repo.DownloadsFromProperties',\n 'RepoDownloadsFromProperties')]),\n ('buildbot.steps.shellsequence', ['ShellArg']),\n ('buildbot.www.avatar', ['AvatarGravatar']),\n ('buildbot.www.auth', [\n 'UserPasswordAuth', 'HTPasswdAuth', 'RemoteUserAuth']),\n ('buildbot.www.ldapuserinfo', ['LdapUserInfo']),\n ('buildbot.www.oauth2', [\n 'GoogleAuth', 'GitHubAuth', 'GitLabAuth', 'BitbucketAuth']),\n ('buildbot.db.dbconfig', [\n 'DbConfig']),\n ('buildbot.www.authz', [\n 'Authz', 'fnmatchStrMatcher', 'reStrMatcher']),\n ('buildbot.www.authz.roles', [\n 'RolesFromEmails', 'RolesFromGroups', 'RolesFromOwner', 'RolesFromUsername']),\n ('buildbot.www.authz.endpointmatchers', [\n 'AnyEndpointMatcher', 'StopBuildEndpointMatcher', 'ForceBuildEndpointMatcher',\n 'RebuildBuildEndpointMatcher', 'AnyControlEndpointMatcher', 'EnableSchedulerEndpointMatcher']),\n ])\n ]), {\n 'console_scripts': [\n 'buildbot=buildbot.scripts.runner:run',\n # this will also be shipped on non windows :-(\n 'buildbot_windows_service=buildbot.scripts.windows_service:HandleCommandLine',\n ]}\n )\n}\n\n# set zip_safe to false to force Windows installs to always unpack eggs\n# into directories, which seems to work better --\n# see http://buildbot.net/trac/ticket/907\nif sys.platform == \"win32\":\n setup_args['zip_safe'] = False\n\npy_26 = sys.version_info[0] > 2 or (\n sys.version_info[0] == 2 and sys.version_info[1] >= 6)\nif not py_26:\n raise RuntimeError(\"Buildbot master requires at least Python-2.6\")\n\n# 
pip<1.4 doesn't have the --pre flag, and will thus attempt to install alpha\n# and beta versions of Buildbot. Prevent that from happening.\nVERSION_MSG = \"\"\"\nThis is a pre-release version of Buildbot, which can only be installed with\npip-1.4 or later Try installing the latest stable version of Buildbot instead:\n pip install buildbot==0.8.12\nSee https://pypi.python.org/pypi/buildbot to verify the current stable version.\n\"\"\"\nif 'a' in version or 'b' in version:\n try:\n pip_dist = pkg_resources.get_distribution('pip')\n except pkg_resources.DistributionNotFound:\n pip_dist = None\n\n if pip_dist:\n if LooseVersion(pip_dist.version) < LooseVersion('1.4'):\n raise RuntimeError(VERSION_MSG)\n\nif sys.version_info[0] >= 3:\n twisted_ver = \">= 17.1.0\"\nelse:\n twisted_ver = \">= 14.0.1\"\nautobahn_ver = \">= 0.16.0\"\ntxaio_ver = \">= 2.2.2\"\n\nbundle_version = version.split(\"-\")[0]\n\n# dependencies\nsetup_args['install_requires'] = [\n 'setuptools >= 8.0',\n 'Twisted ' + twisted_ver,\n 'Jinja2 >= 2.1',\n # required for tests, but Twisted requires this anyway\n 'zope.interface >= 4.1.1',\n # python-future required for py2/3 compatibility\n 'future',\n 'sqlalchemy>=0.8.0',\n 'sqlalchemy-migrate>=0.9',\n 'python-dateutil>=1.5',\n 'txaio ' + txaio_ver,\n 'autobahn ' + autobahn_ver,\n 'PyJWT',\n 'distro'\n]\n\n# Unit test dependencies.\ntest_deps = [\n # http client libraries\n 'treq',\n 'txrequests',\n # pyjade required for custom templates tests\n 'pyjade',\n # boto3 and moto required for running EC2 tests\n 'boto3',\n 'moto',\n # txgithub required to run buildbot.status.github module tests\n 'txgithub',\n 'ramlfications',\n 'mock>=2.0.0',\n]\nif sys.platform != 'win32':\n test_deps += [\n # LZ4 fails to build on Windows:\n # https://github.com/steeve/python-lz4/issues/27\n # lz4 required for log compression tests.\n 'lz4',\n ]\n\nsetup_args['tests_require'] = test_deps\n\nsetup_args['extras_require'] = {\n 'test': [\n 'setuptools_trial',\n 'isort',\n # spellcheck introduced in version 1.4.0\n 'pylint>=1.4.0',\n 'pyenchant',\n 'flake8~=2.6.0',\n ] + test_deps,\n 'bundle': [\n \"buildbot-www=={0}\".format(bundle_version),\n \"buildbot-worker=={0}\".format(bundle_version),\n \"buildbot-waterfall-view=={0}\".format(bundle_version),\n \"buildbot-console-view=={0}\".format(bundle_version),\n ],\n 'tls': [\n 'Twisted[tls] ' + twisted_ver,\n # There are bugs with extras inside extras:\n # <https://github.com/pypa/pip/issues/3516>\n # so we explicitly include Twisted[tls] dependencies.\n 'pyopenssl >= 16.0.0',\n 'service_identity',\n 'idna >= 0.6',\n ],\n 'docs': [\n 'docutils<0.13.0',\n 'sphinx>1.4.0',\n 'sphinxcontrib-blockdiag',\n 'sphinxcontrib-spelling',\n 'pyenchant',\n 'docutils>=0.8',\n 'ramlfications',\n 'sphinx-jinja',\n 'towncrier'\n ],\n}\n\nif '--help-commands' in sys.argv or 'trial' in sys.argv or 'test' in sys.argv:\n setup_args['setup_requires'] = [\n 'setuptools_trial',\n ]\n\nif os.getenv('NO_INSTALL_REQS'):\n setup_args['install_requires'] = None\n setup_args['extras_require'] = None\n\nsetup(**setup_args)\n\n# Local Variables:\n# fill-column: 71\n# End:\n", "path": "master/setup.py" } ]
[ { "content": "#!/usr/bin/env python\n#\n# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\"\"\"\nStandard setup script.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport pkg_resources\nimport sys\nfrom distutils.command.install_data import install_data\nfrom distutils.command.sdist import sdist\nfrom distutils.version import LooseVersion\n\nfrom setuptools import setup\n\nfrom buildbot import version\n\nif \"bdist_wheel\" in sys.argv:\n BUILDING_WHEEL = True\nelse:\n BUILDING_WHEEL = False\n\n\ndef include(d, e):\n \"\"\"Generate a pair of (directory, file-list) for installation.\n\n 'd' -- A directory\n 'e' -- A glob pattern\"\"\"\n\n return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)])\n\n\ndef include_statics(d):\n r = []\n for root, ds, fs in os.walk(d):\n r.append((root, [os.path.join(root, f) for f in fs]))\n return r\n\n\nclass install_data_twisted(install_data):\n\n \"\"\"make sure data files are installed in package.\n this is evil.\n copied from Twisted/setup.py.\n \"\"\"\n\n def finalize_options(self):\n self.set_undefined_options('install',\n ('install_lib', 'install_dir'),\n )\n install_data.finalize_options(self)\n\n def run(self):\n install_data.run(self)\n # ensure there's a buildbot/VERSION file\n fn = os.path.join(self.install_dir, 'buildbot', 'VERSION')\n open(fn, 'w').write(version)\n self.outfiles.append(fn)\n\n\nclass our_sdist(sdist):\n\n def make_release_tree(self, base_dir, files):\n sdist.make_release_tree(self, base_dir, files)\n\n # ensure there's a buildbot/VERSION file\n fn = os.path.join(base_dir, 'buildbot', 'VERSION')\n open(fn, 'w').write(version)\n\n # ensure that NEWS has a copy of the latest release notes, with the\n # proper version substituted\n src_fn = os.path.join('docs', 'relnotes/index.rst')\n with open(src_fn) as f:\n src = f.read()\n src = src.replace('|version|', version)\n dst_fn = os.path.join(base_dir, 'NEWS')\n with open(dst_fn, 'w') as f:\n f.write(src)\n\n\ndef define_plugin_entry(name, module_name):\n \"\"\"\n helper to produce lines suitable for setup.py's entry_points\n \"\"\"\n if isinstance(name, tuple):\n entry, name = name\n else:\n entry = name\n return '%s = %s:%s' % (entry, module_name, name)\n\n\ndef concat_dicts(*dicts):\n result = dict()\n for d in dicts:\n result.update(d)\n return result\n\n\ndef define_plugin_entries(groups):\n \"\"\"\n helper to all groups for plugins\n \"\"\"\n result = dict()\n\n for group, modules in groups:\n tempo = []\n for module_name, names in modules:\n tempo.extend([define_plugin_entry(name, module_name)\n for name in names])\n result[group] = tempo\n\n return result\n\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as long_d_f:\n long_description = long_d_f.read()\n\nsetup_args = {\n 'name': \"buildbot\",\n 'version': 
version,\n 'description': \"The Continuous Integration Framework\",\n 'long_description': long_description,\n 'author': \"Brian Warner\",\n 'author_email': \"[email protected]\",\n 'maintainer': \"Dustin J. Mitchell\",\n 'maintainer_email': \"[email protected]\",\n 'url': \"http://buildbot.net/\",\n 'license': \"GNU GPL\",\n 'classifiers': [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: No Input/Output (Daemon)',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Software Development :: Testing',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ],\n\n 'packages': [\n \"buildbot\",\n \"buildbot.buildslave\",\n \"buildbot.worker\",\n \"buildbot.worker.protocols\",\n \"buildbot.changes\",\n \"buildbot.clients\",\n \"buildbot.data\",\n \"buildbot.db\",\n \"buildbot.db.migrate.versions\",\n \"buildbot.db.types\",\n \"buildbot.monkeypatches\",\n \"buildbot.mq\",\n \"buildbot.plugins\",\n \"buildbot.process\",\n \"buildbot.process.users\",\n \"buildbot.reporters\",\n \"buildbot.schedulers\",\n \"buildbot.scripts\",\n \"buildbot.secrets\",\n \"buildbot.secrets.providers\",\n \"buildbot.statistics\",\n \"buildbot.statistics.storage_backends\",\n \"buildbot.status\",\n \"buildbot.steps\",\n \"buildbot.steps.package\",\n \"buildbot.steps.package.deb\",\n \"buildbot.steps.package.rpm\",\n \"buildbot.steps.source\",\n \"buildbot.util\",\n \"buildbot.wamp\",\n \"buildbot.www\",\n \"buildbot.www.hooks\",\n \"buildbot.www.authz\",\n ] + ([] if BUILDING_WHEEL else [ # skip tests for wheels (save 50% of the archive)\n \"buildbot.test\",\n \"buildbot.test.util\",\n \"buildbot.test.fake\",\n \"buildbot.test.fuzz\",\n \"buildbot.test.integration\",\n \"buildbot.test.regressions\",\n \"buildbot.test.unit\",\n ]),\n 'data_files': [\n (\"buildbot\", [\n \"buildbot/buildbot.png\",\n ]),\n include(\"buildbot/reporters/templates\", \"*.txt\"),\n (\"buildbot/db/migrate\", [\n \"buildbot/db/migrate/migrate.cfg\",\n ]),\n include(\"buildbot/db/migrate/versions\", \"*.py\"),\n (\"buildbot/scripts\", [\n \"buildbot/scripts/sample.cfg\",\n \"buildbot/scripts/buildbot_tac.tmpl\",\n ]),\n include(\"buildbot/spec\", \"*.raml\"),\n include(\"buildbot/spec/types\", \"*.raml\"),\n include(\"buildbot/test/unit/test_templates_dir\", \"*.html\"),\n include(\"buildbot/test/unit/test_templates_dir/plugin\", \"*.*\"),\n ] + include_statics(\"buildbot/www/static\"),\n 'cmdclass': {'install_data': install_data_twisted,\n 'sdist': our_sdist},\n 'entry_points': concat_dicts(define_plugin_entries([\n ('buildbot.changes', [\n ('buildbot.changes.mail', [\n 'MaildirSource', 'CVSMaildirSource',\n 'SVNCommitEmailMaildirSource',\n 'BzrLaunchpadEmailMaildirSource']),\n ('buildbot.changes.bitbucket', ['BitbucketPullrequestPoller']),\n ('buildbot.changes.github', ['GitHubPullrequestPoller']),\n ('buildbot.changes.bonsaipoller', ['BonsaiPoller']),\n ('buildbot.changes.gerritchangesource', ['GerritChangeSource']),\n ('buildbot.changes.gitpoller', ['GitPoller']),\n ('buildbot.changes.hgpoller', ['HgPoller']),\n ('buildbot.changes.p4poller', ['P4Source']),\n ('buildbot.changes.pb', ['PBChangeSource']),\n ('buildbot.changes.svnpoller', ['SVNPoller'])\n ]),\n ('buildbot.schedulers', [\n ('buildbot.schedulers.basic', [\n 
'SingleBranchScheduler', 'AnyBranchScheduler']),\n ('buildbot.schedulers.dependent', ['Dependent']),\n ('buildbot.schedulers.triggerable', ['Triggerable']),\n ('buildbot.schedulers.forcesched', ['ForceScheduler']),\n ('buildbot.schedulers.timed', [\n 'Periodic', 'Nightly', 'NightlyTriggerable']),\n ('buildbot.schedulers.trysched', [\n 'Try_Jobdir', 'Try_Userpass'])\n ]),\n ('buildbot.worker', [\n ('buildbot.worker.base', ['Worker']),\n ('buildbot.worker.ec2', ['EC2LatentWorker']),\n ('buildbot.worker.libvirt', ['LibVirtWorker']),\n ('buildbot.worker.openstack', ['OpenStackLatentWorker']),\n ('buildbot.worker.docker', ['DockerLatentWorker']),\n ('buildbot.worker.hyper', ['HyperLatentWorker']),\n ('buildbot.worker.local', ['LocalWorker']),\n ]),\n ('buildbot.steps', [\n ('buildbot.process.buildstep', ['BuildStep']),\n ('buildbot.steps.cmake', ['CMake']),\n ('buildbot.steps.cppcheck', ['Cppcheck']),\n ('buildbot.steps.http', [\n 'HTTPStep', 'POST', 'GET', 'PUT', 'DELETE', 'HEAD',\n 'OPTIONS']),\n ('buildbot.steps.master', [\n 'MasterShellCommand', 'SetProperty', 'SetProperties', 'LogRenderable']),\n ('buildbot.steps.maxq', ['MaxQ']),\n ('buildbot.steps.mswin', ['Robocopy']),\n ('buildbot.steps.mtrlogobserver', ['MTR']),\n ('buildbot.steps.package.deb.lintian', ['DebLintian']),\n ('buildbot.steps.package.deb.pbuilder', [\n 'DebPbuilder', 'DebCowbuilder', 'UbuPbuilder',\n 'UbuCowbuilder']),\n ('buildbot.steps.package.rpm.mock', [\n 'Mock', 'MockBuildSRPM', 'MockRebuild']),\n ('buildbot.steps.package.rpm.rpmbuild', ['RpmBuild']),\n ('buildbot.steps.package.rpm.rpmlint', ['RpmLint']),\n ('buildbot.steps.package.rpm.rpmspec', ['RpmSpec']),\n ('buildbot.steps.python', [\n 'BuildEPYDoc', 'PyFlakes', 'PyLint', 'Sphinx']),\n ('buildbot.steps.python_twisted', [\n 'HLint', 'Trial', 'RemovePYCs']),\n ('buildbot.steps.shell', [\n 'ShellCommand', 'TreeSize', 'SetPropertyFromCommand',\n 'Configure', 'WarningCountingShellCommand', 'Compile',\n 'Test', 'PerlModuleTest']),\n ('buildbot.steps.shellsequence', ['ShellSequence']),\n ('buildbot.steps.source.bzr', ['Bzr']),\n ('buildbot.steps.source.cvs', ['CVS']),\n ('buildbot.steps.source.darcs', ['Darcs']),\n ('buildbot.steps.source.gerrit', ['Gerrit']),\n ('buildbot.steps.source.git', ['Git']),\n ('buildbot.steps.source.github', ['GitHub']),\n ('buildbot.steps.source.mercurial', ['Mercurial']),\n ('buildbot.steps.source.mtn', ['Monotone']),\n ('buildbot.steps.source.p4', ['P4']),\n ('buildbot.steps.source.repo', ['Repo']),\n ('buildbot.steps.source.svn', ['SVN']),\n ('buildbot.steps.subunit', ['SubunitShellCommand']),\n ('buildbot.steps.transfer', [\n 'FileUpload', 'DirectoryUpload', 'MultipleFileUpload',\n 'FileDownload', 'StringDownload', 'JSONStringDownload',\n 'JSONPropertiesDownload']),\n ('buildbot.steps.trigger', ['Trigger']),\n ('buildbot.steps.vstudio', [\n 'VC6', 'VC7', 'VS2003', 'VC8', 'VS2005', 'VCExpress9', 'VC9',\n 'VS2008', 'VC10', 'VS2010', 'VC11', 'VS2012', 'VC12', 'VS2013',\n 'VC14', 'VS2015', 'MsBuild4', 'MsBuild', 'MsBuild12', 'MsBuild14']),\n ('buildbot.steps.worker', [\n 'SetPropertiesFromEnv', 'FileExists', 'CopyDirectory',\n 'RemoveDirectory', 'MakeDirectory']),\n ]),\n ('buildbot.reporters', [\n ('buildbot.reporters.mail', ['MailNotifier']),\n ('buildbot.reporters.message', ['MessageFormatter']),\n ('buildbot.reporters.gerrit', ['GerritStatusPush']),\n ('buildbot.reporters.gerrit_verify_status',\n ['GerritVerifyStatusPush']),\n ('buildbot.reporters.http', ['HttpStatusPush']),\n ('buildbot.reporters.github', ['GitHubStatusPush', 
'GitHubCommentPush']),\n ('buildbot.reporters.gitlab', ['GitLabStatusPush']),\n ('buildbot.reporters.stash', ['StashStatusPush']),\n ('buildbot.reporters.bitbucket', ['BitbucketStatusPush']),\n ('buildbot.reporters.irc', ['IRC']),\n ]),\n ('buildbot.util', [\n # Connection seems to be a way too generic name, though\n ('buildbot.worker.libvirt', ['Connection']),\n ('buildbot.changes.filter', ['ChangeFilter']),\n ('buildbot.changes.gerritchangesource', ['GerritChangeFilter']),\n ('buildbot.changes.svnpoller', [\n ('svn.split_file_projects_branches',\n 'split_file_projects_branches'),\n ('svn.split_file_branches', 'split_file_branches'),\n ('svn.split_file_alwaystrunk', 'split_file_alwaystrunk')]),\n ('buildbot.config', ['BuilderConfig']),\n ('buildbot.locks', [\n 'MasterLock',\n 'WorkerLock',\n ]),\n ('buildbot.manhole', [\n 'AuthorizedKeysManhole', 'PasswordManhole', 'TelnetManhole']),\n ('buildbot.process.builder', [\n 'enforceChosenWorker',\n ]),\n ('buildbot.process.factory', [\n 'BuildFactory', 'GNUAutoconf', 'CPAN', 'Distutils', 'Trial',\n 'BasicBuildFactory', 'QuickBuildFactory', 'BasicSVN']),\n ('buildbot.process.logobserver', ['LogLineObserver']),\n ('buildbot.process.properties', [\n 'FlattenList', 'Interpolate', 'Property', 'Transform',\n 'WithProperties', 'renderer']),\n ('buildbot.process.properties', [\n 'CommandlineUserManager']),\n ('buildbot.revlinks', ['RevlinkMatch']),\n ('buildbot.reporters.utils', ['URLForBuild']),\n ('buildbot.schedulers.forcesched', [\n 'AnyPropertyParameter', 'BooleanParameter',\n 'ChoiceStringParameter',\n 'CodebaseParameter', 'FixedParameter', 'InheritBuildParameter',\n 'IntParameter', 'NestedParameter', 'ParameterGroup',\n 'StringParameter', 'TextParameter', 'UserNameParameter',\n 'WorkerChoiceParameter',\n ]),\n ('buildbot.process.results', [\n 'Results', 'SUCCESS', 'WARNINGS', 'FAILURE', 'SKIPPED',\n 'EXCEPTION', 'RETRY', 'CANCELLED']),\n ('buildbot.steps.mtrlogobserver', ['EqConnectionPool']),\n ('buildbot.steps.source.repo', [\n ('repo.DownloadsFromChangeSource',\n 'RepoDownloadsFromChangeSource'),\n ('repo.DownloadsFromProperties',\n 'RepoDownloadsFromProperties')]),\n ('buildbot.steps.shellsequence', ['ShellArg']),\n ('buildbot.www.avatar', ['AvatarGravatar']),\n ('buildbot.www.auth', [\n 'UserPasswordAuth', 'HTPasswdAuth', 'RemoteUserAuth']),\n ('buildbot.www.ldapuserinfo', ['LdapUserInfo']),\n ('buildbot.www.oauth2', [\n 'GoogleAuth', 'GitHubAuth', 'GitLabAuth', 'BitbucketAuth']),\n ('buildbot.db.dbconfig', [\n 'DbConfig']),\n ('buildbot.www.authz', [\n 'Authz', 'fnmatchStrMatcher', 'reStrMatcher']),\n ('buildbot.www.authz.roles', [\n 'RolesFromEmails', 'RolesFromGroups', 'RolesFromOwner', 'RolesFromUsername']),\n ('buildbot.www.authz.endpointmatchers', [\n 'AnyEndpointMatcher', 'StopBuildEndpointMatcher', 'ForceBuildEndpointMatcher',\n 'RebuildBuildEndpointMatcher', 'AnyControlEndpointMatcher', 'EnableSchedulerEndpointMatcher']),\n ])\n ]), {\n 'console_scripts': [\n 'buildbot=buildbot.scripts.runner:run',\n # this will also be shipped on non windows :-(\n 'buildbot_windows_service=buildbot.scripts.windows_service:HandleCommandLine',\n ]}\n )\n}\n\n# set zip_safe to false to force Windows installs to always unpack eggs\n# into directories, which seems to work better --\n# see http://buildbot.net/trac/ticket/907\nif sys.platform == \"win32\":\n setup_args['zip_safe'] = False\n\npy_26 = sys.version_info[0] > 2 or (\n sys.version_info[0] == 2 and sys.version_info[1] >= 6)\nif not py_26:\n raise RuntimeError(\"Buildbot master requires at least 
Python-2.6\")\n\n# pip<1.4 doesn't have the --pre flag, and will thus attempt to install alpha\n# and beta versions of Buildbot. Prevent that from happening.\nVERSION_MSG = \"\"\"\nThis is a pre-release version of Buildbot, which can only be installed with\npip-1.4 or later Try installing the latest stable version of Buildbot instead:\n pip install buildbot==0.8.12\nSee https://pypi.python.org/pypi/buildbot to verify the current stable version.\n\"\"\"\nif 'a' in version or 'b' in version:\n try:\n pip_dist = pkg_resources.get_distribution('pip')\n except pkg_resources.DistributionNotFound:\n pip_dist = None\n\n if pip_dist:\n if LooseVersion(pip_dist.version) < LooseVersion('1.4'):\n raise RuntimeError(VERSION_MSG)\n\nif sys.version_info[0] >= 3:\n twisted_ver = \">= 17.1.0\"\nelse:\n twisted_ver = \">= 14.0.1\"\nautobahn_ver = \">= 0.16.0\"\ntxaio_ver = \">= 2.2.2\"\n\nbundle_version = version.split(\"-\")[0]\n\n# dependencies\nsetup_args['install_requires'] = [\n 'setuptools >= 8.0',\n 'Twisted ' + twisted_ver,\n 'Jinja2 >= 2.1',\n # required for tests, but Twisted requires this anyway\n 'zope.interface >= 4.1.1',\n # python-future required for py2/3 compatibility\n 'future',\n 'sqlalchemy>=0.8.0',\n 'sqlalchemy-migrate>=0.9',\n 'python-dateutil>=1.5',\n 'txaio ' + txaio_ver,\n 'autobahn ' + autobahn_ver,\n 'PyJWT',\n 'distro'\n]\n\n# Unit test dependencies.\ntest_deps = [\n # http client libraries\n 'treq',\n 'txrequests',\n # pyjade required for custom templates tests\n 'pyjade',\n # boto3 and moto required for running EC2 tests\n 'boto3',\n 'moto',\n # txgithub required to run buildbot.status.github module tests\n 'txgithub',\n 'ramlfications',\n 'mock>=2.0.0',\n]\nif sys.platform != 'win32':\n test_deps += [\n # LZ4 fails to build on Windows:\n # https://github.com/steeve/python-lz4/issues/27\n # lz4 required for log compression tests.\n 'lz4',\n ]\n\nsetup_args['tests_require'] = test_deps\n\nsetup_args['extras_require'] = {\n 'test': [\n 'setuptools_trial',\n 'isort',\n # spellcheck introduced in version 1.4.0\n 'pylint>=1.4.0',\n 'pyenchant',\n 'flake8~=2.6.0',\n ] + test_deps,\n 'bundle': [\n \"buildbot-www=={0}\".format(bundle_version),\n \"buildbot-worker=={0}\".format(bundle_version),\n \"buildbot-waterfall-view=={0}\".format(bundle_version),\n \"buildbot-console-view=={0}\".format(bundle_version),\n ],\n 'tls': [\n 'Twisted[tls] ' + twisted_ver,\n # There are bugs with extras inside extras:\n # <https://github.com/pypa/pip/issues/3516>\n # so we explicitly include Twisted[tls] dependencies.\n 'pyopenssl >= 16.0.0',\n 'service_identity',\n 'idna >= 0.6',\n ],\n 'docs': [\n 'docutils<0.13.0',\n 'sphinx>1.4.0',\n 'sphinxcontrib-blockdiag',\n 'sphinxcontrib-spelling',\n 'pyenchant',\n 'docutils>=0.8',\n 'ramlfications',\n 'sphinx-jinja',\n 'towncrier'\n ],\n}\n\nif '--help-commands' in sys.argv or 'trial' in sys.argv or 'test' in sys.argv:\n setup_args['setup_requires'] = [\n 'setuptools_trial',\n ]\n\nif os.getenv('NO_INSTALL_REQS'):\n setup_args['install_requires'] = None\n setup_args['extras_require'] = None\n\nsetup(**setup_args)\n\n# Local Variables:\n# fill-column: 71\n# End:\n", "path": "master/setup.py" } ]
diff --git a/master/setup.py b/master/setup.py index 05b97e576a8a..eac3ea4c4c8a 100755 --- a/master/setup.py +++ b/master/setup.py @@ -180,6 +180,7 @@ def define_plugin_entries(groups): "buildbot.schedulers", "buildbot.scripts", "buildbot.secrets", + "buildbot.secrets.providers", "buildbot.statistics", "buildbot.statistics.storage_backends", "buildbot.status",
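The diff above adds the new `buildbot.secrets.providers` directory to the hand-maintained `packages` list in `master/setup.py`; with an explicit list like this, any subpackage that is not enumerated is silently omitted from the installed distribution. As a point of comparison only (Buildbot keeps the explicit list, in part so the test packages can be dropped from wheels), here is a hedged sketch of the `setuptools.find_packages` alternative:

```python
from setuptools import find_packages

# Auto-discover every package under the "buildbot" namespace instead of
# enumerating them by hand; the test packages are excluded here purely
# for illustration (Buildbot's setup.py only skips them when building wheels).
packages = find_packages(
    include=["buildbot", "buildbot.*"],
    exclude=["buildbot.test", "buildbot.test.*"],
)
```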
pypi__warehouse-8210
Ordering at /stats is incorrect for TestPyPI

The list at https://test.pypi.org/stats/ is incorrect and lists projects with 0 bytes first:

<img width="736" alt="Screen Shot 2020-07-01 at 10 06 59 AM" src="https://user-images.githubusercontent.com/294415/86260240-a06f1b00-bb82-11ea-8940-4cc7aced95f6.png">

The 0-byte projects should not be included in that list.

---

**Good First Issue**: This issue is good for first time contributors. If you've already contributed to Warehouse, work on [another issue without this label](https://github.com/pypa/warehouse/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3A%22good+first+issue%22) instead. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). If you are working on this issue and have questions, feel free to ask them here, in the [`#pypa-dev` chat channel on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or on the [distutils-sig.python.org mailing list](https://mail.python.org/mailman3/lists/distutils-sig.python.org/).

**Screenshot Required**: *If your pull request makes a visual change*, include a screenshot of your update. This helps our team give you feedback faster.
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections\n\nimport elasticsearch\n\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.httpexceptions import (\n HTTPBadRequest,\n HTTPException,\n HTTPMovedPermanently,\n HTTPNotFound,\n HTTPSeeOther,\n HTTPServiceUnavailable,\n exception_response,\n)\nfrom pyramid.i18n import make_localizer\nfrom pyramid.interfaces import ITranslationDirectories\nfrom pyramid.renderers import render_to_response\nfrom pyramid.response import Response\nfrom pyramid.view import (\n exception_view_config,\n forbidden_view_config,\n notfound_view_config,\n view_config,\n)\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import aliased, joinedload\nfrom sqlalchemy.sql import exists\nfrom trove_classifiers import classifiers, deprecated_classifiers\n\nfrom warehouse.accounts import REDIRECT_FIELD_NAME\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.http import add_vary, cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.db import DatabaseNotAvailable\nfrom warehouse.forms import SetLocaleForm\nfrom warehouse.i18n import LOCALE_ATTR\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.models import File, Project, Release, release_classifiers\nfrom warehouse.search.queries import SEARCH_FILTER_ORDER, get_es_query\nfrom warehouse.utils.http import is_safe_url\nfrom warehouse.utils.paginate import ElasticsearchPage, paginate_url_factory\nfrom warehouse.utils.row_counter import RowCount\n\n\n@view_config(context=HTTPException)\n@notfound_view_config(append_slash=HTTPMovedPermanently)\ndef httpexception_view(exc, request):\n # This special case exists for the easter egg that appears on the 404\n # response page. 
We don't generally allow youtube embeds, but we make an\n # except for this one.\n if isinstance(exc, HTTPNotFound):\n request.find_service(name=\"csp\").merge(\n {\n \"frame-src\": [\"https://www.youtube-nocookie.com\"],\n \"script-src\": [\"https://www.youtube.com\", \"https://s.ytimg.com\"],\n }\n )\n try:\n # Lightweight version of 404 page for `/simple/`\n if isinstance(exc, HTTPNotFound) and request.path.startswith(\"/simple/\"):\n response = Response(body=\"404 Not Found\", content_type=\"text/plain\")\n else:\n response = render_to_response(\n \"{}.html\".format(exc.status_code), {}, request=request\n )\n except LookupError:\n # We don't have a customized template for this error, so we'll just let\n # the default happen instead.\n return exc\n\n # Copy over the important values from our HTTPException to our new response\n # object.\n response.status = exc.status\n response.headers.extend(\n (k, v) for k, v in exc.headers.items() if k not in response.headers\n )\n\n return response\n\n\n@forbidden_view_config()\n@exception_view_config(PredicateMismatch)\ndef forbidden(exc, request, redirect_to=\"accounts.login\"):\n # If the forbidden error is because the user isn't logged in, then we'll\n # redirect them to the log in page.\n if request.authenticated_userid is None:\n url = request.route_url(\n redirect_to, _query={REDIRECT_FIELD_NAME: request.path_qs}\n )\n return HTTPSeeOther(url)\n\n # If we've reached here, then the user is logged in and they are genuinely\n # not allowed to access this page.\n return httpexception_view(exc, request)\n\n\n@forbidden_view_config(path_info=r\"^/_includes/\")\n@exception_view_config(PredicateMismatch, path_info=r\"^/_includes/\")\ndef forbidden_include(exc, request):\n # If the forbidden error is for a client-side-include, just return an empty\n # response instead of redirecting\n return Response(status=403)\n\n\n@view_config(context=DatabaseNotAvailable)\ndef service_unavailable(exc, request):\n return httpexception_view(HTTPServiceUnavailable(), request)\n\n\n@view_config(\n route_name=\"robots.txt\",\n renderer=\"robots.txt\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef robotstxt(request):\n request.response.content_type = \"text/plain\"\n return {}\n\n\n@view_config(\n route_name=\"opensearch.xml\",\n renderer=\"opensearch.xml\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef opensearchxml(request):\n request.response.content_type = \"text/xml\"\n return {}\n\n\n@view_config(\n route_name=\"index\",\n renderer=\"index.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\", \"trending\"],\n )\n ],\n has_translations=True,\n)\ndef index(request):\n project_ids = [\n r[0]\n for r in (\n request.db.query(Project.id)\n .order_by(Project.zscore.desc().nullslast(), func.random())\n .limit(5)\n .all()\n )\n ]\n release_a = aliased(\n Release,\n request.db.query(Release)\n .distinct(Release.project_id)\n .filter(Release.project_id.in_(project_ids))\n .order_by(\n Release.project_id,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .subquery(),\n )\n trending_projects = (\n 
request.db.query(release_a)\n .options(joinedload(release_a.project))\n .order_by(func.array_idx(project_ids, release_a.project_id))\n .all()\n )\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(5)\n .all()\n )\n\n counts = dict(\n request.db.query(RowCount.table_name, RowCount.count)\n .filter(\n RowCount.table_name.in_(\n [\n Project.__tablename__,\n Release.__tablename__,\n File.__tablename__,\n User.__tablename__,\n ]\n )\n )\n .all()\n )\n\n return {\n \"latest_releases\": latest_releases,\n \"trending_projects\": trending_projects,\n \"num_projects\": counts.get(Project.__tablename__, 0),\n \"num_releases\": counts.get(Release.__tablename__, 0),\n \"num_files\": counts.get(File.__tablename__, 0),\n \"num_users\": counts.get(User.__tablename__, 0),\n }\n\n\n@view_config(\n route_name=\"locale\",\n request_method=\"GET\",\n request_param=SetLocaleForm.__params__,\n uses_session=True,\n)\ndef locale(request):\n form = SetLocaleForm(**request.GET)\n\n redirect_to = request.referer\n if not is_safe_url(redirect_to, host=request.host):\n redirect_to = request.route_path(\"index\")\n resp = HTTPSeeOther(redirect_to)\n\n if form.validate():\n # Build a localizer for the locale we're about to switch to. This will\n # happen automatically once the cookie is set, but if we want the flash\n # message indicating success to be in the new language as well, we need\n # to do it here.\n tdirs = request.registry.queryUtility(ITranslationDirectories)\n _ = make_localizer(form.locale_id.data, tdirs).translate\n request.session.flash(_(\"Locale updated\"), queue=\"success\")\n resp.set_cookie(LOCALE_ATTR, form.locale_id.data)\n\n return resp\n\n\n@view_config(\n route_name=\"classifiers\", renderer=\"pages/classifiers.html\", has_translations=True\n)\ndef list_classifiers(request):\n return {\"classifiers\": sorted(classifiers)}\n\n\n@view_config(\n route_name=\"search\",\n renderer=\"search/results.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\"],\n )\n ],\n has_translations=True,\n)\ndef search(request):\n metrics = request.find_service(IMetricsService, context=None)\n\n querystring = request.params.get(\"q\", \"\").replace(\"'\", '\"')\n order = request.params.get(\"o\", \"\")\n classifiers = request.params.getall(\"c\")\n query = get_es_query(request.es, querystring, order, classifiers)\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\")\n\n try:\n page = ElasticsearchPage(\n query, page=page_num, url_maker=paginate_url_factory(request)\n )\n except elasticsearch.TransportError:\n metrics.increment(\"warehouse.views.search.error\")\n raise HTTPServiceUnavailable\n\n if page.page_count and page_num > page.page_count:\n raise HTTPNotFound\n\n available_filters = collections.defaultdict(list)\n\n classifiers_q = (\n request.db.query(Classifier)\n .with_entities(Classifier.classifier)\n .filter(\n exists([release_classifiers.c.trove_id]).where(\n release_classifiers.c.trove_id == Classifier.id\n ),\n Classifier.classifier.notin_(deprecated_classifiers.keys()),\n )\n .order_by(Classifier.classifier)\n )\n\n for cls in classifiers_q:\n first, *_ = cls.classifier.split(\" :: \")\n available_filters[first].append(cls.classifier)\n\n def filter_key(item):\n try:\n return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]\n except ValueError:\n return 1, 0, 
item[0]\n\n def form_filters_tree(split_list):\n \"\"\"\n Takes a list of lists, each of them containing a filter and\n one of its children.\n Returns a dictionary, each key being a filter and each value being\n the filter's children.\n \"\"\"\n d = {}\n for list_ in split_list:\n current_level = d\n for part in list_:\n if part not in current_level:\n current_level[part] = {}\n current_level = current_level[part]\n return d\n\n def process_available_filters():\n \"\"\"\n Processes available filters and returns a list of dictionaries.\n The value of a key in the dictionary represents its children\n \"\"\"\n sorted_filters = sorted(available_filters.items(), key=filter_key)\n output = []\n for f in sorted_filters:\n classifier_list = f[1]\n split_list = [i.split(\" :: \") for i in classifier_list]\n tree = form_filters_tree(split_list)\n output.append(tree)\n return output\n\n metrics = request.find_service(IMetricsService, context=None)\n metrics.histogram(\"warehouse.views.search.results\", page.item_count)\n\n return {\n \"page\": page,\n \"term\": querystring,\n \"order\": order,\n \"available_filters\": process_available_filters(),\n \"applied_filters\": request.params.getall(\"c\"),\n }\n\n\n@view_config(\n route_name=\"stats\",\n renderer=\"pages/stats.html\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n has_translations=True,\n)\n@view_config(\n route_name=\"stats.json\",\n renderer=\"json\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n accept=\"application/json\",\n)\ndef stats(request):\n total_size = int(request.db.query(func.sum(Project.total_size)).first()[0])\n top_100_packages = (\n request.db.query(Project)\n .with_entities(Project.name, Project.total_size)\n .order_by(Project.total_size.desc())\n .limit(100)\n .all()\n )\n # Move top packages into a dict to make JSON more self describing\n top_packages = {\n pkg_name: {\"size\": int(pkg_bytes) if pkg_bytes is not None else 0}\n for pkg_name, pkg_bytes in top_100_packages\n }\n\n return {\"total_packages_size\": total_size, \"top_packages\": top_packages}\n\n\n@view_config(\n route_name=\"includes.current-user-indicator\",\n renderer=\"includes/current-user-indicator.html\",\n uses_session=True,\n has_translations=True,\n)\ndef current_user_indicator(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.flash-messages\",\n renderer=\"includes/flash-messages.html\",\n uses_session=True,\n has_translations=True,\n)\ndef flash_messages(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.session-notifications\",\n renderer=\"includes/session-notifications.html\",\n uses_session=True,\n has_translations=True,\n)\ndef session_notifications(request):\n return {}\n\n\n@view_config(route_name=\"health\", renderer=\"string\")\ndef health(request):\n # This will ensure that we can access the database and run queries against\n # it without doing anything that will take a lock or block other queries.\n request.db.execute(\"SELECT 1\")\n\n # Nothing will actually check this, but it's a little nicer to have\n # something to return besides an empty body.\n return \"OK\"\n\n\n@view_config(route_name=\"force-status\")\ndef force_status(request):\n try:\n 
raise exception_response(int(request.matchdict[\"status\"]))\n except KeyError:\n raise exception_response(404) from None\n", "path": "warehouse/views.py" } ]
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections\n\nimport elasticsearch\n\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.httpexceptions import (\n HTTPBadRequest,\n HTTPException,\n HTTPMovedPermanently,\n HTTPNotFound,\n HTTPSeeOther,\n HTTPServiceUnavailable,\n exception_response,\n)\nfrom pyramid.i18n import make_localizer\nfrom pyramid.interfaces import ITranslationDirectories\nfrom pyramid.renderers import render_to_response\nfrom pyramid.response import Response\nfrom pyramid.view import (\n exception_view_config,\n forbidden_view_config,\n notfound_view_config,\n view_config,\n)\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import aliased, joinedload\nfrom sqlalchemy.sql import exists\nfrom trove_classifiers import classifiers, deprecated_classifiers\n\nfrom warehouse.accounts import REDIRECT_FIELD_NAME\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.http import add_vary, cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.db import DatabaseNotAvailable\nfrom warehouse.forms import SetLocaleForm\nfrom warehouse.i18n import LOCALE_ATTR\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.models import File, Project, Release, release_classifiers\nfrom warehouse.search.queries import SEARCH_FILTER_ORDER, get_es_query\nfrom warehouse.utils.http import is_safe_url\nfrom warehouse.utils.paginate import ElasticsearchPage, paginate_url_factory\nfrom warehouse.utils.row_counter import RowCount\n\n\n@view_config(context=HTTPException)\n@notfound_view_config(append_slash=HTTPMovedPermanently)\ndef httpexception_view(exc, request):\n # This special case exists for the easter egg that appears on the 404\n # response page. 
We don't generally allow youtube embeds, but we make an\n # except for this one.\n if isinstance(exc, HTTPNotFound):\n request.find_service(name=\"csp\").merge(\n {\n \"frame-src\": [\"https://www.youtube-nocookie.com\"],\n \"script-src\": [\"https://www.youtube.com\", \"https://s.ytimg.com\"],\n }\n )\n try:\n # Lightweight version of 404 page for `/simple/`\n if isinstance(exc, HTTPNotFound) and request.path.startswith(\"/simple/\"):\n response = Response(body=\"404 Not Found\", content_type=\"text/plain\")\n else:\n response = render_to_response(\n \"{}.html\".format(exc.status_code), {}, request=request\n )\n except LookupError:\n # We don't have a customized template for this error, so we'll just let\n # the default happen instead.\n return exc\n\n # Copy over the important values from our HTTPException to our new response\n # object.\n response.status = exc.status\n response.headers.extend(\n (k, v) for k, v in exc.headers.items() if k not in response.headers\n )\n\n return response\n\n\n@forbidden_view_config()\n@exception_view_config(PredicateMismatch)\ndef forbidden(exc, request, redirect_to=\"accounts.login\"):\n # If the forbidden error is because the user isn't logged in, then we'll\n # redirect them to the log in page.\n if request.authenticated_userid is None:\n url = request.route_url(\n redirect_to, _query={REDIRECT_FIELD_NAME: request.path_qs}\n )\n return HTTPSeeOther(url)\n\n # If we've reached here, then the user is logged in and they are genuinely\n # not allowed to access this page.\n return httpexception_view(exc, request)\n\n\n@forbidden_view_config(path_info=r\"^/_includes/\")\n@exception_view_config(PredicateMismatch, path_info=r\"^/_includes/\")\ndef forbidden_include(exc, request):\n # If the forbidden error is for a client-side-include, just return an empty\n # response instead of redirecting\n return Response(status=403)\n\n\n@view_config(context=DatabaseNotAvailable)\ndef service_unavailable(exc, request):\n return httpexception_view(HTTPServiceUnavailable(), request)\n\n\n@view_config(\n route_name=\"robots.txt\",\n renderer=\"robots.txt\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef robotstxt(request):\n request.response.content_type = \"text/plain\"\n return {}\n\n\n@view_config(\n route_name=\"opensearch.xml\",\n renderer=\"opensearch.xml\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef opensearchxml(request):\n request.response.content_type = \"text/xml\"\n return {}\n\n\n@view_config(\n route_name=\"index\",\n renderer=\"index.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\", \"trending\"],\n )\n ],\n has_translations=True,\n)\ndef index(request):\n project_ids = [\n r[0]\n for r in (\n request.db.query(Project.id)\n .order_by(Project.zscore.desc().nullslast(), func.random())\n .limit(5)\n .all()\n )\n ]\n release_a = aliased(\n Release,\n request.db.query(Release)\n .distinct(Release.project_id)\n .filter(Release.project_id.in_(project_ids))\n .order_by(\n Release.project_id,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .subquery(),\n )\n trending_projects = (\n 
request.db.query(release_a)\n .options(joinedload(release_a.project))\n .order_by(func.array_idx(project_ids, release_a.project_id))\n .all()\n )\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(5)\n .all()\n )\n\n counts = dict(\n request.db.query(RowCount.table_name, RowCount.count)\n .filter(\n RowCount.table_name.in_(\n [\n Project.__tablename__,\n Release.__tablename__,\n File.__tablename__,\n User.__tablename__,\n ]\n )\n )\n .all()\n )\n\n return {\n \"latest_releases\": latest_releases,\n \"trending_projects\": trending_projects,\n \"num_projects\": counts.get(Project.__tablename__, 0),\n \"num_releases\": counts.get(Release.__tablename__, 0),\n \"num_files\": counts.get(File.__tablename__, 0),\n \"num_users\": counts.get(User.__tablename__, 0),\n }\n\n\n@view_config(\n route_name=\"locale\",\n request_method=\"GET\",\n request_param=SetLocaleForm.__params__,\n uses_session=True,\n)\ndef locale(request):\n form = SetLocaleForm(**request.GET)\n\n redirect_to = request.referer\n if not is_safe_url(redirect_to, host=request.host):\n redirect_to = request.route_path(\"index\")\n resp = HTTPSeeOther(redirect_to)\n\n if form.validate():\n # Build a localizer for the locale we're about to switch to. This will\n # happen automatically once the cookie is set, but if we want the flash\n # message indicating success to be in the new language as well, we need\n # to do it here.\n tdirs = request.registry.queryUtility(ITranslationDirectories)\n _ = make_localizer(form.locale_id.data, tdirs).translate\n request.session.flash(_(\"Locale updated\"), queue=\"success\")\n resp.set_cookie(LOCALE_ATTR, form.locale_id.data)\n\n return resp\n\n\n@view_config(\n route_name=\"classifiers\", renderer=\"pages/classifiers.html\", has_translations=True\n)\ndef list_classifiers(request):\n return {\"classifiers\": sorted(classifiers)}\n\n\n@view_config(\n route_name=\"search\",\n renderer=\"search/results.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\"],\n )\n ],\n has_translations=True,\n)\ndef search(request):\n metrics = request.find_service(IMetricsService, context=None)\n\n querystring = request.params.get(\"q\", \"\").replace(\"'\", '\"')\n order = request.params.get(\"o\", \"\")\n classifiers = request.params.getall(\"c\")\n query = get_es_query(request.es, querystring, order, classifiers)\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\")\n\n try:\n page = ElasticsearchPage(\n query, page=page_num, url_maker=paginate_url_factory(request)\n )\n except elasticsearch.TransportError:\n metrics.increment(\"warehouse.views.search.error\")\n raise HTTPServiceUnavailable\n\n if page.page_count and page_num > page.page_count:\n raise HTTPNotFound\n\n available_filters = collections.defaultdict(list)\n\n classifiers_q = (\n request.db.query(Classifier)\n .with_entities(Classifier.classifier)\n .filter(\n exists([release_classifiers.c.trove_id]).where(\n release_classifiers.c.trove_id == Classifier.id\n ),\n Classifier.classifier.notin_(deprecated_classifiers.keys()),\n )\n .order_by(Classifier.classifier)\n )\n\n for cls in classifiers_q:\n first, *_ = cls.classifier.split(\" :: \")\n available_filters[first].append(cls.classifier)\n\n def filter_key(item):\n try:\n return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]\n except ValueError:\n return 1, 0, 
item[0]\n\n def form_filters_tree(split_list):\n \"\"\"\n Takes a list of lists, each of them containing a filter and\n one of its children.\n Returns a dictionary, each key being a filter and each value being\n the filter's children.\n \"\"\"\n d = {}\n for list_ in split_list:\n current_level = d\n for part in list_:\n if part not in current_level:\n current_level[part] = {}\n current_level = current_level[part]\n return d\n\n def process_available_filters():\n \"\"\"\n Processes available filters and returns a list of dictionaries.\n The value of a key in the dictionary represents its children\n \"\"\"\n sorted_filters = sorted(available_filters.items(), key=filter_key)\n output = []\n for f in sorted_filters:\n classifier_list = f[1]\n split_list = [i.split(\" :: \") for i in classifier_list]\n tree = form_filters_tree(split_list)\n output.append(tree)\n return output\n\n metrics = request.find_service(IMetricsService, context=None)\n metrics.histogram(\"warehouse.views.search.results\", page.item_count)\n\n return {\n \"page\": page,\n \"term\": querystring,\n \"order\": order,\n \"available_filters\": process_available_filters(),\n \"applied_filters\": request.params.getall(\"c\"),\n }\n\n\n@view_config(\n route_name=\"stats\",\n renderer=\"pages/stats.html\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n has_translations=True,\n)\n@view_config(\n route_name=\"stats.json\",\n renderer=\"json\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n accept=\"application/json\",\n)\ndef stats(request):\n total_size = int(request.db.query(func.sum(Project.total_size)).first()[0])\n top_100_packages = (\n request.db.query(Project)\n .with_entities(Project.name, Project.total_size)\n .order_by(Project.total_size.desc().nullslast())\n .limit(100)\n .all()\n )\n # Move top packages into a dict to make JSON more self describing\n top_packages = {\n pkg_name: {\"size\": int(pkg_bytes) if pkg_bytes is not None else 0}\n for pkg_name, pkg_bytes in top_100_packages\n }\n\n return {\"total_packages_size\": total_size, \"top_packages\": top_packages}\n\n\n@view_config(\n route_name=\"includes.current-user-indicator\",\n renderer=\"includes/current-user-indicator.html\",\n uses_session=True,\n has_translations=True,\n)\ndef current_user_indicator(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.flash-messages\",\n renderer=\"includes/flash-messages.html\",\n uses_session=True,\n has_translations=True,\n)\ndef flash_messages(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.session-notifications\",\n renderer=\"includes/session-notifications.html\",\n uses_session=True,\n has_translations=True,\n)\ndef session_notifications(request):\n return {}\n\n\n@view_config(route_name=\"health\", renderer=\"string\")\ndef health(request):\n # This will ensure that we can access the database and run queries against\n # it without doing anything that will take a lock or block other queries.\n request.db.execute(\"SELECT 1\")\n\n # Nothing will actually check this, but it's a little nicer to have\n # something to return besides an empty body.\n return \"OK\"\n\n\n@view_config(route_name=\"force-status\")\ndef 
force_status(request):\n try:\n raise exception_response(int(request.matchdict[\"status\"]))\n except KeyError:\n raise exception_response(404) from None\n", "path": "warehouse/views.py" } ]
diff --git a/warehouse/views.py b/warehouse/views.py index bcd126a68a8f..9ea8e60252d3 100644 --- a/warehouse/views.py +++ b/warehouse/views.py @@ -399,7 +399,7 @@ def stats(request): top_100_packages = ( request.db.query(Project) .with_entities(Project.name, Project.total_size) - .order_by(Project.total_size.desc()) + .order_by(Project.total_size.desc().nullslast()) .limit(100) .all() )
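The one-line fix above works because PostgreSQL sorts NULLs first under `DESC` ordering, so projects whose `total_size` was never computed (rendered as 0 bytes) floated to the top of the /stats list; `nullslast()` pushes them to the end of the result set instead. Below is a minimal sketch of the same ordering clause in isolation, using a hypothetical stand-in table and SQLAlchemy 1.4-style `select()`; only the `desc().nullslast()` call mirrors the actual change.

```python
from sqlalchemy import Column, Integer, MetaData, String, Table, select

metadata = MetaData()

# Hypothetical stand-in for Warehouse's projects table.
projects = Table(
    "projects",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
    Column("total_size", Integer, nullable=True),
)

# On PostgreSQL a plain DESC sort places NULLs first, which is what pushed
# projects with no recorded total_size to the top of the /stats list.
# NULLS LAST keeps them in the result but moves them to the end.
stmt = (
    select(projects.c.name, projects.c.total_size)
    .order_by(projects.c.total_size.desc().nullslast())
    .limit(100)
)

# Renders: SELECT ... ORDER BY projects.total_size DESC NULLS LAST LIMIT :param_1
print(stmt)
```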
learningequality__kolibri-7702
Subquery in `FacilityViewSet` `last_synced` annotation can return multiple results

<!--
Instructions:
 * Fill out the sections below, replace …'s with information about your issue
 * Use the 'preview' function above this text box to verify formatting before submitting
-->

### Observed behavior

This subquery from the `TransferSession` table can return multiple results, which causes the `/api/auth/facility` endpoint to break:

https://github.com/learningequality/kolibri/blob/release-v0.14.x/kolibri/core/auth/api.py#L359

### Expected behavior

The `/api/auth/facility` endpoint doesn't break because of this query. The subquery should be limited to returning a single value.

### User-facing consequences

<!-- Implications and real-world consequences for learners, coaches, admins, and other users of the application -->

…

### Errors and logs

<!--
Relevant logs from:
 * the command line
 * ~/.kolibri/logs/kolibri.txt
 * the browser console

Please wrap errors in triple backticks for clean formatting like this:
```
01:10 info: something happened
01:12 error: something bad happened
```
-->

…

### Steps to reproduce

<!-- Precise steps that someone else can follow in order to see this behavior -->

…

### Context

<!--
Tell us about your environment, including:
 * Kolibri version
 * Operating system
 * Browser
-->

…
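One standard way to satisfy the expected behavior above is to slice the inner queryset to a single row before wrapping it in `Subquery`, so the annotation always resolves to at most one value. The sketch below reuses the queryset from the `last_synced` annotation in `kolibri/core/auth/api.py` and only adds the trailing `[:1]`; it illustrates the usual Django pattern and is not necessarily the exact patch that was merged.

```python
from django.db.models import Func, OuterRef, Subquery, TextField, Value
from django.db.models.functions import Cast
from kolibri.core.auth.models import Facility
from morango.models import TransferSession

# Same queryset as the last_synced annotation in FacilityViewSet, but sliced
# with [:1] so the subquery yields at most one row: the most recent
# TransferSession matching the facility's dataset.
newest_session = (
    TransferSession.objects.filter(
        filter=Func(
            Cast(OuterRef("dataset"), TextField()),
            Value("-"),
            Value(""),
            function="replace",
            output_field=TextField(),
        )
    )
    .order_by("-last_activity_timestamp")
    .values("last_activity_timestamp")[:1]
)

facilities = Facility.objects.annotate(last_synced=Subquery(newest_session))
```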
[ { "content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom itertools import groupby\nfrom uuid import uuid4\n\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login\nfrom django.contrib.auth import logout\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models import Func\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import TextField\nfrom django.db.models import Value\nfrom django.db.models.functions import Cast\nfrom django.db.models.query import F\nfrom django.http import Http404\nfrom django.utils.decorators import method_decorator\nfrom django.utils.timezone import now\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import ModelChoiceFilter\nfrom morango.models import TransferSession\nfrom rest_framework import filters\nfrom rest_framework import permissions\nfrom rest_framework import status\nfrom rest_framework import views\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom .constants import collection_kinds\nfrom .constants import role_kinds\nfrom .filters import HierarchyRelationsFilter\nfrom .models import Classroom\nfrom .models import Collection\nfrom .models import Facility\nfrom .models import FacilityDataset\nfrom .models import FacilityUser\nfrom .models import LearnerGroup\nfrom .models import Membership\nfrom .models import Role\nfrom .serializers import ClassroomSerializer\nfrom .serializers import FacilityDatasetSerializer\nfrom .serializers import FacilitySerializer\nfrom .serializers import FacilityUserSerializer\nfrom .serializers import LearnerGroupSerializer\nfrom .serializers import MembershipSerializer\nfrom .serializers import PublicFacilitySerializer\nfrom .serializers import RoleSerializer\nfrom kolibri.core import error_constants\nfrom kolibri.core.api import ValuesViewset\nfrom kolibri.core.device.utils import allow_guest_access\nfrom kolibri.core.device.utils import allow_other_browsers_to_connect\nfrom kolibri.core.device.utils import valid_app_key_on_request\nfrom kolibri.core.logger.models import UserSessionLog\nfrom kolibri.core.mixins import BulkCreateMixin\nfrom kolibri.core.mixins import BulkDeleteMixin\nfrom kolibri.core.query import annotate_array_aggregate\nfrom kolibri.core.query import SQCount\nfrom kolibri.plugins.app.utils import interface\n\n\nclass KolibriAuthPermissionsFilter(filters.BaseFilterBackend):\n \"\"\"\n A Django REST Framework filter backend that limits results to those where the\n requesting user has read object level permissions. 
This filtering is delegated\n to the ``filter_readable`` method on ``KolibriAbstractBaseUser``.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if request.method == \"GET\" and request.resolver_match.url_name.endswith(\n \"-list\"\n ):\n # only filter down the queryset in the case of the list view being requested\n return request.user.filter_readable(queryset)\n else:\n # otherwise, return the full queryset, as permission checks will happen object-by-object\n # (and filtering here then leads to 404's instead of the more correct 403's)\n return queryset\n\n\ndef _ensure_raw_dict(d):\n if hasattr(d, \"dict\"):\n d = d.dict()\n return dict(d)\n\n\nclass KolibriAuthPermissions(permissions.BasePermission):\n \"\"\"\n A Django REST Framework permissions class that defers to Kolibri's permissions\n system to determine object-level permissions.\n \"\"\"\n\n def validator(self, request, view, datum):\n model = view.get_serializer_class().Meta.model\n validated_data = view.get_serializer().to_internal_value(\n _ensure_raw_dict(datum)\n )\n return request.user.can_create(model, validated_data)\n\n def has_permission(self, request, view):\n\n # as `has_object_permission` isn't called for POST/create, we need to check here\n if request.method == \"POST\" and request.data:\n if type(request.data) is list:\n data = request.data\n else:\n data = [request.data]\n\n return all(self.validator(request, view, datum) for datum in data)\n\n # for other methods, we return True, as their permissions get checked below\n return True\n\n def has_object_permission(self, request, view, obj):\n # note that there is no entry for POST here, as creation is handled by `has_permission`, above\n if request.method in permissions.SAFE_METHODS: # 'GET', 'OPTIONS' or 'HEAD'\n return request.user.can_read(obj)\n elif request.method in [\"PUT\", \"PATCH\"]:\n return request.user.can_update(obj)\n elif request.method == \"DELETE\":\n return request.user.can_delete(obj)\n else:\n return False\n\n\nclass FacilityDatasetViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n serializer_class = FacilityDatasetSerializer\n\n values = (\n \"id\",\n \"learner_can_edit_username\",\n \"learner_can_edit_name\",\n \"learner_can_edit_password\",\n \"learner_can_sign_up\",\n \"learner_can_delete_account\",\n \"learner_can_login_with_no_password\",\n \"show_download_button_in_learn\",\n \"description\",\n \"location\",\n \"registered\",\n \"preset\",\n )\n\n field_map = {\"allow_guest_access\": lambda x: allow_guest_access()}\n\n def get_queryset(self):\n queryset = FacilityDataset.objects.filter(\n collection__kind=collection_kinds.FACILITY\n )\n facility_id = self.request.query_params.get(\"facility_id\", None)\n if facility_id is not None:\n queryset = queryset.filter(collection__id=facility_id)\n return queryset\n\n\nclass FacilityUserFilter(FilterSet):\n\n member_of = ModelChoiceFilter(\n method=\"filter_member_of\", queryset=Collection.objects.all()\n )\n\n def filter_member_of(self, queryset, name, value):\n return queryset.filter(Q(memberships__collection=value) | Q(facility=value))\n\n class Meta:\n model = FacilityUser\n fields = [\"member_of\"]\n\n\nclass FacilityUserViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = FacilityUser.objects.all()\n serializer_class = FacilityUserSerializer\n filter_class = FacilityUserFilter\n\n values = (\n 
\"id\",\n \"username\",\n \"full_name\",\n \"facility\",\n \"roles__kind\",\n \"roles__collection\",\n \"roles__id\",\n \"devicepermissions__is_superuser\",\n \"id_number\",\n \"gender\",\n \"birth_year\",\n )\n\n field_map = {\n \"is_superuser\": lambda x: bool(x.pop(\"devicepermissions__is_superuser\"))\n }\n\n def consolidate(self, items, queryset):\n output = []\n items = sorted(items, key=lambda x: x[\"id\"])\n for key, group in groupby(items, lambda x: x[\"id\"]):\n roles = []\n for item in group:\n role = {\n \"collection\": item.pop(\"roles__collection\"),\n \"kind\": item.pop(\"roles__kind\"),\n \"id\": item.pop(\"roles__id\"),\n }\n if role[\"collection\"]:\n # Our values call will return null for users with no assigned roles\n # So filter them here.\n roles.append(role)\n item[\"roles\"] = roles\n output.append(item)\n return output\n\n def set_password_if_needed(self, instance, serializer):\n with transaction.atomic():\n if serializer.validated_data.get(\"password\", \"\"):\n if serializer.validated_data.get(\"password\", \"\") != \"NOT_SPECIFIED\":\n instance.set_password(serializer.validated_data[\"password\"])\n instance.save()\n return instance\n\n def perform_update(self, serializer):\n instance = serializer.save()\n self.set_password_if_needed(instance, serializer)\n # if the user is updating their own password, ensure they don't get logged out\n if self.request.user == instance:\n update_session_auth_hash(self.request, instance)\n\n def perform_create(self, serializer):\n instance = serializer.save()\n self.set_password_if_needed(instance, serializer)\n\n\nclass FacilityUsernameViewSet(ValuesViewset):\n filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n filter_fields = (\"facility\",)\n search_fields = (\"^username\",)\n\n read_only = True\n\n values = (\"username\",)\n\n def get_queryset(self):\n if valid_app_key_on_request(self.request):\n # Special case for app context to return usernames for\n # the list display\n return FacilityUser.objects.all()\n return FacilityUser.objects.filter(\n dataset__learner_can_login_with_no_password=True, roles=None\n ).filter(\n Q(devicepermissions__is_superuser=False) | Q(devicepermissions__isnull=True)\n )\n\n\nclass MembershipFilter(FilterSet):\n user_ids = CharFilter(method=\"filter_user_ids\")\n\n def filter_user_ids(self, queryset, name, value):\n return queryset.filter(user_id__in=value.split(\",\"))\n\n class Meta:\n model = Membership\n fields = [\"user\", \"collection\", \"user_ids\"]\n\n\nclass MembershipViewSet(BulkDeleteMixin, BulkCreateMixin, viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = Membership.objects.all()\n serializer_class = MembershipSerializer\n filter_class = MembershipFilter\n filter_fields = [\"user\", \"collection\", \"user_ids\"]\n\n\nclass RoleFilter(FilterSet):\n user_ids = CharFilter(method=\"filter_user_ids\")\n\n def filter_user_ids(self, queryset, name, value):\n return queryset.filter(user_id__in=value.split(\",\"))\n\n class Meta:\n model = Role\n fields = [\"user\", \"collection\", \"kind\", \"user_ids\"]\n\n\nclass RoleViewSet(BulkDeleteMixin, BulkCreateMixin, viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = Role.objects.all()\n serializer_class = RoleSerializer\n filter_class = RoleFilter\n filter_fields = [\"user\", \"collection\", \"kind\", \"user_ids\"]\n\n\nclass 
FacilityViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = Facility.objects.all()\n serializer_class = FacilitySerializer\n\n facility_values = [\"id\", \"name\", \"num_classrooms\", \"num_users\", \"last_synced\"]\n\n dataset_keys = [\n \"dataset__id\",\n \"dataset__learner_can_edit_username\",\n \"dataset__learner_can_edit_name\",\n \"dataset__learner_can_edit_password\",\n \"dataset__learner_can_sign_up\",\n \"dataset__learner_can_delete_account\",\n \"dataset__learner_can_login_with_no_password\",\n \"dataset__show_download_button_in_learn\",\n \"dataset__description\",\n \"dataset__location\",\n \"dataset__registered\",\n \"dataset__preset\",\n ]\n\n values = tuple(facility_values + dataset_keys)\n\n # map function to pop() all of the dataset__ items into an dict\n # then assign that new dict to the `dataset` key of the facility\n def _map_dataset(facility, dataset_keys=dataset_keys):\n dataset = {}\n for dataset_key in dataset_keys:\n stripped_key = dataset_key.replace(\"dataset__\", \"\")\n dataset[stripped_key] = facility.pop(dataset_key)\n return dataset\n\n field_map = {\"dataset\": _map_dataset}\n\n def annotate_queryset(self, queryset):\n return (\n queryset.annotate(\n num_users=SQCount(\n FacilityUser.objects.filter(facility=OuterRef(\"id\")), field=\"id\"\n )\n )\n .annotate(\n num_classrooms=SQCount(\n Classroom.objects.filter(parent=OuterRef(\"id\")), field=\"id\"\n )\n )\n .annotate(\n last_synced=Subquery(\n TransferSession.objects.filter(\n filter=Func(\n Cast(OuterRef(\"dataset\"), TextField()),\n Value(\"-\"),\n Value(\"\"),\n function=\"replace\",\n output_field=TextField(),\n )\n )\n .order_by(\"-last_activity_timestamp\")\n .values(\"last_activity_timestamp\")\n )\n )\n )\n\n\nclass PublicFacilityViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Facility.objects.all()\n serializer_class = PublicFacilitySerializer\n\n\nclass ClassroomFilter(FilterSet):\n\n role = CharFilter(method=\"filter_has_role_for\")\n parent = ModelChoiceFilter(queryset=Facility.objects.all())\n\n def filter_has_role_for(self, queryset, name, value):\n requesting_user = self.request.user\n if requesting_user.is_superuser:\n return queryset\n\n # filter queryset by admin role and coach role\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n source_user=requesting_user,\n role_kind=role_kinds.ADMIN,\n descendant_collection=F(\"id\"),\n ) | HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n source_user=requesting_user, role_kind=value, descendant_collection=F(\"id\")\n )\n\n class Meta:\n model = Classroom\n fields = [\"role\", \"parent\"]\n\n\nclass ClassroomViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = Classroom.objects.all()\n serializer_class = ClassroomSerializer\n filter_class = ClassroomFilter\n\n values = (\n \"id\",\n \"name\",\n \"parent\",\n \"learner_count\",\n \"role__user__id\",\n \"role__user__devicepermissions__is_superuser\",\n \"role__user__full_name\",\n \"role__user__username\",\n )\n\n def annotate_queryset(self, queryset):\n return queryset.annotate(\n learner_count=SQCount(\n FacilityUser.objects.filter(memberships__collection=OuterRef(\"id\")),\n field=\"id\",\n )\n )\n\n def consolidate(self, items, queryset):\n output = []\n items = sorted(items, key=lambda x: x[\"id\"])\n coach_ids = list(\n set(\n [\n item[\"role__user__id\"]\n for item in items\n 
if item[\"role__user__id\"] is not None\n ]\n )\n )\n facility_roles = {\n obj.pop(\"user\"): obj\n for obj in Role.objects.filter(\n user_id__in=coach_ids, collection__kind=collection_kinds.FACILITY\n ).values(\"user\", \"kind\", \"collection\", \"id\")\n }\n for key, group in groupby(items, lambda x: x[\"id\"]):\n coaches = []\n for item in group:\n user_id = item.pop(\"role__user__id\")\n if (\n user_id in facility_roles\n and facility_roles[user_id][\"collection\"] == item[\"parent\"]\n ):\n roles = [facility_roles[user_id]]\n else:\n roles = []\n coach = {\n \"id\": user_id,\n \"facility\": item[\"parent\"],\n # Coerce to bool if None\n \"is_superuser\": bool(\n item.pop(\"role__user__devicepermissions__is_superuser\")\n ),\n \"full_name\": item.pop(\"role__user__full_name\"),\n \"username\": item.pop(\"role__user__username\"),\n \"roles\": roles,\n }\n if coach[\"id\"]:\n coaches.append(coach)\n item[\"coaches\"] = coaches\n output.append(item)\n return output\n\n\nclass LearnerGroupViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = LearnerGroup.objects.all()\n serializer_class = LearnerGroupSerializer\n\n filter_fields = (\"parent\",)\n\n values = (\"id\", \"name\", \"parent\", \"user_ids\")\n\n def annotate_queryset(self, queryset):\n return annotate_array_aggregate(queryset, user_ids=\"membership__user__id\")\n\n\nclass SignUpViewSet(viewsets.ViewSet):\n\n serializer_class = FacilityUserSerializer\n\n def extract_request_data(self, request):\n return {\n \"username\": request.data.get(\"username\", \"\"),\n \"full_name\": request.data.get(\"full_name\", \"\"),\n \"password\": request.data.get(\"password\", \"\"),\n \"facility\": request.data.get(\n \"facility\", Facility.get_default_facility().id\n ),\n \"gender\": request.data.get(\"gender\", \"\"),\n \"birth_year\": request.data.get(\"birth_year\", \"\"),\n }\n\n def create(self, request):\n\n data = self.extract_request_data(request)\n # we validate the user's input, and if valid, login as user\n serialized_user = self.serializer_class(data=data)\n if serialized_user.is_valid(raise_exception=True):\n serialized_user.save()\n if data[\"password\"] != \"NOT_SPECIFIED\":\n serialized_user.instance.set_password(data[\"password\"])\n serialized_user.instance.save()\n authenticated_user = authenticate(\n username=data[\"username\"],\n password=data[\"password\"],\n facility=data[\"facility\"],\n )\n login(request, authenticated_user)\n return Response(serialized_user.data, status=status.HTTP_201_CREATED)\n\n\nclass SetNonSpecifiedPasswordView(views.APIView):\n def post(self, request):\n username = request.data.get(\"username\", \"\")\n password = request.data.get(\"password\", \"\")\n facility_id = request.data.get(\"facility\", None)\n\n if not username or not password or not facility_id:\n return Response(\n \"Must specify username, password, and facility\",\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n error_message = \"Suitable user does not exist\"\n\n try:\n user = FacilityUser.objects.get(username=username, facility=facility_id)\n except ObjectDoesNotExist:\n raise Http404(error_message)\n\n if user.password != \"NOT_SPECIFIED\":\n raise Http404(error_message)\n\n user.set_password(password)\n user.save()\n\n return Response()\n\n\n@method_decorator(ensure_csrf_cookie, name=\"dispatch\")\nclass SessionViewSet(viewsets.ViewSet):\n def create(self, request):\n username = request.data.get(\"username\", \"\")\n password = 
request.data.get(\"password\", \"\")\n facility_id = request.data.get(\"facility\", None)\n\n # Only enforce this when running in an app\n if (\n interface.enabled\n and not allow_other_browsers_to_connect()\n and not valid_app_key_on_request(request)\n ):\n return Response(\n [{\"id\": error_constants.INVALID_CREDENTIALS, \"metadata\": {}}],\n status=status.HTTP_401_UNAUTHORIZED,\n )\n\n # Find the FacilityUser we're looking for use later on\n try:\n unauthenticated_user = FacilityUser.objects.get(\n username__iexact=username, facility=facility_id\n )\n except ObjectDoesNotExist:\n unauthenticated_user = None\n\n user = authenticate(username=username, password=password, facility=facility_id)\n if user is not None and user.is_active:\n # Correct password, and the user is marked \"active\"\n login(request, user)\n # Success!\n return self.get_session_response(request)\n elif (\n unauthenticated_user is not None\n and unauthenticated_user.password == \"NOT_SPECIFIED\"\n ):\n # Here - we have a Learner whose password is \"NOT_SPECIFIED\" because they were created\n # while the \"Require learners to log in with password\" setting was disabled - but now\n # it is enabled again.\n return Response(\n [\n {\n \"id\": error_constants.PASSWORD_NOT_SPECIFIED,\n \"metadata\": {\n \"field\": \"password\",\n \"message\": \"Username is valid, but password needs to be set before login.\",\n },\n }\n ],\n status=status.HTTP_400_BAD_REQUEST,\n )\n elif (\n not password\n and FacilityUser.objects.filter(\n username__iexact=username, facility=facility_id\n ).exists()\n ):\n # Password was missing, but username is valid, prompt to give password\n return Response(\n [\n {\n \"id\": error_constants.MISSING_PASSWORD,\n \"metadata\": {\n \"field\": \"password\",\n \"message\": \"Username is valid, but password is missing.\",\n },\n }\n ],\n status=status.HTTP_400_BAD_REQUEST,\n )\n else:\n # Respond with error\n return Response(\n [{\"id\": error_constants.INVALID_CREDENTIALS, \"metadata\": {}}],\n status=status.HTTP_401_UNAUTHORIZED,\n )\n\n def destroy(self, request, pk=None):\n logout(request)\n return Response([])\n\n def retrieve(self, request, pk=None):\n return self.get_session_response(request)\n\n def get_session_response(self, request):\n user = request.user\n session_key = \"current\"\n server_time = now()\n session = user.session_data\n session.update(\n {\n \"id\": session_key,\n \"server_time\": server_time,\n \"app_context\": valid_app_key_on_request(request),\n }\n )\n\n visitor_cookie_expiry = datetime.utcnow() + timedelta(days=365)\n\n if isinstance(user, AnonymousUser):\n response = Response(session)\n if not request.COOKIES.get(\"visitor_id\"):\n visitor_id = str(uuid4().hex)\n response.set_cookie(\n \"visitor_id\", visitor_id, expires=visitor_cookie_expiry\n )\n else:\n response.set_cookie(\n \"visitor_id\",\n request.COOKIES.get(\"visitor_id\"),\n expires=visitor_cookie_expiry,\n )\n return response\n # Set last activity on session to the current time to prevent session timeout\n # Only do this for logged in users, as anonymous users cannot get logged out!\n request.session[\"last_session_request\"] = int(time.time())\n # Default to active, only assume not active when explicitly set.\n active = True if request.GET.get(\"active\", \"true\") == \"true\" else False\n\n # Can only record user session log data for FacilityUsers.\n if active and isinstance(user, FacilityUser):\n UserSessionLog.update_log(user)\n\n response = Response(session)\n return response\n", "path": "kolibri/core/auth/api.py" 
} ]
[ { "content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom itertools import groupby\nfrom uuid import uuid4\n\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login\nfrom django.contrib.auth import logout\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models import Func\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import TextField\nfrom django.db.models import Value\nfrom django.db.models.functions import Cast\nfrom django.db.models.query import F\nfrom django.http import Http404\nfrom django.utils.decorators import method_decorator\nfrom django.utils.timezone import now\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import ModelChoiceFilter\nfrom morango.models import TransferSession\nfrom rest_framework import filters\nfrom rest_framework import permissions\nfrom rest_framework import status\nfrom rest_framework import views\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom .constants import collection_kinds\nfrom .constants import role_kinds\nfrom .filters import HierarchyRelationsFilter\nfrom .models import Classroom\nfrom .models import Collection\nfrom .models import Facility\nfrom .models import FacilityDataset\nfrom .models import FacilityUser\nfrom .models import LearnerGroup\nfrom .models import Membership\nfrom .models import Role\nfrom .serializers import ClassroomSerializer\nfrom .serializers import FacilityDatasetSerializer\nfrom .serializers import FacilitySerializer\nfrom .serializers import FacilityUserSerializer\nfrom .serializers import LearnerGroupSerializer\nfrom .serializers import MembershipSerializer\nfrom .serializers import PublicFacilitySerializer\nfrom .serializers import RoleSerializer\nfrom kolibri.core import error_constants\nfrom kolibri.core.api import ValuesViewset\nfrom kolibri.core.device.utils import allow_guest_access\nfrom kolibri.core.device.utils import allow_other_browsers_to_connect\nfrom kolibri.core.device.utils import valid_app_key_on_request\nfrom kolibri.core.logger.models import UserSessionLog\nfrom kolibri.core.mixins import BulkCreateMixin\nfrom kolibri.core.mixins import BulkDeleteMixin\nfrom kolibri.core.query import annotate_array_aggregate\nfrom kolibri.core.query import SQCount\nfrom kolibri.plugins.app.utils import interface\n\n\nclass KolibriAuthPermissionsFilter(filters.BaseFilterBackend):\n \"\"\"\n A Django REST Framework filter backend that limits results to those where the\n requesting user has read object level permissions. 
This filtering is delegated\n to the ``filter_readable`` method on ``KolibriAbstractBaseUser``.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if request.method == \"GET\" and request.resolver_match.url_name.endswith(\n \"-list\"\n ):\n # only filter down the queryset in the case of the list view being requested\n return request.user.filter_readable(queryset)\n else:\n # otherwise, return the full queryset, as permission checks will happen object-by-object\n # (and filtering here then leads to 404's instead of the more correct 403's)\n return queryset\n\n\ndef _ensure_raw_dict(d):\n if hasattr(d, \"dict\"):\n d = d.dict()\n return dict(d)\n\n\nclass KolibriAuthPermissions(permissions.BasePermission):\n \"\"\"\n A Django REST Framework permissions class that defers to Kolibri's permissions\n system to determine object-level permissions.\n \"\"\"\n\n def validator(self, request, view, datum):\n model = view.get_serializer_class().Meta.model\n validated_data = view.get_serializer().to_internal_value(\n _ensure_raw_dict(datum)\n )\n return request.user.can_create(model, validated_data)\n\n def has_permission(self, request, view):\n\n # as `has_object_permission` isn't called for POST/create, we need to check here\n if request.method == \"POST\" and request.data:\n if type(request.data) is list:\n data = request.data\n else:\n data = [request.data]\n\n return all(self.validator(request, view, datum) for datum in data)\n\n # for other methods, we return True, as their permissions get checked below\n return True\n\n def has_object_permission(self, request, view, obj):\n # note that there is no entry for POST here, as creation is handled by `has_permission`, above\n if request.method in permissions.SAFE_METHODS: # 'GET', 'OPTIONS' or 'HEAD'\n return request.user.can_read(obj)\n elif request.method in [\"PUT\", \"PATCH\"]:\n return request.user.can_update(obj)\n elif request.method == \"DELETE\":\n return request.user.can_delete(obj)\n else:\n return False\n\n\nclass FacilityDatasetViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n serializer_class = FacilityDatasetSerializer\n\n values = (\n \"id\",\n \"learner_can_edit_username\",\n \"learner_can_edit_name\",\n \"learner_can_edit_password\",\n \"learner_can_sign_up\",\n \"learner_can_delete_account\",\n \"learner_can_login_with_no_password\",\n \"show_download_button_in_learn\",\n \"description\",\n \"location\",\n \"registered\",\n \"preset\",\n )\n\n field_map = {\"allow_guest_access\": lambda x: allow_guest_access()}\n\n def get_queryset(self):\n queryset = FacilityDataset.objects.filter(\n collection__kind=collection_kinds.FACILITY\n )\n facility_id = self.request.query_params.get(\"facility_id\", None)\n if facility_id is not None:\n queryset = queryset.filter(collection__id=facility_id)\n return queryset\n\n\nclass FacilityUserFilter(FilterSet):\n\n member_of = ModelChoiceFilter(\n method=\"filter_member_of\", queryset=Collection.objects.all()\n )\n\n def filter_member_of(self, queryset, name, value):\n return queryset.filter(Q(memberships__collection=value) | Q(facility=value))\n\n class Meta:\n model = FacilityUser\n fields = [\"member_of\"]\n\n\nclass FacilityUserViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = FacilityUser.objects.all()\n serializer_class = FacilityUserSerializer\n filter_class = FacilityUserFilter\n\n values = (\n 
\"id\",\n \"username\",\n \"full_name\",\n \"facility\",\n \"roles__kind\",\n \"roles__collection\",\n \"roles__id\",\n \"devicepermissions__is_superuser\",\n \"id_number\",\n \"gender\",\n \"birth_year\",\n )\n\n field_map = {\n \"is_superuser\": lambda x: bool(x.pop(\"devicepermissions__is_superuser\"))\n }\n\n def consolidate(self, items, queryset):\n output = []\n items = sorted(items, key=lambda x: x[\"id\"])\n for key, group in groupby(items, lambda x: x[\"id\"]):\n roles = []\n for item in group:\n role = {\n \"collection\": item.pop(\"roles__collection\"),\n \"kind\": item.pop(\"roles__kind\"),\n \"id\": item.pop(\"roles__id\"),\n }\n if role[\"collection\"]:\n # Our values call will return null for users with no assigned roles\n # So filter them here.\n roles.append(role)\n item[\"roles\"] = roles\n output.append(item)\n return output\n\n def set_password_if_needed(self, instance, serializer):\n with transaction.atomic():\n if serializer.validated_data.get(\"password\", \"\"):\n if serializer.validated_data.get(\"password\", \"\") != \"NOT_SPECIFIED\":\n instance.set_password(serializer.validated_data[\"password\"])\n instance.save()\n return instance\n\n def perform_update(self, serializer):\n instance = serializer.save()\n self.set_password_if_needed(instance, serializer)\n # if the user is updating their own password, ensure they don't get logged out\n if self.request.user == instance:\n update_session_auth_hash(self.request, instance)\n\n def perform_create(self, serializer):\n instance = serializer.save()\n self.set_password_if_needed(instance, serializer)\n\n\nclass FacilityUsernameViewSet(ValuesViewset):\n filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n filter_fields = (\"facility\",)\n search_fields = (\"^username\",)\n\n read_only = True\n\n values = (\"username\",)\n\n def get_queryset(self):\n if valid_app_key_on_request(self.request):\n # Special case for app context to return usernames for\n # the list display\n return FacilityUser.objects.all()\n return FacilityUser.objects.filter(\n dataset__learner_can_login_with_no_password=True, roles=None\n ).filter(\n Q(devicepermissions__is_superuser=False) | Q(devicepermissions__isnull=True)\n )\n\n\nclass MembershipFilter(FilterSet):\n user_ids = CharFilter(method=\"filter_user_ids\")\n\n def filter_user_ids(self, queryset, name, value):\n return queryset.filter(user_id__in=value.split(\",\"))\n\n class Meta:\n model = Membership\n fields = [\"user\", \"collection\", \"user_ids\"]\n\n\nclass MembershipViewSet(BulkDeleteMixin, BulkCreateMixin, viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = Membership.objects.all()\n serializer_class = MembershipSerializer\n filter_class = MembershipFilter\n filter_fields = [\"user\", \"collection\", \"user_ids\"]\n\n\nclass RoleFilter(FilterSet):\n user_ids = CharFilter(method=\"filter_user_ids\")\n\n def filter_user_ids(self, queryset, name, value):\n return queryset.filter(user_id__in=value.split(\",\"))\n\n class Meta:\n model = Role\n fields = [\"user\", \"collection\", \"kind\", \"user_ids\"]\n\n\nclass RoleViewSet(BulkDeleteMixin, BulkCreateMixin, viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = Role.objects.all()\n serializer_class = RoleSerializer\n filter_class = RoleFilter\n filter_fields = [\"user\", \"collection\", \"kind\", \"user_ids\"]\n\n\nclass 
FacilityViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = Facility.objects.all()\n serializer_class = FacilitySerializer\n\n facility_values = [\"id\", \"name\", \"num_classrooms\", \"num_users\", \"last_synced\"]\n\n dataset_keys = [\n \"dataset__id\",\n \"dataset__learner_can_edit_username\",\n \"dataset__learner_can_edit_name\",\n \"dataset__learner_can_edit_password\",\n \"dataset__learner_can_sign_up\",\n \"dataset__learner_can_delete_account\",\n \"dataset__learner_can_login_with_no_password\",\n \"dataset__show_download_button_in_learn\",\n \"dataset__description\",\n \"dataset__location\",\n \"dataset__registered\",\n \"dataset__preset\",\n ]\n\n values = tuple(facility_values + dataset_keys)\n\n # map function to pop() all of the dataset__ items into an dict\n # then assign that new dict to the `dataset` key of the facility\n def _map_dataset(facility, dataset_keys=dataset_keys):\n dataset = {}\n for dataset_key in dataset_keys:\n stripped_key = dataset_key.replace(\"dataset__\", \"\")\n dataset[stripped_key] = facility.pop(dataset_key)\n return dataset\n\n field_map = {\"dataset\": _map_dataset}\n\n def annotate_queryset(self, queryset):\n return (\n queryset.annotate(\n num_users=SQCount(\n FacilityUser.objects.filter(facility=OuterRef(\"id\")), field=\"id\"\n )\n )\n .annotate(\n num_classrooms=SQCount(\n Classroom.objects.filter(parent=OuterRef(\"id\")), field=\"id\"\n )\n )\n .annotate(\n last_synced=Subquery(\n TransferSession.objects.filter(\n filter=Func(\n Cast(OuterRef(\"dataset\"), TextField()),\n Value(\"-\"),\n Value(\"\"),\n function=\"replace\",\n output_field=TextField(),\n )\n )\n .order_by(\"-last_activity_timestamp\")\n .values(\"last_activity_timestamp\")[:1]\n )\n )\n )\n\n\nclass PublicFacilityViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Facility.objects.all()\n serializer_class = PublicFacilitySerializer\n\n\nclass ClassroomFilter(FilterSet):\n\n role = CharFilter(method=\"filter_has_role_for\")\n parent = ModelChoiceFilter(queryset=Facility.objects.all())\n\n def filter_has_role_for(self, queryset, name, value):\n requesting_user = self.request.user\n if requesting_user.is_superuser:\n return queryset\n\n # filter queryset by admin role and coach role\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n source_user=requesting_user,\n role_kind=role_kinds.ADMIN,\n descendant_collection=F(\"id\"),\n ) | HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n source_user=requesting_user, role_kind=value, descendant_collection=F(\"id\")\n )\n\n class Meta:\n model = Classroom\n fields = [\"role\", \"parent\"]\n\n\nclass ClassroomViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = Classroom.objects.all()\n serializer_class = ClassroomSerializer\n filter_class = ClassroomFilter\n\n values = (\n \"id\",\n \"name\",\n \"parent\",\n \"learner_count\",\n \"role__user__id\",\n \"role__user__devicepermissions__is_superuser\",\n \"role__user__full_name\",\n \"role__user__username\",\n )\n\n def annotate_queryset(self, queryset):\n return queryset.annotate(\n learner_count=SQCount(\n FacilityUser.objects.filter(memberships__collection=OuterRef(\"id\")),\n field=\"id\",\n )\n )\n\n def consolidate(self, items, queryset):\n output = []\n items = sorted(items, key=lambda x: x[\"id\"])\n coach_ids = list(\n set(\n [\n item[\"role__user__id\"]\n for item in 
items\n if item[\"role__user__id\"] is not None\n ]\n )\n )\n facility_roles = {\n obj.pop(\"user\"): obj\n for obj in Role.objects.filter(\n user_id__in=coach_ids, collection__kind=collection_kinds.FACILITY\n ).values(\"user\", \"kind\", \"collection\", \"id\")\n }\n for key, group in groupby(items, lambda x: x[\"id\"]):\n coaches = []\n for item in group:\n user_id = item.pop(\"role__user__id\")\n if (\n user_id in facility_roles\n and facility_roles[user_id][\"collection\"] == item[\"parent\"]\n ):\n roles = [facility_roles[user_id]]\n else:\n roles = []\n coach = {\n \"id\": user_id,\n \"facility\": item[\"parent\"],\n # Coerce to bool if None\n \"is_superuser\": bool(\n item.pop(\"role__user__devicepermissions__is_superuser\")\n ),\n \"full_name\": item.pop(\"role__user__full_name\"),\n \"username\": item.pop(\"role__user__username\"),\n \"roles\": roles,\n }\n if coach[\"id\"]:\n coaches.append(coach)\n item[\"coaches\"] = coaches\n output.append(item)\n return output\n\n\nclass LearnerGroupViewSet(ValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = LearnerGroup.objects.all()\n serializer_class = LearnerGroupSerializer\n\n filter_fields = (\"parent\",)\n\n values = (\"id\", \"name\", \"parent\", \"user_ids\")\n\n def annotate_queryset(self, queryset):\n return annotate_array_aggregate(queryset, user_ids=\"membership__user__id\")\n\n\nclass SignUpViewSet(viewsets.ViewSet):\n\n serializer_class = FacilityUserSerializer\n\n def extract_request_data(self, request):\n return {\n \"username\": request.data.get(\"username\", \"\"),\n \"full_name\": request.data.get(\"full_name\", \"\"),\n \"password\": request.data.get(\"password\", \"\"),\n \"facility\": request.data.get(\n \"facility\", Facility.get_default_facility().id\n ),\n \"gender\": request.data.get(\"gender\", \"\"),\n \"birth_year\": request.data.get(\"birth_year\", \"\"),\n }\n\n def create(self, request):\n\n data = self.extract_request_data(request)\n # we validate the user's input, and if valid, login as user\n serialized_user = self.serializer_class(data=data)\n if serialized_user.is_valid(raise_exception=True):\n serialized_user.save()\n if data[\"password\"] != \"NOT_SPECIFIED\":\n serialized_user.instance.set_password(data[\"password\"])\n serialized_user.instance.save()\n authenticated_user = authenticate(\n username=data[\"username\"],\n password=data[\"password\"],\n facility=data[\"facility\"],\n )\n login(request, authenticated_user)\n return Response(serialized_user.data, status=status.HTTP_201_CREATED)\n\n\nclass SetNonSpecifiedPasswordView(views.APIView):\n def post(self, request):\n username = request.data.get(\"username\", \"\")\n password = request.data.get(\"password\", \"\")\n facility_id = request.data.get(\"facility\", None)\n\n if not username or not password or not facility_id:\n return Response(\n \"Must specify username, password, and facility\",\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n error_message = \"Suitable user does not exist\"\n\n try:\n user = FacilityUser.objects.get(username=username, facility=facility_id)\n except ObjectDoesNotExist:\n raise Http404(error_message)\n\n if user.password != \"NOT_SPECIFIED\":\n raise Http404(error_message)\n\n user.set_password(password)\n user.save()\n\n return Response()\n\n\n@method_decorator(ensure_csrf_cookie, name=\"dispatch\")\nclass SessionViewSet(viewsets.ViewSet):\n def create(self, request):\n username = request.data.get(\"username\", \"\")\n 
password = request.data.get(\"password\", \"\")\n facility_id = request.data.get(\"facility\", None)\n\n # Only enforce this when running in an app\n if (\n interface.enabled\n and not allow_other_browsers_to_connect()\n and not valid_app_key_on_request(request)\n ):\n return Response(\n [{\"id\": error_constants.INVALID_CREDENTIALS, \"metadata\": {}}],\n status=status.HTTP_401_UNAUTHORIZED,\n )\n\n # Find the FacilityUser we're looking for use later on\n try:\n unauthenticated_user = FacilityUser.objects.get(\n username__iexact=username, facility=facility_id\n )\n except ObjectDoesNotExist:\n unauthenticated_user = None\n\n user = authenticate(username=username, password=password, facility=facility_id)\n if user is not None and user.is_active:\n # Correct password, and the user is marked \"active\"\n login(request, user)\n # Success!\n return self.get_session_response(request)\n elif (\n unauthenticated_user is not None\n and unauthenticated_user.password == \"NOT_SPECIFIED\"\n ):\n # Here - we have a Learner whose password is \"NOT_SPECIFIED\" because they were created\n # while the \"Require learners to log in with password\" setting was disabled - but now\n # it is enabled again.\n return Response(\n [\n {\n \"id\": error_constants.PASSWORD_NOT_SPECIFIED,\n \"metadata\": {\n \"field\": \"password\",\n \"message\": \"Username is valid, but password needs to be set before login.\",\n },\n }\n ],\n status=status.HTTP_400_BAD_REQUEST,\n )\n elif (\n not password\n and FacilityUser.objects.filter(\n username__iexact=username, facility=facility_id\n ).exists()\n ):\n # Password was missing, but username is valid, prompt to give password\n return Response(\n [\n {\n \"id\": error_constants.MISSING_PASSWORD,\n \"metadata\": {\n \"field\": \"password\",\n \"message\": \"Username is valid, but password is missing.\",\n },\n }\n ],\n status=status.HTTP_400_BAD_REQUEST,\n )\n else:\n # Respond with error\n return Response(\n [{\"id\": error_constants.INVALID_CREDENTIALS, \"metadata\": {}}],\n status=status.HTTP_401_UNAUTHORIZED,\n )\n\n def destroy(self, request, pk=None):\n logout(request)\n return Response([])\n\n def retrieve(self, request, pk=None):\n return self.get_session_response(request)\n\n def get_session_response(self, request):\n user = request.user\n session_key = \"current\"\n server_time = now()\n session = user.session_data\n session.update(\n {\n \"id\": session_key,\n \"server_time\": server_time,\n \"app_context\": valid_app_key_on_request(request),\n }\n )\n\n visitor_cookie_expiry = datetime.utcnow() + timedelta(days=365)\n\n if isinstance(user, AnonymousUser):\n response = Response(session)\n if not request.COOKIES.get(\"visitor_id\"):\n visitor_id = str(uuid4().hex)\n response.set_cookie(\n \"visitor_id\", visitor_id, expires=visitor_cookie_expiry\n )\n else:\n response.set_cookie(\n \"visitor_id\",\n request.COOKIES.get(\"visitor_id\"),\n expires=visitor_cookie_expiry,\n )\n return response\n # Set last activity on session to the current time to prevent session timeout\n # Only do this for logged in users, as anonymous users cannot get logged out!\n request.session[\"last_session_request\"] = int(time.time())\n # Default to active, only assume not active when explicitly set.\n active = True if request.GET.get(\"active\", \"true\") == \"true\" else False\n\n # Can only record user session log data for FacilityUsers.\n if active and isinstance(user, FacilityUser):\n UserSessionLog.update_log(user)\n\n response = Response(session)\n return response\n", "path": 
"kolibri/core/auth/api.py" } ]
diff --git a/kolibri/core/auth/api.py b/kolibri/core/auth/api.py
index 695786901bb..5c36a4f269d 100644
--- a/kolibri/core/auth/api.py
+++ b/kolibri/core/auth/api.py
@@ -366,7 +366,7 @@ def annotate_queryset(self, queryset):
                         )
                     )
                     .order_by("-last_activity_timestamp")
-                    .values("last_activity_timestamp")
+                    .values("last_activity_timestamp")[:1]
                 )
             )
         )
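The one-line change above appends `[:1]` to the subquery that computes `last_synced`: a Django `Subquery` used as a scalar annotation must return a single column and at most one row, which is what ordering the inner queryset and slicing off its first row guarantees. The following is a minimal, self-contained sketch of that idiom; it assumes nothing from Kolibri, and the `Author`/`Book` models, the in-memory SQLite database, and the sample data are invented purely for illustration.

```python
# Illustration of the Subquery + [:1] idiom; not Kolibri code.
import django
from django.conf import settings

# Standalone configuration so the sketch runs as a plain script.
settings.configure(
    INSTALLED_APPS=[],
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
)
django.setup()

from django.db import connection, models
from django.db.models import OuterRef, Subquery


class Author(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        app_label = "demo"


class Book(models.Model):
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    published_on = models.DateField()

    class Meta:
        app_label = "demo"


# Create the tables directly, much like Django's test framework does.
with connection.schema_editor() as editor:
    editor.create_model(Author)
    editor.create_model(Book)

ada = Author.objects.create(name="Ada")
Book.objects.create(author=ada, title="Old", published_on="2001-01-01")
Book.objects.create(author=ada, title="New", published_on="2020-01-01")

# The inner queryset is ordered and sliced to one row: a scalar Subquery
# annotation must yield a single column and at most a single row.
newest_title = Subquery(
    Book.objects.filter(author=OuterRef("pk"))
    .order_by("-published_on")
    .values("title")[:1]
)

print(list(Author.objects.annotate(newest_title=newest_title).values("name", "newest_title")))
# -> [{'name': 'Ada', 'newest_title': 'New'}]
```

Dropping the `[:1]` slice makes the inner query potentially multi-row, which can produce a "more than one row returned by a subquery used as an expression" error on backends such as PostgreSQL.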
nautobot__nautobot-4946
Computed fields not returned by GraphQL query
### Environment
* Nautobot version (Docker tag too if applicable): 2.0.3
* Python version: 3.10.12
* Database platform, version: postgres, 14.9
* Middleware(s):

Having issues accessing computed_fields in version 2.0.3. Computed fields are visible in the UI; however, a GraphQL query (from the UI) throws the following message: “CustomFieldModel.get_computed_field() got an unexpected keyword argument ‘slug’”

Query: `{ devices { name cpf_olt_name } }`

I have stopped/started Nautobot and rebooted the system. This was working prior to upgrading from 1.5. Has anyone else seen this? I was told to submit a bug by Glenn M in the Nautobot Slack channel.

### Steps to Reproduce
1. Upgraded from Nautobot 1.5 to 2.0.3
2. Created a new computed field: `{% if obj.type == "xgs-pon" %} olt-{{ obj.mac_address }} {% endif %}`
3. Attempted a GraphQL query for the new and pre-upgrade computed fields from the web interface and with pynautobot.

### Expected Behavior
The computed field key:value pair is returned.

### Observed Behavior
Received this error after a query from the web interface: “CustomFieldModel.get_computed_field() got an unexpected keyword argument ‘slug’”
[ { "content": "\"\"\"Library of generators for GraphQL.\"\"\"\n\nimport logging\n\nimport graphene\nimport graphene_django_optimizer as gql_optimizer\nfrom graphql import GraphQLError\n\nfrom nautobot.core.graphql.types import OptimizedNautobotObjectType\nfrom nautobot.core.graphql.utils import str_to_var_name, get_filtering_args_from_filterset\nfrom nautobot.core.utils.lookup import get_filterset_for_model\nfrom nautobot.extras.choices import RelationshipSideChoices\nfrom nautobot.extras.models import RelationshipAssociation\n\nlogger = logging.getLogger(__name__)\nRESOLVER_PREFIX = \"resolve_\"\n\n\ndef generate_restricted_queryset():\n \"\"\"\n Generate a function to return a restricted queryset compatible with the internal permissions system.\n\n Note that for built-in models such as ContentType the queryset has no `restrict` method, so we have to\n fail gracefully in that case.\n \"\"\"\n\n def get_queryset(queryset, info):\n if not hasattr(queryset, \"restrict\"):\n logger.debug(f\"Queryset {queryset} is not restrictable\")\n return queryset\n return queryset.restrict(info.context.user, \"view\")\n\n return get_queryset\n\n\ndef generate_null_choices_resolver(name, resolver_name):\n \"\"\"\n Generate function to resolve appropriate type when a field has `null=False` (default), `blank=True`, and\n `choices` defined.\n\n Args:\n name (str): name of the field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_fields_w_choices(model, info, **kwargs):\n field_value = getattr(model, name)\n if field_value:\n return field_value\n return None\n\n resolve_fields_w_choices.__name__ = resolver_name\n return resolve_fields_w_choices\n\n\ndef generate_filter_resolver(schema_type, resolver_name, field_name):\n \"\"\"\n Generate function to resolve OneToMany filtering.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n field_name (str): name of OneToMany field to filter\n \"\"\"\n filterset_class = schema_type._meta.filterset_class\n\n def resolve_filter(self, *args, **kwargs):\n if not filterset_class:\n return getattr(self, field_name).all()\n\n # Inverse of substitution logic from get_filtering_args_from_filterset() - transform \"_type\" back to \"type\"\n if \"_type\" in kwargs:\n kwargs[\"type\"] = kwargs.pop(\"_type\")\n\n resolved_obj = filterset_class(kwargs, getattr(self, field_name).all())\n\n # Check result filter for errors.\n if not resolved_obj.errors:\n return resolved_obj.qs.all()\n\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n\n resolve_filter.__name__ = resolver_name\n return resolve_filter\n\n\ndef generate_custom_field_resolver(key, resolver_name):\n \"\"\"Generate function to resolve each custom field within each DjangoObjectType.\n\n Args:\n key (str): unique key of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_custom_field(self, info, **kwargs):\n return self.cf.get(key, None)\n\n resolve_custom_field.__name__ = resolver_name\n return resolve_custom_field\n\n\ndef generate_computed_field_resolver(name, resolver_name):\n \"\"\"Generate an instance method for resolving an individual computed field within 
a given DjangoObjectType.\n\n Args:\n name (str): name of the computed field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_computed_field(self, info, **kwargs):\n return self.get_computed_field(slug=name)\n\n resolve_computed_field.__name__ = resolver_name\n return resolve_computed_field\n\n\ndef generate_relationship_resolver(name, resolver_name, relationship, side, peer_model):\n \"\"\"Generate function to resolve each custom relationship within each DjangoObjectType.\n\n Args:\n name (str): name of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n relationship (Relationship): Relationship object to generate a resolver for\n side (str): side of the relationship to use for the resolver\n peer_model (Model): Django Model of the peer of this relationship\n \"\"\"\n\n def resolve_relationship(self, info, **kwargs):\n \"\"\"Return a queryset or an object depending on the type of the relationship.\"\"\"\n peer_side = RelationshipSideChoices.OPPOSITE[side]\n query_params = {\"relationship\": relationship}\n # https://github.com/nautobot/nautobot/issues/1228\n # If querying for **only** the ID of the related object, for example:\n # { device(id:\"...\") { ... rel_my_relationship { id } } }\n # we will get this exception:\n # TypeError: Cannot call select_related() after .values() or .values_list()\n # This appears to be a bug in graphene_django_optimizer but I haven't found a known issue on GitHub.\n # For now we just work around it by catching the exception and retrying without optimization, below...\n if not relationship.symmetric:\n # Get the objects on the other side of this relationship\n query_params[f\"{side}_id\"] = self.pk\n\n try:\n queryset_ids = gql_optimizer.query(\n RelationshipAssociation.objects.filter(**query_params).values_list(f\"{peer_side}_id\", flat=True),\n info,\n )\n except TypeError:\n logger.debug(\"Caught TypeError in graphene_django_optimizer, falling back to un-optimized query\")\n queryset_ids = RelationshipAssociation.objects.filter(**query_params).values_list(\n f\"{peer_side}_id\", flat=True\n )\n else:\n # Get objects that are peers for this relationship, regardless of side\n try:\n queryset_ids = list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(source_id=self.pk, **query_params).values_list(\n \"destination_id\", flat=True\n ),\n info,\n )\n )\n queryset_ids += list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(destination_id=self.pk, **query_params).values_list(\n \"source_id\", flat=True\n ),\n info,\n )\n )\n except TypeError:\n logger.debug(\"Caught TypeError in graphene_django_optimizer, falling back to un-optimized query\")\n queryset_ids = list(\n RelationshipAssociation.objects.filter(source_id=self.pk, **query_params).values_list(\n \"destination_id\", flat=True\n ),\n )\n queryset_ids += list(\n RelationshipAssociation.objects.filter(destination_id=self.pk, **query_params).values_list(\n \"source_id\", flat=True\n ),\n )\n\n if relationship.has_many(peer_side):\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids), info)\n\n # Also apparently a graphene_django_optimizer bug - in the same query case as described above, here we may see:\n # AttributeError: object has no attribute \"only\"\n try:\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids).first(), info)\n except AttributeError:\n logger.debug(\"Caught AttributeError in 
graphene_django_optimizer, falling back to un-optimized query\")\n return peer_model.objects.filter(id__in=queryset_ids).first()\n\n resolve_relationship.__name__ = resolver_name\n return resolve_relationship\n\n\ndef generate_schema_type(app_name: str, model: object) -> OptimizedNautobotObjectType:\n \"\"\"\n Take a Django model and generate a Graphene Type class definition.\n\n Args:\n app_name (str): name of the application or plugin the Model is part of.\n model (object): Django Model\n\n Example:\n For a model with a name of \"Device\", the following class definition is generated:\n\n class DeviceType(OptimizedNautobotObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n\n If a FilterSet exists for this model at\n '<app_name>.filters.<ModelName>FilterSet' the filterset will be stored in\n filterset_class as follows:\n\n class DeviceType(OptimizedNautobotObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n filterset_class = DeviceFilterSet\n \"\"\"\n\n main_attrs = {}\n meta_attrs = {\"model\": model, \"fields\": \"__all__\"}\n\n # We'll attempt to find a FilterSet corresponding to the model\n # Not all models have a FilterSet defined so the function return none if it can't find a filterset\n meta_attrs[\"filterset_class\"] = get_filterset_for_model(model)\n\n main_attrs[\"Meta\"] = type(\"Meta\", (object,), meta_attrs)\n\n schema_type = type(f\"{model.__name__}Type\", (OptimizedNautobotObjectType,), main_attrs)\n return schema_type\n\n\ndef generate_list_search_parameters(schema_type):\n \"\"\"Generate list of query parameters for the list resolver based on a filterset.\"\"\"\n\n search_params = {\n \"limit\": graphene.Int(),\n \"offset\": graphene.Int(),\n }\n if schema_type._meta.filterset_class is not None:\n search_params.update(\n get_filtering_args_from_filterset(\n schema_type._meta.filterset_class,\n )\n )\n\n return search_params\n\n\ndef generate_single_item_resolver(schema_type, resolver_name):\n \"\"\"Generate a resolver for a single element of schema_type\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n (func): Resolver function for a single element\n \"\"\"\n model = schema_type._meta.model\n\n def single_resolver(self, info, **kwargs):\n obj_id = kwargs.get(\"id\", None)\n if obj_id:\n return gql_optimizer.query(\n model.objects.restrict(info.context.user, \"view\").filter(pk=obj_id), info\n ).first()\n return None\n\n single_resolver.__name__ = resolver_name\n return single_resolver\n\n\ndef generate_list_resolver(schema_type, resolver_name):\n \"\"\"\n Generate resolver for a list of schema_type.\n\n If a filterset_class is associated with the schema_type,\n the resolver will pass all arguments received to the FilterSet\n If not, it will return a restricted queryset for all objects\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n (func): Resolver function for list of element\n \"\"\"\n model = schema_type._meta.model\n\n def list_resolver(self, info, limit=None, offset=None, **kwargs):\n filterset_class = schema_type._meta.filterset_class\n if filterset_class is not None:\n resolved_obj = filterset_class(kwargs, model.objects.restrict(info.context.user, \"view\").all())\n\n # Check result filter for errors.\n if resolved_obj.errors:\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in 
resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n qs = resolved_obj.qs.all()\n\n else:\n qs = model.objects.restrict(info.context.user, \"view\").all()\n\n if offset:\n qs = qs[offset:]\n\n if limit:\n qs = qs[:limit]\n\n return gql_optimizer.query(qs, info)\n\n list_resolver.__name__ = resolver_name\n return list_resolver\n\n\ndef generate_attrs_for_schema_type(schema_type):\n \"\"\"Generate both attributes and resolvers for a given schema_type.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n\n Returns:\n (dict): Dict of attributes ready to merge into the QueryMixin class\n \"\"\"\n attrs = {}\n model = schema_type._meta.model\n\n single_item_name = str_to_var_name(model._meta.verbose_name)\n list_name = str_to_var_name(model._meta.verbose_name_plural)\n\n # Define Attributes for single item and list with their search parameters\n search_params = generate_list_search_parameters(schema_type)\n attrs[single_item_name] = graphene.Field(schema_type, id=graphene.ID())\n attrs[list_name] = graphene.List(schema_type, **search_params)\n\n # Define Resolvers for both single item and list\n single_item_resolver_name = f\"{RESOLVER_PREFIX}{single_item_name}\"\n list_resolver_name = f\"{RESOLVER_PREFIX}{list_name}\"\n attrs[single_item_resolver_name] = generate_single_item_resolver(schema_type, single_item_resolver_name)\n attrs[list_resolver_name] = generate_list_resolver(schema_type, list_resolver_name)\n\n return attrs\n", "path": "nautobot/core/graphql/generators.py" } ]
[ { "content": "\"\"\"Library of generators for GraphQL.\"\"\"\n\nimport logging\n\nimport graphene\nimport graphene_django_optimizer as gql_optimizer\nfrom graphql import GraphQLError\n\nfrom nautobot.core.graphql.types import OptimizedNautobotObjectType\nfrom nautobot.core.graphql.utils import str_to_var_name, get_filtering_args_from_filterset\nfrom nautobot.core.utils.lookup import get_filterset_for_model\nfrom nautobot.extras.choices import RelationshipSideChoices\nfrom nautobot.extras.models import RelationshipAssociation\n\nlogger = logging.getLogger(__name__)\nRESOLVER_PREFIX = \"resolve_\"\n\n\ndef generate_restricted_queryset():\n \"\"\"\n Generate a function to return a restricted queryset compatible with the internal permissions system.\n\n Note that for built-in models such as ContentType the queryset has no `restrict` method, so we have to\n fail gracefully in that case.\n \"\"\"\n\n def get_queryset(queryset, info):\n if not hasattr(queryset, \"restrict\"):\n logger.debug(f\"Queryset {queryset} is not restrictable\")\n return queryset\n return queryset.restrict(info.context.user, \"view\")\n\n return get_queryset\n\n\ndef generate_null_choices_resolver(name, resolver_name):\n \"\"\"\n Generate function to resolve appropriate type when a field has `null=False` (default), `blank=True`, and\n `choices` defined.\n\n Args:\n name (str): name of the field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_fields_w_choices(model, info, **kwargs):\n field_value = getattr(model, name)\n if field_value:\n return field_value\n return None\n\n resolve_fields_w_choices.__name__ = resolver_name\n return resolve_fields_w_choices\n\n\ndef generate_filter_resolver(schema_type, resolver_name, field_name):\n \"\"\"\n Generate function to resolve OneToMany filtering.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n field_name (str): name of OneToMany field to filter\n \"\"\"\n filterset_class = schema_type._meta.filterset_class\n\n def resolve_filter(self, *args, **kwargs):\n if not filterset_class:\n return getattr(self, field_name).all()\n\n # Inverse of substitution logic from get_filtering_args_from_filterset() - transform \"_type\" back to \"type\"\n if \"_type\" in kwargs:\n kwargs[\"type\"] = kwargs.pop(\"_type\")\n\n resolved_obj = filterset_class(kwargs, getattr(self, field_name).all())\n\n # Check result filter for errors.\n if not resolved_obj.errors:\n return resolved_obj.qs.all()\n\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n\n resolve_filter.__name__ = resolver_name\n return resolve_filter\n\n\ndef generate_custom_field_resolver(key, resolver_name):\n \"\"\"Generate function to resolve each custom field within each DjangoObjectType.\n\n Args:\n key (str): unique key of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_custom_field(self, info, **kwargs):\n return self.cf.get(key, None)\n\n resolve_custom_field.__name__ = resolver_name\n return resolve_custom_field\n\n\ndef generate_computed_field_resolver(name, resolver_name):\n \"\"\"Generate an instance method for resolving an individual computed field within 
a given DjangoObjectType.\n\n Args:\n name (str): name of the computed field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n \"\"\"\n\n def resolve_computed_field(self, info, **kwargs):\n return self.get_computed_field(key=name)\n\n resolve_computed_field.__name__ = resolver_name\n return resolve_computed_field\n\n\ndef generate_relationship_resolver(name, resolver_name, relationship, side, peer_model):\n \"\"\"Generate function to resolve each custom relationship within each DjangoObjectType.\n\n Args:\n name (str): name of the custom field to resolve\n resolver_name (str): name of the resolver as declare in DjangoObjectType\n relationship (Relationship): Relationship object to generate a resolver for\n side (str): side of the relationship to use for the resolver\n peer_model (Model): Django Model of the peer of this relationship\n \"\"\"\n\n def resolve_relationship(self, info, **kwargs):\n \"\"\"Return a queryset or an object depending on the type of the relationship.\"\"\"\n peer_side = RelationshipSideChoices.OPPOSITE[side]\n query_params = {\"relationship\": relationship}\n # https://github.com/nautobot/nautobot/issues/1228\n # If querying for **only** the ID of the related object, for example:\n # { device(id:\"...\") { ... rel_my_relationship { id } } }\n # we will get this exception:\n # TypeError: Cannot call select_related() after .values() or .values_list()\n # This appears to be a bug in graphene_django_optimizer but I haven't found a known issue on GitHub.\n # For now we just work around it by catching the exception and retrying without optimization, below...\n if not relationship.symmetric:\n # Get the objects on the other side of this relationship\n query_params[f\"{side}_id\"] = self.pk\n\n try:\n queryset_ids = gql_optimizer.query(\n RelationshipAssociation.objects.filter(**query_params).values_list(f\"{peer_side}_id\", flat=True),\n info,\n )\n except TypeError:\n logger.debug(\"Caught TypeError in graphene_django_optimizer, falling back to un-optimized query\")\n queryset_ids = RelationshipAssociation.objects.filter(**query_params).values_list(\n f\"{peer_side}_id\", flat=True\n )\n else:\n # Get objects that are peers for this relationship, regardless of side\n try:\n queryset_ids = list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(source_id=self.pk, **query_params).values_list(\n \"destination_id\", flat=True\n ),\n info,\n )\n )\n queryset_ids += list(\n gql_optimizer.query(\n RelationshipAssociation.objects.filter(destination_id=self.pk, **query_params).values_list(\n \"source_id\", flat=True\n ),\n info,\n )\n )\n except TypeError:\n logger.debug(\"Caught TypeError in graphene_django_optimizer, falling back to un-optimized query\")\n queryset_ids = list(\n RelationshipAssociation.objects.filter(source_id=self.pk, **query_params).values_list(\n \"destination_id\", flat=True\n ),\n )\n queryset_ids += list(\n RelationshipAssociation.objects.filter(destination_id=self.pk, **query_params).values_list(\n \"source_id\", flat=True\n ),\n )\n\n if relationship.has_many(peer_side):\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids), info)\n\n # Also apparently a graphene_django_optimizer bug - in the same query case as described above, here we may see:\n # AttributeError: object has no attribute \"only\"\n try:\n return gql_optimizer.query(peer_model.objects.filter(id__in=queryset_ids).first(), info)\n except AttributeError:\n logger.debug(\"Caught AttributeError in 
graphene_django_optimizer, falling back to un-optimized query\")\n return peer_model.objects.filter(id__in=queryset_ids).first()\n\n resolve_relationship.__name__ = resolver_name\n return resolve_relationship\n\n\ndef generate_schema_type(app_name: str, model: object) -> OptimizedNautobotObjectType:\n \"\"\"\n Take a Django model and generate a Graphene Type class definition.\n\n Args:\n app_name (str): name of the application or plugin the Model is part of.\n model (object): Django Model\n\n Example:\n For a model with a name of \"Device\", the following class definition is generated:\n\n class DeviceType(OptimizedNautobotObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n\n If a FilterSet exists for this model at\n '<app_name>.filters.<ModelName>FilterSet' the filterset will be stored in\n filterset_class as follows:\n\n class DeviceType(OptimizedNautobotObjectType):\n Meta:\n model = Device\n fields = [\"__all__\"]\n filterset_class = DeviceFilterSet\n \"\"\"\n\n main_attrs = {}\n meta_attrs = {\"model\": model, \"fields\": \"__all__\"}\n\n # We'll attempt to find a FilterSet corresponding to the model\n # Not all models have a FilterSet defined so the function return none if it can't find a filterset\n meta_attrs[\"filterset_class\"] = get_filterset_for_model(model)\n\n main_attrs[\"Meta\"] = type(\"Meta\", (object,), meta_attrs)\n\n schema_type = type(f\"{model.__name__}Type\", (OptimizedNautobotObjectType,), main_attrs)\n return schema_type\n\n\ndef generate_list_search_parameters(schema_type):\n \"\"\"Generate list of query parameters for the list resolver based on a filterset.\"\"\"\n\n search_params = {\n \"limit\": graphene.Int(),\n \"offset\": graphene.Int(),\n }\n if schema_type._meta.filterset_class is not None:\n search_params.update(\n get_filtering_args_from_filterset(\n schema_type._meta.filterset_class,\n )\n )\n\n return search_params\n\n\ndef generate_single_item_resolver(schema_type, resolver_name):\n \"\"\"Generate a resolver for a single element of schema_type\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n (func): Resolver function for a single element\n \"\"\"\n model = schema_type._meta.model\n\n def single_resolver(self, info, **kwargs):\n obj_id = kwargs.get(\"id\", None)\n if obj_id:\n return gql_optimizer.query(\n model.objects.restrict(info.context.user, \"view\").filter(pk=obj_id), info\n ).first()\n return None\n\n single_resolver.__name__ = resolver_name\n return single_resolver\n\n\ndef generate_list_resolver(schema_type, resolver_name):\n \"\"\"\n Generate resolver for a list of schema_type.\n\n If a filterset_class is associated with the schema_type,\n the resolver will pass all arguments received to the FilterSet\n If not, it will return a restricted queryset for all objects\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n resolver_name (str): name of the resolver\n\n Returns:\n (func): Resolver function for list of element\n \"\"\"\n model = schema_type._meta.model\n\n def list_resolver(self, info, limit=None, offset=None, **kwargs):\n filterset_class = schema_type._meta.filterset_class\n if filterset_class is not None:\n resolved_obj = filterset_class(kwargs, model.objects.restrict(info.context.user, \"view\").all())\n\n # Check result filter for errors.\n if resolved_obj.errors:\n errors = {}\n\n # Build error message from results\n # Error messages are collected from each filter object\n for key in 
resolved_obj.errors:\n errors[key] = resolved_obj.errors[key]\n\n # Raising this exception will send the error message in the response of the GraphQL request\n raise GraphQLError(errors)\n qs = resolved_obj.qs.all()\n\n else:\n qs = model.objects.restrict(info.context.user, \"view\").all()\n\n if offset:\n qs = qs[offset:]\n\n if limit:\n qs = qs[:limit]\n\n return gql_optimizer.query(qs, info)\n\n list_resolver.__name__ = resolver_name\n return list_resolver\n\n\ndef generate_attrs_for_schema_type(schema_type):\n \"\"\"Generate both attributes and resolvers for a given schema_type.\n\n Args:\n schema_type (DjangoObjectType): DjangoObjectType for a given model\n\n Returns:\n (dict): Dict of attributes ready to merge into the QueryMixin class\n \"\"\"\n attrs = {}\n model = schema_type._meta.model\n\n single_item_name = str_to_var_name(model._meta.verbose_name)\n list_name = str_to_var_name(model._meta.verbose_name_plural)\n\n # Define Attributes for single item and list with their search parameters\n search_params = generate_list_search_parameters(schema_type)\n attrs[single_item_name] = graphene.Field(schema_type, id=graphene.ID())\n attrs[list_name] = graphene.List(schema_type, **search_params)\n\n # Define Resolvers for both single item and list\n single_item_resolver_name = f\"{RESOLVER_PREFIX}{single_item_name}\"\n list_resolver_name = f\"{RESOLVER_PREFIX}{list_name}\"\n attrs[single_item_resolver_name] = generate_single_item_resolver(schema_type, single_item_resolver_name)\n attrs[list_resolver_name] = generate_list_resolver(schema_type, list_resolver_name)\n\n return attrs\n", "path": "nautobot/core/graphql/generators.py" } ]
diff --git a/changes/4851.fixed b/changes/4851.fixed
new file mode 100644
index 00000000000..c45c803acd0
--- /dev/null
+++ b/changes/4851.fixed
@@ -0,0 +1 @@
+Fixed an exception when trying to access computed fields via GraphQL.
diff --git a/nautobot/core/graphql/generators.py b/nautobot/core/graphql/generators.py
index bf9251e87e2..24d9cdf2972 100644
--- a/nautobot/core/graphql/generators.py
+++ b/nautobot/core/graphql/generators.py
@@ -116,7 +116,7 @@ def generate_computed_field_resolver(name, resolver_name):
     """
 
     def resolve_computed_field(self, info, **kwargs):
-        return self.get_computed_field(slug=name)
+        return self.get_computed_field(key=name)
 
     resolve_computed_field.__name__ = resolver_name
     return resolve_computed_field
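The patch above is a one-word rename: the generated resolver now calls `get_computed_field(key=name)` instead of `get_computed_field(slug=name)`, which is exactly the keyword the error message in the issue complains about. The toy, pure-Python sketch below uses a stand-in `Record` class; none of it is Nautobot code, only the calling convention shown in the diff is reused. It illustrates how the closure factory captures the computed field's key and renames the function so it can be attached as `resolve_<field>` on a generated GraphQL type.

```python
# Stand-in object with the get_computed_field(key=...) calling convention
# that the patched resolver relies on; an illustration, not Nautobot code.
class Record:
    def __init__(self, computed_fields):
        self._computed_fields = computed_fields

    def get_computed_field(self, key=None):
        return self._computed_fields.get(key)


def generate_computed_field_resolver(name, resolver_name):
    """Return a closure that captures the computed field's key, mirroring the pattern in the diff."""
    def resolve_computed_field(self, info=None, **kwargs):
        return self.get_computed_field(key=name)

    # Rename so the function can be bound as resolve_<field> on a generated type.
    resolve_computed_field.__name__ = resolver_name
    return resolve_computed_field


resolver = generate_computed_field_resolver("cpf_olt_name", "resolve_cpf_olt_name")
device = Record({"cpf_olt_name": "olt-00:11:22:33:44:55"})
print(resolver.__name__)  # resolve_cpf_olt_name
print(resolver(device))   # olt-00:11:22:33:44:55
```

The same structure explains the original bug: the closure baked in `slug=name`, so every computed-field resolution called a `get_computed_field()` signature that no longer accepts `slug`, producing the TypeError quoted in the issue.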
liberapay__liberapay.com-2027
Limit the input field for team names
The first step when creating a team at https://liberapay.com/about/teams is selecting a name. I tried to enter a name and it was rejected because it was too long. The name input field currently accepts 30 characters. I suggest reducing it to the maximum number of characters allowed for team names.
[ { "content": "from decimal import Decimal\nfrom ipaddress import ip_network\nimport json\nimport logging\nfrom operator import itemgetter\nimport os\nimport re\nimport socket\nfrom tempfile import mkstemp\nfrom time import time\nimport traceback\n\nimport babel.localedata\nfrom babel.messages.pofile import read_po\nfrom babel.numbers import parse_pattern\nimport boto3\nfrom mailshake import AmazonSESMailer, ToConsoleMailer, SMTPMailer\nimport pando\nfrom postgres.cursors import SimpleRowCursor\nimport psycopg2\nfrom psycopg2.extensions import adapt, AsIs, new_type, register_adapter, register_type\nfrom psycopg2_pool import PoolError\nimport requests\nimport sass\nimport sentry_sdk\nfrom state_chain import StateChain\n\nfrom liberapay import elsewhere\nimport liberapay.billing.payday\nfrom liberapay.exceptions import NeedDatabase\nfrom liberapay.i18n.base import (\n ALIASES, ALIASES_R, COUNTRIES, LANGUAGES_2, LOCALES, Locale, make_sorted_dict\n)\nfrom liberapay.i18n.currencies import Money, MoneyBasket, get_currency_exchange_rates\nfrom liberapay.i18n.plural_rules import get_function_from_rule\nfrom liberapay.models import DB\nfrom liberapay.models.account_elsewhere import _AccountElsewhere, AccountElsewhere\nfrom liberapay.models.community import _Community, Community\nfrom liberapay.models.encrypted import Encrypted\nfrom liberapay.models.exchange_route import ExchangeRoute\nfrom liberapay.models.participant import Participant\nfrom liberapay.models.payin import Payin\nfrom liberapay.models.repository import Repository\nfrom liberapay.models.tip import Tip\nfrom liberapay.security.crypto import Cryptograph\nfrom liberapay.utils import find_files, markdown, mkdir_p, resolve, urlquote\nfrom liberapay.utils.emails import compile_email_spt\nfrom liberapay.utils.http_caching import asset_etag\nfrom liberapay.utils.query_cache import QueryCache\nfrom liberapay.utils.types import Object\nfrom liberapay.version import get_version\nfrom liberapay.website import CustomUndefined\n\n\ndef canonical(env):\n canonical_scheme = env.canonical_scheme\n canonical_host = env.canonical_host\n cookie_domain = None\n if canonical_host:\n canonical_url = '%s://%s' % (canonical_scheme, canonical_host)\n if ':' not in canonical_host:\n cookie_domain = '.' 
+ canonical_host\n else:\n canonical_url = ''\n asset_url = canonical_url+'/assets/'\n return locals()\n\n\nclass CSP(bytes):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src\n based_on_default_src = set(b'''\n child-src connect-src font-src frame-src img-src manifest-src\n media-src object-src script-src style-src worker-src\n '''.split())\n\n def __new__(cls, x):\n if isinstance(x, dict):\n self = bytes.__new__(cls, b';'.join(b' '.join(t).rstrip() for t in x.items()) + b';')\n self.directives = dict(x)\n else:\n self = bytes.__new__(cls, x)\n self.directives = dict(\n (d.split(b' ', 1) + [b''])[:2] for d in self.split(b';') if d\n )\n return self\n\n def allow(self, directive, value):\n d = dict(self.directives)\n old_value = d.get(directive)\n if old_value is None and directive in self.based_on_default_src:\n old_value = d.get(b'default-src')\n d[directive] = b'%s %s' % (old_value, value) if old_value else value\n return CSP(d)\n\n\ndef csp(canonical_host, canonical_scheme, env):\n csp = (\n b\"default-src 'self' %(main_domain)s;\"\n b\"connect-src 'self' *.liberapay.org;\"\n b\"form-action 'self';\"\n b\"img-src * blob: data:;\"\n b\"object-src 'none';\"\n ) % {b'main_domain': canonical_host.encode('ascii')}\n csp += env.csp_extra.encode()\n if canonical_scheme == 'https':\n csp += b\"upgrade-insecure-requests;\"\n return {'csp': CSP(csp)}\n\n\ndef crypto():\n return {'cryptograph': Cryptograph()}\n\n\nclass NoDB:\n\n def __getattr__(self, attr):\n raise NeedDatabase()\n\n __bool__ = lambda self: False\n\n back_as_registry = {}\n\n def register_model(self, model):\n model.db = self\n\n\ndef database(env, tell_sentry):\n dburl = env.database_url\n maxconn = env.database_maxconn\n try:\n db = DB(dburl, maxconn=maxconn, cursor_factory=SimpleRowCursor)\n except psycopg2.OperationalError as e:\n tell_sentry(e, {}, allow_reraise=False)\n db = NoDB()\n\n itemgetter0 = itemgetter(0)\n\n def back_as_Object(cols, vals):\n return Object(zip(map(itemgetter0, cols), vals))\n\n db.back_as_registry[Object] = db.back_as_registry['Object'] = back_as_Object\n\n models = (\n _AccountElsewhere, AccountElsewhere, _Community, Community,\n Encrypted, ExchangeRoute, Participant, Payin, Repository, Tip,\n )\n for model in models:\n db.register_model(model)\n setattr(db, model.__name__, model)\n liberapay.billing.payday.Payday.db = db\n\n def adapt_set(s):\n return adapt(tuple(s))\n register_adapter(set, adapt_set)\n\n def adapt_money(m):\n return AsIs('(%s,%s)::currency_amount' % (adapt(m.amount), adapt(m.currency)))\n register_adapter(Money, adapt_money)\n\n def cast_currency_amount(v, cursor):\n return None if v in (None, '(,)') else Money(*v[1:-1].split(','))\n try:\n oid = db.one(\"SELECT 'currency_amount'::regtype::oid\")\n register_type(new_type((oid,), 'currency_amount', cast_currency_amount))\n except (psycopg2.ProgrammingError, NeedDatabase):\n pass\n\n def adapt_money_basket(b):\n return AsIs(\n \"_wrap_amounts('%s'::jsonb)\" %\n json.dumps({k: str(v) for k, v in b.amounts.items() if v}).replace(\"'\", \"''\")\n )\n register_adapter(MoneyBasket, adapt_money_basket)\n\n def cast_currency_basket(v, cursor):\n if v is None:\n return None\n parts = v[1:-1].split(',', 2)\n if len(parts) == 2:\n eur, usd = parts\n obj = None\n else:\n eur, usd, obj = parts\n if obj:\n amounts = json.loads(obj[1:-1].replace('\"\"', '\"') if obj[0] == '\"' else obj)\n amounts = {k: Decimal(str(v)) for k, v in amounts.items()}\n else:\n amounts = {}\n if eur:\n amounts['EUR'] 
= Decimal(eur)\n if usd:\n amounts['USD'] = Decimal(usd)\n return MoneyBasket(**amounts)\n try:\n oid = db.one(\"SELECT 'currency_basket'::regtype::oid\")\n register_type(new_type((oid,), 'currency_basket', cast_currency_basket))\n except (psycopg2.ProgrammingError, NeedDatabase):\n pass\n\n use_qc = not env.override_query_cache\n qc1 = QueryCache(db, threshold=(1 if use_qc else 0))\n qc5 = QueryCache(db, threshold=(5 if use_qc else 0))\n\n return {'db': db, 'db_qc1': qc1, 'db_qc5': qc5}\n\n\nclass AppConf:\n\n fields = dict(\n app_name=str,\n bitbucket_callback=str,\n bitbucket_id=str,\n bitbucket_secret=str,\n bot_github_token=str,\n bot_github_username=str,\n check_avatar_urls=bool,\n check_email_domains=bool,\n check_email_servers=bool,\n cron_intervals=dict,\n facebook_callback=str,\n facebook_id=str,\n facebook_secret=str,\n github_callback=str,\n github_id=str,\n github_secret=str,\n gitlab_callback=str,\n gitlab_id=str,\n gitlab_secret=str,\n google_callback=str,\n google_id=str,\n google_secret=str,\n linuxfr_callback=str,\n linuxfr_id=str,\n linuxfr_secret=str,\n log_emails=bool,\n mangopay_base_url=str,\n mangopay_client_id=str,\n mangopay_client_password=str,\n openstreetmap_api_url=str,\n openstreetmap_auth_url=str,\n openstreetmap_callback=str,\n openstreetmap_id=str,\n openstreetmap_secret=str,\n password_rounds=int,\n payday_label=str,\n payday_repo=str,\n payin_methods=dict,\n paypal_domain=str,\n paypal_id=str,\n paypal_secret=str,\n s3_endpoint=str,\n s3_public_access_key=str,\n s3_secret_key=str,\n s3_region=str,\n s3_payday_logs_bucket=str,\n ses_feedback_queue_url=str,\n ses_region=str,\n sepa_creditor_identifier=str,\n show_sandbox_warning=bool,\n socket_timeout=float,\n smtp_host=str,\n smtp_port=int,\n smtp_username=str,\n smtp_password=str,\n smtp_use_tls=bool,\n stripe_callback_secret=str,\n stripe_connect_id=str,\n stripe_publishable_key=str,\n stripe_secret_key=str,\n trusted_proxies=list,\n twitch_id=str,\n twitch_secret=str,\n twitter_callback=str,\n twitter_id=str,\n twitter_secret=str,\n )\n\n def __init__(self, d):\n d = d if isinstance(d, dict) else dict(d)\n\n unexpected = set(d) - set(self.fields)\n if unexpected:\n print(\"Found %i unexpected variables in the app_conf table: %s\" %\n (len(unexpected), ' '.join(unexpected)))\n\n missing, mistyped = [], []\n for k, t in self.fields.items():\n if k in d:\n v = d[k]\n if isinstance(v, t):\n self.__dict__[k] = v\n else:\n mistyped.append((k, v, t))\n else:\n missing.append(k)\n if missing:\n print('Missing configuration variables: ', ' '.join(missing))\n for k, v, t in mistyped:\n print('Invalid configuration variable, %s: %s is of type %s, not %s' %\n (k, json.dumps(v), type(v), t))\n\n self.missing = missing\n self.mistyped = mistyped\n self.unexpected = unexpected\n\n\ndef app_conf(db):\n if not db:\n return {'app_conf': None}\n app_conf = AppConf(db.all(\"SELECT key, value FROM app_conf\"))\n if app_conf:\n socket.setdefaulttimeout(app_conf.socket_timeout)\n return {'app_conf': app_conf}\n\n\ndef trusted_proxies(app_conf, env, tell_sentry):\n if not app_conf:\n return {'trusted_proxies': []}\n def parse_network(net):\n if net == 'private':\n return [net]\n elif net.startswith('https://'):\n d = env.log_dir + '/trusted_proxies/'\n mkdir_p(d)\n filename = d + urlquote(net, '')\n skip_download = (\n os.path.exists(filename) and\n os.stat(filename).st_size > 0 and\n os.stat(filename).st_mtime > time() - 60*60*24*7\n )\n if not skip_download:\n tmpfd, tmp_path = mkstemp(dir=d)\n with open(tmpfd, 'w') as f:\n 
f.write(requests.get(net).text)\n os.rename(tmp_path, filename)\n with open(filename, 'rb') as f:\n return [ip_network(x) for x in f.read().decode('ascii').strip().split()]\n else:\n return [ip_network(net)]\n try:\n return {'trusted_proxies': [\n sum((parse_network(net) for net in networks), [])\n for networks in (app_conf.trusted_proxies or ())\n ]}\n except Exception as e:\n tell_sentry(e, {})\n return {'trusted_proxies': []}\n\n\ndef mail(app_conf, env, project_root='.'):\n if not app_conf:\n return\n smtp_conf = {\n k[5:]: v for k, v in app_conf.__dict__.items() if k.startswith('smtp_')\n }\n if smtp_conf:\n smtp_conf.setdefault('timeout', app_conf.socket_timeout)\n if getattr(app_conf, 'ses_region', None):\n mailer = AmazonSESMailer(\n env.aws_access_key_id, env.aws_secret_access_key,\n region_name=app_conf.ses_region\n )\n elif smtp_conf:\n mailer = SMTPMailer(**smtp_conf)\n else:\n mailer = ToConsoleMailer()\n emails = {}\n emails_dir = project_root+'/emails/'\n i = len(emails_dir)\n for spt in find_files(emails_dir, '*.spt'):\n base_name = spt[i:-4]\n emails[base_name] = compile_email_spt(spt)\n\n def log_email(message):\n message = dict(message)\n html, text = message.pop('html'), message.pop('text')\n print('\\n', ' ', '='*26, 'BEGIN EMAIL', '='*26)\n print(json.dumps(message))\n print('[---] text/html')\n print(html)\n print('[---] text/plain')\n print(text)\n print(' ', '='*27, 'END EMAIL', '='*27)\n\n if app_conf.log_emails and not isinstance(mailer, ToConsoleMailer):\n log_email = log_email\n else:\n log_email = lambda *a, **kw: None\n\n return {'emails': emails, 'log_email': log_email, 'mailer': mailer}\n\n\ndef billing(app_conf):\n if not app_conf:\n return\n import mangopay\n sandbox = 'sandbox' in app_conf.mangopay_base_url\n mangopay.sandbox = sandbox\n handler = mangopay.APIRequest(\n client_id=app_conf.mangopay_client_id,\n passphrase=app_conf.mangopay_client_password,\n sandbox=sandbox,\n timeout=app_conf.socket_timeout,\n )\n mangopay.get_default_handler = mangopay.base.get_default_handler = \\\n mangopay.query.get_default_handler = lambda: handler\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/95\n if not sandbox:\n mangopay.api.logger.setLevel(logging.CRITICAL)\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/118\n mangopay.resources.LegalUser.person_type = 'LEGAL'\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/144\n import liberapay.billing.watcher\n mangopay.signals.request_finished.connect(liberapay.billing.watcher.on_response)\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/157\n cls = mangopay.resources.DirectPayIn\n field = mangopay.fields.Field(api_name='Billing')\n field.add_to_class(cls, 'billing')\n cls._meta.api_names[field.api_name] = field.name\n\n\ndef stripe(app_conf):\n if not app_conf:\n return\n import stripe\n stripe.api_key = app_conf.stripe_secret_key\n stripe.api_version = '2019-08-14'\n stripe.client_id = app_conf.stripe_connect_id\n stripe.max_network_retries = 2\n\n\ndef username_restrictions(www_root):\n return {'restricted_usernames': os.listdir(www_root)}\n\n\ndef version(env):\n try:\n version = get_version()\n except Exception:\n if env.instance_type == 'production':\n raise\n version = None\n return {'version': version}\n\n\ndef make_sentry_teller(env, version):\n if env.sentry_dsn:\n sentry_sdk.init(\n env.sentry_dsn,\n environment=env.instance_type,\n release=version,\n debug=False, # Pass `True` when investigating an integration issue\n )\n sentry = True\n else:\n sentry = 
False\n print(\"Won't log to Sentry (SENTRY_DSN is empty).\")\n\n def tell_sentry(exception, state, allow_reraise=True, level=None):\n r = {'sentry_ident': None}\n\n if isinstance(exception, pando.Response):\n if exception.code < 500:\n # Only log server errors\n return r\n if not level and exception.code in (502, 504):\n # This kind of error is usually transient and not our fault.\n level = 'warning'\n\n if isinstance(exception, NeedDatabase):\n # Don't flood Sentry when DB is down\n return r\n\n if isinstance(exception, PoolError):\n # If this happens, then the `DATABASE_MAXCONN` value is too low.\n state['exception'] = NeedDatabase()\n\n if isinstance(exception, psycopg2.Error):\n from liberapay.website import website\n if getattr(website, 'db', None):\n try:\n website.db.one('SELECT 1 AS x')\n except psycopg2.Error as e:\n # If it can't answer this simple query, then it's either\n # down or unreachable. Show the proper 503 error page.\n website.db.okay = False\n state['exception'] = NeedDatabase()\n if sentry:\n # Record the exception raised above instead of the\n # original one, to avoid duplicate issues.\n return tell_sentry(e, state, allow_reraise=True)\n\n if 'read-only' in str(exception):\n # DB is in read only mode\n state['db_is_readonly'] = True\n # Show the proper 503 error page\n state['exception'] = NeedDatabase()\n # Don't reraise this in tests\n allow_reraise = False\n\n if isinstance(exception, ValueError):\n if 'cannot contain NUL (0x00) characters' in str(exception):\n # https://github.com/liberapay/liberapay.com/issues/675\n response = state.get('response') or pando.Response()\n response.code = 400\n response.body = str(exception)\n r['exception'] = None\n r['response'] = response\n return r\n\n if not sentry:\n # No Sentry, log to stderr instead\n traceback.print_exc()\n # Reraise if allowed\n if env.sentry_reraise and allow_reraise:\n raise\n return r\n\n # Prepare context data\n if not level:\n level = 'warning' if isinstance(exception, Warning) else 'error'\n scope_dict = {'level': level}\n if state:\n try:\n # https://docs.sentry.io/platforms/python/enriching-events/identify-user/\n user_data = scope_dict['user'] = {}\n user = state.get('user')\n if isinstance(user, Participant):\n user_data['id'] = getattr(user, 'id', None)\n user_data['username'] = getattr(user, 'username', None)\n # https://develop.sentry.dev/sdk/event-payloads/request/\n request = state.get('request')\n if request is not None:\n user_data['ip_address'] = str(request.source)\n decode = lambda b: b.decode('ascii', 'backslashreplace')\n scope_dict['contexts'] = {}\n scope_dict['contexts']['request'] = {\n 'method': request.method,\n 'url': request.line.uri.decoded,\n 'headers': {\n decode(k): decode(b', '.join(v))\n for k, v in request.headers.items()\n if k != b'Cookie'\n },\n }\n # https://docs.sentry.io/platforms/python/enriching-events/tags/\n scope_dict['tags'] = {\n 'lang': getattr(state.get('locale'), 'language', None),\n }\n except Exception as e:\n tell_sentry(e, {})\n\n # Tell Sentry\n r['sentry_ident'] = sentry_sdk.capture_exception(exception, **scope_dict)\n return r\n\n CustomUndefined._tell_sentry = staticmethod(tell_sentry)\n\n return {'tell_sentry': tell_sentry}\n\n\nclass PlatformRegistry:\n \"\"\"Registry of platforms we support.\n \"\"\"\n\n def __init__(self, platforms):\n self.list = platforms\n self.dict = dict((p.name, p) for p in platforms)\n self.__dict__.update(self.dict)\n self.order = dict((p.name, i) for i, p in enumerate(platforms))\n self._hasattr_cache = {}\n\n def 
__contains__(self, platform):\n return platform.name in self.dict\n\n def __iter__(self):\n return iter(self.list)\n\n def __len__(self):\n return len(self.list)\n\n def _cache_hasattr(self, attr):\n r = PlatformRegistry([p for p in self if getattr(p, attr, None)])\n self._hasattr_cache[attr] = r\n return r\n\n def get(self, k, default=None):\n return self.dict.get(k, default)\n\n def hasattr(self, attr):\n r = self._hasattr_cache.get(attr)\n return r or self._cache_hasattr(attr)\n\n def index(self, name):\n return self.order[name]\n\n\ndef accounts_elsewhere(app_conf, asset, canonical_url, db):\n if not app_conf:\n return {'platforms': db, 'follow_platforms': db}\n platforms = []\n for cls in elsewhere.CLASSES:\n conf = {\n k[len(cls.name)+1:]: v\n for k, v in app_conf.__dict__.items() if k.startswith(cls.name+'_')\n }\n conf.setdefault('api_timeout', app_conf.socket_timeout)\n conf.setdefault('app_name', app_conf.app_name)\n conf.setdefault('app_url', canonical_url)\n if hasattr(cls, 'register_app'):\n callback_url = canonical_url + '/on/' + cls.name + ':{domain}/associate'\n platforms.append(cls(None, None, callback_url, **conf))\n elif hasattr(cls, 'based_on'):\n based_on = cls.based_on\n callback_url = canonical_url + '/on/' + cls.name + '/associate'\n platforms.append(cls(\n getattr(app_conf, based_on + '_id'),\n getattr(app_conf, based_on + '_secret'),\n callback_url,\n **conf\n ))\n else:\n platforms.append(cls(\n conf.pop('id'),\n conf.pop('secret'),\n conf.pop('callback', canonical_url + '/on/' + cls.name + '/associate'),\n **conf\n ))\n\n platforms = [p for p in platforms if p.api_secret or hasattr(p, 'register_app')]\n order = db.all(\"\"\"\n SELECT platform\n FROM (\n SELECT e.platform, count(*) as c\n FROM elsewhere e\n JOIN participants p ON p.id = e.participant\n WHERE p.status = 'active'\n AND p.hide_from_lists = 0\n GROUP BY e.platform\n ) a\n ORDER BY c DESC, platform ASC\n \"\"\")\n n = len(order)\n order = dict(zip(order, range(n)))\n platforms = sorted(platforms, key=lambda p: (order.get(p.name, n), p.name))\n platforms = PlatformRegistry(platforms)\n\n follow_platforms = [p for p in platforms if getattr(p, 'api_follows_path', None)]\n follow_platforms = PlatformRegistry(follow_platforms)\n\n for platform in platforms:\n if platform.fontawesome_name:\n continue\n platform.icon = asset(\n 'platforms/%s.svg' % platform.name,\n 'platforms/%s.16.png' % platform.name,\n )\n platform.logo = asset(\n 'platforms/%s.svg' % platform.name,\n 'platforms/%s.png' % platform.name,\n )\n\n return {'platforms': platforms, 'follow_platforms': follow_platforms}\n\n\ndef replace_unused_singulars(c):\n for m in list(c):\n msg = m.id\n if not isinstance(msg, tuple):\n continue\n if msg[0].startswith('<unused singular (hash='):\n del c[msg[0]]\n c[msg[1]] = m\n\n\ndef share_source_strings(catalog, shared_strings):\n \"\"\"Share message IDs between catalogs to save memory.\n \"\"\"\n if not shared_strings:\n shared_strings.update((m.id, m.id) for m in catalog)\n return\n for m in list(catalog):\n if not m.id:\n continue\n if m.id in shared_strings:\n m.id = shared_strings[m.id]\n catalog.delete(m.id)\n catalog[m.id] = m\n else:\n shared_strings[m.id] = m.id\n\n\ndef load_i18n(canonical_host, canonical_scheme, project_root, tell_sentry):\n # Load the locales\n localeDir = os.path.join(project_root, 'i18n', 'core')\n locales = LOCALES\n source_strings = {}\n for file in os.listdir(localeDir):\n try:\n parts = file.split(\".\")\n if not (len(parts) == 2 and parts[1] == \"po\"):\n continue\n 
lang = parts[0]\n with open(os.path.join(localeDir, file), 'rb') as f:\n l = locales[lang.lower()] = Locale(lang)\n c = l.catalog = read_po(f)\n share_source_strings(c, source_strings)\n c.plural_func = get_function_from_rule(c.plural_expr)\n replace_unused_singulars(c)\n try:\n l.countries = make_sorted_dict(COUNTRIES, l.territories)\n except KeyError:\n l.countries = COUNTRIES\n try:\n l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)\n except KeyError:\n l.languages_2 = LANGUAGES_2\n except Exception as e:\n tell_sentry(e, {})\n del source_strings\n\n # Unload the Babel data that we no longer need\n # We load a lot of data to populate the LANGUAGE_NAMES dict, we don't want\n # to keep it all in RAM.\n used_data_dict_addresses = set(id(l._data._data) for l in locales.values())\n for key, data_dict in list(babel.localedata._cache.items()):\n if id(data_dict) not in used_data_dict_addresses:\n del babel.localedata._cache[key]\n\n # Prepare a unique and sorted list for use in the language switcher\n percent = lambda l, total: sum((percent(s, len(s)) if isinstance(s, tuple) else 1) for s in l if s) / total\n for l in list(locales.values()):\n if l.language == 'en':\n l.completion = 1\n continue\n l.completion = percent([m.string for m in l.catalog if m.id and not m.fuzzy], len(l.catalog))\n if l.completion == 0:\n del locales[l.language]\n loc_url = canonical_scheme+'://%s.'+canonical_host\n domain, port = (canonical_host.split(':') + [None])[:2]\n port = int(port) if port else socket.getservbyname(canonical_scheme, 'tcp')\n subdomains = {\n l.subdomain: loc_url % l.subdomain for l in locales.values()\n if resolve(l.subdomain + '.' + domain, port)\n }\n lang_list = sorted(\n (\n (l.completion, l.language, l.language_name.title(), loc_url % l.subdomain)\n for l in set(locales.values()) if l.completion > 0.5\n ),\n key=lambda t: (-t[0], t[1]),\n )\n\n # Add year-less date format\n year_re = re.compile(r'(^y+[^a-zA-Z]+|[^a-zA-Z]+y+$)')\n for l in locales.values():\n short_format = l.date_formats['short'].pattern\n assert short_format[0] == 'y' or short_format[-1] == 'y', (l.language, short_format)\n l.date_formats['short_yearless'] = year_re.sub('', short_format)\n\n # Add aliases\n for k, v in list(locales.items()):\n locales.setdefault(ALIASES.get(k, k), v)\n locales.setdefault(ALIASES_R.get(k, k), v)\n for k, v in list(locales.items()):\n locales.setdefault(k.split('_', 1)[0], v)\n\n # Add universal strings\n # These strings don't need to be translated, but they have to be in the catalogs\n # so that they're counted as translated.\n for l in locales.values():\n l.catalog.add(\"PayPal\", \"PayPal\")\n\n # Patch the locales to look less formal\n locales['fr'].currency_formats['standard'] = parse_pattern('#,##0.00\\u202f\\xa4')\n locales['fr'].currencies['USD'] = 'dollar états-unien'\n\n # Load the markdown files\n docs = {}\n heading_re = re.compile(r'^(#+ )', re.M)\n for path in find_files(os.path.join(project_root, 'i18n'), '*.md'):\n d, b = os.path.split(path)\n doc = os.path.basename(d)\n lang = b[:-3]\n with open(path, 'rb') as f:\n md = f.read().decode('utf8')\n if md.startswith('# '):\n md = '\\n'.join(md.split('\\n')[1:]).strip()\n md = heading_re.sub(r'##\\1', md)\n docs.setdefault(doc, {}).__setitem__(lang, markdown.render(md))\n\n return {'docs': docs, 'lang_list': lang_list, 'locales': locales, 'subdomains': subdomains}\n\n\ndef asset_url_generator(env, asset_url, tell_sentry, www_root):\n def asset(*paths):\n for path in paths:\n fspath = www_root+'/assets/'+path\n etag = 
''\n try:\n if env.cache_static:\n etag = asset_etag(fspath)\n else:\n os.stat(fspath)\n except FileNotFoundError as e:\n if path == paths[-1]:\n if not os.path.exists(fspath + '.spt'):\n tell_sentry(e, {})\n else:\n continue\n except Exception as e:\n tell_sentry(e, {})\n return asset_url+path+(etag and '?etag='+etag)\n return {'asset': asset}\n\n\ndef load_scss_variables(project_root):\n \"\"\"Build a dict representing the `style/variables.scss` file.\n \"\"\"\n # Get the names of all the variables\n with open(project_root + '/style/variables.scss') as f:\n variables = f.read()\n names = [m.group(1) for m in re.finditer(r'^\\$([\\w-]+):', variables, re.M)]\n # Compile a big rule that uses all the variables\n props = ''.join('-x-{0}: ${0};'.format(name) for name in names)\n css = sass.compile(string=('%s\\nx { %s }' % (variables, props)))\n # Read the final values from the generated CSS\n d = dict((m.group(1), m.group(2)) for m in re.finditer(r'-x-([\\w-]+): (.+?);\\s', css))\n return {'scss_variables': d}\n\n\ndef s3(env):\n key, secret = env.aws_access_key_id, env.aws_secret_access_key\n if key and secret:\n s3 = boto3.client('s3', aws_access_key_id=key, aws_secret_access_key=secret)\n else:\n s3 = None\n return {'s3': s3}\n\n\ndef currency_exchange_rates(db):\n if not db:\n return\n return {'currency_exchange_rates': get_currency_exchange_rates(db)}\n\n\nminimal_chain = StateChain(\n version,\n make_sentry_teller,\n database,\n)\n\nfull_chain = StateChain(\n version,\n make_sentry_teller,\n crypto,\n database,\n canonical,\n csp,\n app_conf,\n mail,\n billing,\n stripe,\n username_restrictions,\n load_i18n,\n asset_url_generator,\n accounts_elsewhere,\n load_scss_variables,\n s3,\n trusted_proxies,\n currency_exchange_rates,\n)\n\n\ndef main():\n from os import environ\n environ['RUN_CRON_JOBS'] = 'no'\n from liberapay.main import website\n app_conf, env = website.app_conf, website.env\n if app_conf.missing or app_conf.mistyped or env.missing or env.malformed:\n raise SystemExit('The configuration is incorrect.')\n\n\nif __name__ == '__main__':\n main()\n", "path": "liberapay/wireup.py" } ]
[ { "content": "from decimal import Decimal\nfrom ipaddress import ip_network\nimport json\nimport logging\nfrom operator import itemgetter\nimport os\nimport re\nimport socket\nfrom tempfile import mkstemp\nfrom time import time\nimport traceback\n\nimport babel.localedata\nfrom babel.messages.pofile import read_po\nfrom babel.numbers import parse_pattern\nimport boto3\nfrom mailshake import AmazonSESMailer, ToConsoleMailer, SMTPMailer\nimport pando\nfrom postgres.cursors import SimpleRowCursor\nimport psycopg2\nfrom psycopg2.extensions import adapt, AsIs, new_type, register_adapter, register_type\nfrom psycopg2_pool import PoolError\nimport requests\nimport sass\nimport sentry_sdk\nfrom state_chain import StateChain\n\nfrom liberapay import elsewhere\nimport liberapay.billing.payday\nfrom liberapay.exceptions import NeedDatabase\nfrom liberapay.i18n.base import (\n ALIASES, ALIASES_R, COUNTRIES, LANGUAGES_2, LOCALES, Locale, make_sorted_dict\n)\nfrom liberapay.i18n.currencies import Money, MoneyBasket, get_currency_exchange_rates\nfrom liberapay.i18n.plural_rules import get_function_from_rule\nfrom liberapay.models import DB\nfrom liberapay.models.account_elsewhere import _AccountElsewhere, AccountElsewhere\nfrom liberapay.models.community import _Community, Community\nfrom liberapay.models.encrypted import Encrypted\nfrom liberapay.models.exchange_route import ExchangeRoute\nfrom liberapay.models.participant import Participant\nfrom liberapay.models.payin import Payin\nfrom liberapay.models.repository import Repository\nfrom liberapay.models.tip import Tip\nfrom liberapay.security.crypto import Cryptograph\nfrom liberapay.utils import find_files, markdown, mkdir_p, resolve, urlquote\nfrom liberapay.utils.emails import compile_email_spt\nfrom liberapay.utils.http_caching import asset_etag\nfrom liberapay.utils.query_cache import QueryCache\nfrom liberapay.utils.types import Object\nfrom liberapay.version import get_version\nfrom liberapay.website import CustomUndefined\n\n\ndef canonical(env):\n canonical_scheme = env.canonical_scheme\n canonical_host = env.canonical_host\n cookie_domain = None\n if canonical_host:\n canonical_url = '%s://%s' % (canonical_scheme, canonical_host)\n if ':' not in canonical_host:\n cookie_domain = '.' 
+ canonical_host\n else:\n canonical_url = ''\n asset_url = canonical_url+'/assets/'\n return locals()\n\n\nclass CSP(bytes):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src\n based_on_default_src = set(b'''\n child-src connect-src font-src frame-src img-src manifest-src\n media-src object-src script-src style-src worker-src\n '''.split())\n\n def __new__(cls, x):\n if isinstance(x, dict):\n self = bytes.__new__(cls, b';'.join(b' '.join(t).rstrip() for t in x.items()) + b';')\n self.directives = dict(x)\n else:\n self = bytes.__new__(cls, x)\n self.directives = dict(\n (d.split(b' ', 1) + [b''])[:2] for d in self.split(b';') if d\n )\n return self\n\n def allow(self, directive, value):\n d = dict(self.directives)\n old_value = d.get(directive)\n if old_value is None and directive in self.based_on_default_src:\n old_value = d.get(b'default-src')\n d[directive] = b'%s %s' % (old_value, value) if old_value else value\n return CSP(d)\n\n\ndef csp(canonical_host, canonical_scheme, env):\n csp = (\n b\"default-src 'self' %(main_domain)s;\"\n b\"connect-src 'self' *.liberapay.org;\"\n b\"form-action 'self';\"\n b\"img-src * blob: data:;\"\n b\"object-src 'none';\"\n ) % {b'main_domain': canonical_host.encode('ascii')}\n csp += env.csp_extra.encode()\n if canonical_scheme == 'https':\n csp += b\"upgrade-insecure-requests;\"\n return {'csp': CSP(csp)}\n\n\ndef crypto():\n return {'cryptograph': Cryptograph()}\n\n\nclass NoDB:\n\n def __getattr__(self, attr):\n raise NeedDatabase()\n\n __bool__ = lambda self: False\n\n back_as_registry = {}\n\n def register_model(self, model):\n model.db = self\n\n\ndef database(env, tell_sentry):\n dburl = env.database_url\n maxconn = env.database_maxconn\n try:\n db = DB(dburl, maxconn=maxconn, cursor_factory=SimpleRowCursor)\n except psycopg2.OperationalError as e:\n tell_sentry(e, {}, allow_reraise=False)\n db = NoDB()\n\n itemgetter0 = itemgetter(0)\n\n def back_as_Object(cols, vals):\n return Object(zip(map(itemgetter0, cols), vals))\n\n db.back_as_registry[Object] = db.back_as_registry['Object'] = back_as_Object\n\n models = (\n _AccountElsewhere, AccountElsewhere, _Community, Community,\n Encrypted, ExchangeRoute, Participant, Payin, Repository, Tip,\n )\n for model in models:\n db.register_model(model)\n setattr(db, model.__name__, model)\n liberapay.billing.payday.Payday.db = db\n\n def adapt_set(s):\n return adapt(tuple(s))\n register_adapter(set, adapt_set)\n\n def adapt_money(m):\n return AsIs('(%s,%s)::currency_amount' % (adapt(m.amount), adapt(m.currency)))\n register_adapter(Money, adapt_money)\n\n def cast_currency_amount(v, cursor):\n return None if v in (None, '(,)') else Money(*v[1:-1].split(','))\n try:\n oid = db.one(\"SELECT 'currency_amount'::regtype::oid\")\n register_type(new_type((oid,), 'currency_amount', cast_currency_amount))\n except (psycopg2.ProgrammingError, NeedDatabase):\n pass\n\n def adapt_money_basket(b):\n return AsIs(\n \"_wrap_amounts('%s'::jsonb)\" %\n json.dumps({k: str(v) for k, v in b.amounts.items() if v}).replace(\"'\", \"''\")\n )\n register_adapter(MoneyBasket, adapt_money_basket)\n\n def cast_currency_basket(v, cursor):\n if v is None:\n return None\n parts = v[1:-1].split(',', 2)\n if len(parts) == 2:\n eur, usd = parts\n obj = None\n else:\n eur, usd, obj = parts\n if obj:\n amounts = json.loads(obj[1:-1].replace('\"\"', '\"') if obj[0] == '\"' else obj)\n amounts = {k: Decimal(str(v)) for k, v in amounts.items()}\n else:\n amounts = {}\n if eur:\n amounts['EUR'] 
= Decimal(eur)\n if usd:\n amounts['USD'] = Decimal(usd)\n return MoneyBasket(**amounts)\n try:\n oid = db.one(\"SELECT 'currency_basket'::regtype::oid\")\n register_type(new_type((oid,), 'currency_basket', cast_currency_basket))\n except (psycopg2.ProgrammingError, NeedDatabase):\n pass\n\n use_qc = not env.override_query_cache\n qc1 = QueryCache(db, threshold=(1 if use_qc else 0))\n qc5 = QueryCache(db, threshold=(5 if use_qc else 0))\n\n return {'db': db, 'db_qc1': qc1, 'db_qc5': qc5}\n\n\nclass AppConf:\n\n fields = dict(\n app_name=str,\n bitbucket_callback=str,\n bitbucket_id=str,\n bitbucket_secret=str,\n bot_github_token=str,\n bot_github_username=str,\n check_avatar_urls=bool,\n check_email_domains=bool,\n check_email_servers=bool,\n cron_intervals=dict,\n facebook_callback=str,\n facebook_id=str,\n facebook_secret=str,\n github_callback=str,\n github_id=str,\n github_secret=str,\n gitlab_callback=str,\n gitlab_id=str,\n gitlab_secret=str,\n google_callback=str,\n google_id=str,\n google_secret=str,\n linuxfr_callback=str,\n linuxfr_id=str,\n linuxfr_secret=str,\n log_emails=bool,\n mangopay_base_url=str,\n mangopay_client_id=str,\n mangopay_client_password=str,\n openstreetmap_api_url=str,\n openstreetmap_auth_url=str,\n openstreetmap_callback=str,\n openstreetmap_id=str,\n openstreetmap_secret=str,\n password_rounds=int,\n payday_label=str,\n payday_repo=str,\n payin_methods=dict,\n paypal_domain=str,\n paypal_id=str,\n paypal_secret=str,\n s3_endpoint=str,\n s3_public_access_key=str,\n s3_secret_key=str,\n s3_region=str,\n s3_payday_logs_bucket=str,\n ses_feedback_queue_url=str,\n ses_region=str,\n sepa_creditor_identifier=str,\n show_sandbox_warning=bool,\n socket_timeout=float,\n smtp_host=str,\n smtp_port=int,\n smtp_username=str,\n smtp_password=str,\n smtp_use_tls=bool,\n stripe_callback_secret=str,\n stripe_connect_callback_secret=str,\n stripe_connect_id=str,\n stripe_publishable_key=str,\n stripe_secret_key=str,\n trusted_proxies=list,\n twitch_id=str,\n twitch_secret=str,\n twitter_callback=str,\n twitter_id=str,\n twitter_secret=str,\n )\n\n def __init__(self, d):\n d = d if isinstance(d, dict) else dict(d)\n\n unexpected = set(d) - set(self.fields)\n if unexpected:\n print(\"Found %i unexpected variables in the app_conf table: %s\" %\n (len(unexpected), ' '.join(unexpected)))\n\n missing, mistyped = [], []\n for k, t in self.fields.items():\n if k in d:\n v = d[k]\n if isinstance(v, t):\n self.__dict__[k] = v\n else:\n mistyped.append((k, v, t))\n else:\n missing.append(k)\n if missing:\n print('Missing configuration variables: ', ' '.join(missing))\n for k, v, t in mistyped:\n print('Invalid configuration variable, %s: %s is of type %s, not %s' %\n (k, json.dumps(v), type(v), t))\n\n self.missing = missing\n self.mistyped = mistyped\n self.unexpected = unexpected\n\n\ndef app_conf(db):\n if not db:\n return {'app_conf': None}\n app_conf = AppConf(db.all(\"SELECT key, value FROM app_conf\"))\n if app_conf:\n socket.setdefaulttimeout(app_conf.socket_timeout)\n return {'app_conf': app_conf}\n\n\ndef trusted_proxies(app_conf, env, tell_sentry):\n if not app_conf:\n return {'trusted_proxies': []}\n def parse_network(net):\n if net == 'private':\n return [net]\n elif net.startswith('https://'):\n d = env.log_dir + '/trusted_proxies/'\n mkdir_p(d)\n filename = d + urlquote(net, '')\n skip_download = (\n os.path.exists(filename) and\n os.stat(filename).st_size > 0 and\n os.stat(filename).st_mtime > time() - 60*60*24*7\n )\n if not skip_download:\n tmpfd, tmp_path = 
mkstemp(dir=d)\n with open(tmpfd, 'w') as f:\n f.write(requests.get(net).text)\n os.rename(tmp_path, filename)\n with open(filename, 'rb') as f:\n return [ip_network(x) for x in f.read().decode('ascii').strip().split()]\n else:\n return [ip_network(net)]\n try:\n return {'trusted_proxies': [\n sum((parse_network(net) for net in networks), [])\n for networks in (app_conf.trusted_proxies or ())\n ]}\n except Exception as e:\n tell_sentry(e, {})\n return {'trusted_proxies': []}\n\n\ndef mail(app_conf, env, project_root='.'):\n if not app_conf:\n return\n smtp_conf = {\n k[5:]: v for k, v in app_conf.__dict__.items() if k.startswith('smtp_')\n }\n if smtp_conf:\n smtp_conf.setdefault('timeout', app_conf.socket_timeout)\n if getattr(app_conf, 'ses_region', None):\n mailer = AmazonSESMailer(\n env.aws_access_key_id, env.aws_secret_access_key,\n region_name=app_conf.ses_region\n )\n elif smtp_conf:\n mailer = SMTPMailer(**smtp_conf)\n else:\n mailer = ToConsoleMailer()\n emails = {}\n emails_dir = project_root+'/emails/'\n i = len(emails_dir)\n for spt in find_files(emails_dir, '*.spt'):\n base_name = spt[i:-4]\n emails[base_name] = compile_email_spt(spt)\n\n def log_email(message):\n message = dict(message)\n html, text = message.pop('html'), message.pop('text')\n print('\\n', ' ', '='*26, 'BEGIN EMAIL', '='*26)\n print(json.dumps(message))\n print('[---] text/html')\n print(html)\n print('[---] text/plain')\n print(text)\n print(' ', '='*27, 'END EMAIL', '='*27)\n\n if app_conf.log_emails and not isinstance(mailer, ToConsoleMailer):\n log_email = log_email\n else:\n log_email = lambda *a, **kw: None\n\n return {'emails': emails, 'log_email': log_email, 'mailer': mailer}\n\n\ndef billing(app_conf):\n if not app_conf:\n return\n import mangopay\n sandbox = 'sandbox' in app_conf.mangopay_base_url\n mangopay.sandbox = sandbox\n handler = mangopay.APIRequest(\n client_id=app_conf.mangopay_client_id,\n passphrase=app_conf.mangopay_client_password,\n sandbox=sandbox,\n timeout=app_conf.socket_timeout,\n )\n mangopay.get_default_handler = mangopay.base.get_default_handler = \\\n mangopay.query.get_default_handler = lambda: handler\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/95\n if not sandbox:\n mangopay.api.logger.setLevel(logging.CRITICAL)\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/118\n mangopay.resources.LegalUser.person_type = 'LEGAL'\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/144\n import liberapay.billing.watcher\n mangopay.signals.request_finished.connect(liberapay.billing.watcher.on_response)\n\n # https://github.com/Mangopay/mangopay2-python-sdk/issues/157\n cls = mangopay.resources.DirectPayIn\n field = mangopay.fields.Field(api_name='Billing')\n field.add_to_class(cls, 'billing')\n cls._meta.api_names[field.api_name] = field.name\n\n\ndef stripe(app_conf):\n if not app_conf:\n return\n import stripe\n stripe.api_key = app_conf.stripe_secret_key\n stripe.api_version = '2019-08-14'\n stripe.client_id = app_conf.stripe_connect_id\n stripe.max_network_retries = 2\n\n\ndef username_restrictions(www_root):\n return {'restricted_usernames': os.listdir(www_root)}\n\n\ndef version(env):\n try:\n version = get_version()\n except Exception:\n if env.instance_type == 'production':\n raise\n version = None\n return {'version': version}\n\n\ndef make_sentry_teller(env, version):\n if env.sentry_dsn:\n sentry_sdk.init(\n env.sentry_dsn,\n environment=env.instance_type,\n release=version,\n debug=False, # Pass `True` when investigating an integration 
issue\n )\n sentry = True\n else:\n sentry = False\n print(\"Won't log to Sentry (SENTRY_DSN is empty).\")\n\n def tell_sentry(exception, state, allow_reraise=True, level=None):\n r = {'sentry_ident': None}\n\n if isinstance(exception, pando.Response):\n if exception.code < 500:\n # Only log server errors\n return r\n if not level and exception.code in (502, 504):\n # This kind of error is usually transient and not our fault.\n level = 'warning'\n\n if isinstance(exception, NeedDatabase):\n # Don't flood Sentry when DB is down\n return r\n\n if isinstance(exception, PoolError):\n # If this happens, then the `DATABASE_MAXCONN` value is too low.\n state['exception'] = NeedDatabase()\n\n if isinstance(exception, psycopg2.Error):\n from liberapay.website import website\n if getattr(website, 'db', None):\n try:\n website.db.one('SELECT 1 AS x')\n except psycopg2.Error as e:\n # If it can't answer this simple query, then it's either\n # down or unreachable. Show the proper 503 error page.\n website.db.okay = False\n state['exception'] = NeedDatabase()\n if sentry:\n # Record the exception raised above instead of the\n # original one, to avoid duplicate issues.\n return tell_sentry(e, state, allow_reraise=True)\n\n if 'read-only' in str(exception):\n # DB is in read only mode\n state['db_is_readonly'] = True\n # Show the proper 503 error page\n state['exception'] = NeedDatabase()\n # Don't reraise this in tests\n allow_reraise = False\n\n if isinstance(exception, ValueError):\n if 'cannot contain NUL (0x00) characters' in str(exception):\n # https://github.com/liberapay/liberapay.com/issues/675\n response = state.get('response') or pando.Response()\n response.code = 400\n response.body = str(exception)\n r['exception'] = None\n r['response'] = response\n return r\n\n if not sentry:\n # No Sentry, log to stderr instead\n traceback.print_exc()\n # Reraise if allowed\n if env.sentry_reraise and allow_reraise:\n raise\n return r\n\n # Prepare context data\n if not level:\n level = 'warning' if isinstance(exception, Warning) else 'error'\n scope_dict = {'level': level}\n if state:\n try:\n # https://docs.sentry.io/platforms/python/enriching-events/identify-user/\n user_data = scope_dict['user'] = {}\n user = state.get('user')\n if isinstance(user, Participant):\n user_data['id'] = getattr(user, 'id', None)\n user_data['username'] = getattr(user, 'username', None)\n # https://develop.sentry.dev/sdk/event-payloads/request/\n request = state.get('request')\n if request is not None:\n user_data['ip_address'] = str(request.source)\n decode = lambda b: b.decode('ascii', 'backslashreplace')\n scope_dict['contexts'] = {}\n scope_dict['contexts']['request'] = {\n 'method': request.method,\n 'url': request.line.uri.decoded,\n 'headers': {\n decode(k): decode(b', '.join(v))\n for k, v in request.headers.items()\n if k != b'Cookie'\n },\n }\n # https://docs.sentry.io/platforms/python/enriching-events/tags/\n scope_dict['tags'] = {\n 'lang': getattr(state.get('locale'), 'language', None),\n }\n except Exception as e:\n tell_sentry(e, {})\n\n # Tell Sentry\n r['sentry_ident'] = sentry_sdk.capture_exception(exception, **scope_dict)\n return r\n\n CustomUndefined._tell_sentry = staticmethod(tell_sentry)\n\n return {'tell_sentry': tell_sentry}\n\n\nclass PlatformRegistry:\n \"\"\"Registry of platforms we support.\n \"\"\"\n\n def __init__(self, platforms):\n self.list = platforms\n self.dict = dict((p.name, p) for p in platforms)\n self.__dict__.update(self.dict)\n self.order = dict((p.name, i) for i, p in 
enumerate(platforms))\n self._hasattr_cache = {}\n\n def __contains__(self, platform):\n return platform.name in self.dict\n\n def __iter__(self):\n return iter(self.list)\n\n def __len__(self):\n return len(self.list)\n\n def _cache_hasattr(self, attr):\n r = PlatformRegistry([p for p in self if getattr(p, attr, None)])\n self._hasattr_cache[attr] = r\n return r\n\n def get(self, k, default=None):\n return self.dict.get(k, default)\n\n def hasattr(self, attr):\n r = self._hasattr_cache.get(attr)\n return r or self._cache_hasattr(attr)\n\n def index(self, name):\n return self.order[name]\n\n\ndef accounts_elsewhere(app_conf, asset, canonical_url, db):\n if not app_conf:\n return {'platforms': db, 'follow_platforms': db}\n platforms = []\n for cls in elsewhere.CLASSES:\n conf = {\n k[len(cls.name)+1:]: v\n for k, v in app_conf.__dict__.items() if k.startswith(cls.name+'_')\n }\n conf.setdefault('api_timeout', app_conf.socket_timeout)\n conf.setdefault('app_name', app_conf.app_name)\n conf.setdefault('app_url', canonical_url)\n if hasattr(cls, 'register_app'):\n callback_url = canonical_url + '/on/' + cls.name + ':{domain}/associate'\n platforms.append(cls(None, None, callback_url, **conf))\n elif hasattr(cls, 'based_on'):\n based_on = cls.based_on\n callback_url = canonical_url + '/on/' + cls.name + '/associate'\n platforms.append(cls(\n getattr(app_conf, based_on + '_id'),\n getattr(app_conf, based_on + '_secret'),\n callback_url,\n **conf\n ))\n else:\n platforms.append(cls(\n conf.pop('id'),\n conf.pop('secret'),\n conf.pop('callback', canonical_url + '/on/' + cls.name + '/associate'),\n **conf\n ))\n\n platforms = [p for p in platforms if p.api_secret or hasattr(p, 'register_app')]\n order = db.all(\"\"\"\n SELECT platform\n FROM (\n SELECT e.platform, count(*) as c\n FROM elsewhere e\n JOIN participants p ON p.id = e.participant\n WHERE p.status = 'active'\n AND p.hide_from_lists = 0\n GROUP BY e.platform\n ) a\n ORDER BY c DESC, platform ASC\n \"\"\")\n n = len(order)\n order = dict(zip(order, range(n)))\n platforms = sorted(platforms, key=lambda p: (order.get(p.name, n), p.name))\n platforms = PlatformRegistry(platforms)\n\n follow_platforms = [p for p in platforms if getattr(p, 'api_follows_path', None)]\n follow_platforms = PlatformRegistry(follow_platforms)\n\n for platform in platforms:\n if platform.fontawesome_name:\n continue\n platform.icon = asset(\n 'platforms/%s.svg' % platform.name,\n 'platforms/%s.16.png' % platform.name,\n )\n platform.logo = asset(\n 'platforms/%s.svg' % platform.name,\n 'platforms/%s.png' % platform.name,\n )\n\n return {'platforms': platforms, 'follow_platforms': follow_platforms}\n\n\ndef replace_unused_singulars(c):\n for m in list(c):\n msg = m.id\n if not isinstance(msg, tuple):\n continue\n if msg[0].startswith('<unused singular (hash='):\n del c[msg[0]]\n c[msg[1]] = m\n\n\ndef share_source_strings(catalog, shared_strings):\n \"\"\"Share message IDs between catalogs to save memory.\n \"\"\"\n if not shared_strings:\n shared_strings.update((m.id, m.id) for m in catalog)\n return\n for m in list(catalog):\n if not m.id:\n continue\n if m.id in shared_strings:\n m.id = shared_strings[m.id]\n catalog.delete(m.id)\n catalog[m.id] = m\n else:\n shared_strings[m.id] = m.id\n\n\ndef load_i18n(canonical_host, canonical_scheme, project_root, tell_sentry):\n # Load the locales\n localeDir = os.path.join(project_root, 'i18n', 'core')\n locales = LOCALES\n source_strings = {}\n for file in os.listdir(localeDir):\n try:\n parts = file.split(\".\")\n if not 
(len(parts) == 2 and parts[1] == \"po\"):\n continue\n lang = parts[0]\n with open(os.path.join(localeDir, file), 'rb') as f:\n l = locales[lang.lower()] = Locale(lang)\n c = l.catalog = read_po(f)\n share_source_strings(c, source_strings)\n c.plural_func = get_function_from_rule(c.plural_expr)\n replace_unused_singulars(c)\n try:\n l.countries = make_sorted_dict(COUNTRIES, l.territories)\n except KeyError:\n l.countries = COUNTRIES\n try:\n l.languages_2 = make_sorted_dict(LANGUAGES_2, l.languages)\n except KeyError:\n l.languages_2 = LANGUAGES_2\n except Exception as e:\n tell_sentry(e, {})\n del source_strings\n\n # Unload the Babel data that we no longer need\n # We load a lot of data to populate the LANGUAGE_NAMES dict, we don't want\n # to keep it all in RAM.\n used_data_dict_addresses = set(id(l._data._data) for l in locales.values())\n for key, data_dict in list(babel.localedata._cache.items()):\n if id(data_dict) not in used_data_dict_addresses:\n del babel.localedata._cache[key]\n\n # Prepare a unique and sorted list for use in the language switcher\n percent = lambda l, total: sum((percent(s, len(s)) if isinstance(s, tuple) else 1) for s in l if s) / total\n for l in list(locales.values()):\n if l.language == 'en':\n l.completion = 1\n continue\n l.completion = percent([m.string for m in l.catalog if m.id and not m.fuzzy], len(l.catalog))\n if l.completion == 0:\n del locales[l.language]\n loc_url = canonical_scheme+'://%s.'+canonical_host\n domain, port = (canonical_host.split(':') + [None])[:2]\n port = int(port) if port else socket.getservbyname(canonical_scheme, 'tcp')\n subdomains = {\n l.subdomain: loc_url % l.subdomain for l in locales.values()\n if resolve(l.subdomain + '.' + domain, port)\n }\n lang_list = sorted(\n (\n (l.completion, l.language, l.language_name.title(), loc_url % l.subdomain)\n for l in set(locales.values()) if l.completion > 0.5\n ),\n key=lambda t: (-t[0], t[1]),\n )\n\n # Add year-less date format\n year_re = re.compile(r'(^y+[^a-zA-Z]+|[^a-zA-Z]+y+$)')\n for l in locales.values():\n short_format = l.date_formats['short'].pattern\n assert short_format[0] == 'y' or short_format[-1] == 'y', (l.language, short_format)\n l.date_formats['short_yearless'] = year_re.sub('', short_format)\n\n # Add aliases\n for k, v in list(locales.items()):\n locales.setdefault(ALIASES.get(k, k), v)\n locales.setdefault(ALIASES_R.get(k, k), v)\n for k, v in list(locales.items()):\n locales.setdefault(k.split('_', 1)[0], v)\n\n # Add universal strings\n # These strings don't need to be translated, but they have to be in the catalogs\n # so that they're counted as translated.\n for l in locales.values():\n l.catalog.add(\"PayPal\", \"PayPal\")\n\n # Patch the locales to look less formal\n locales['fr'].currency_formats['standard'] = parse_pattern('#,##0.00\\u202f\\xa4')\n locales['fr'].currencies['USD'] = 'dollar états-unien'\n\n # Load the markdown files\n docs = {}\n heading_re = re.compile(r'^(#+ )', re.M)\n for path in find_files(os.path.join(project_root, 'i18n'), '*.md'):\n d, b = os.path.split(path)\n doc = os.path.basename(d)\n lang = b[:-3]\n with open(path, 'rb') as f:\n md = f.read().decode('utf8')\n if md.startswith('# '):\n md = '\\n'.join(md.split('\\n')[1:]).strip()\n md = heading_re.sub(r'##\\1', md)\n docs.setdefault(doc, {}).__setitem__(lang, markdown.render(md))\n\n return {'docs': docs, 'lang_list': lang_list, 'locales': locales, 'subdomains': subdomains}\n\n\ndef asset_url_generator(env, asset_url, tell_sentry, www_root):\n def asset(*paths):\n for path 
in paths:\n fspath = www_root+'/assets/'+path\n etag = ''\n try:\n if env.cache_static:\n etag = asset_etag(fspath)\n else:\n os.stat(fspath)\n except FileNotFoundError as e:\n if path == paths[-1]:\n if not os.path.exists(fspath + '.spt'):\n tell_sentry(e, {})\n else:\n continue\n except Exception as e:\n tell_sentry(e, {})\n return asset_url+path+(etag and '?etag='+etag)\n return {'asset': asset}\n\n\ndef load_scss_variables(project_root):\n \"\"\"Build a dict representing the `style/variables.scss` file.\n \"\"\"\n # Get the names of all the variables\n with open(project_root + '/style/variables.scss') as f:\n variables = f.read()\n names = [m.group(1) for m in re.finditer(r'^\\$([\\w-]+):', variables, re.M)]\n # Compile a big rule that uses all the variables\n props = ''.join('-x-{0}: ${0};'.format(name) for name in names)\n css = sass.compile(string=('%s\\nx { %s }' % (variables, props)))\n # Read the final values from the generated CSS\n d = dict((m.group(1), m.group(2)) for m in re.finditer(r'-x-([\\w-]+): (.+?);\\s', css))\n return {'scss_variables': d}\n\n\ndef s3(env):\n key, secret = env.aws_access_key_id, env.aws_secret_access_key\n if key and secret:\n s3 = boto3.client('s3', aws_access_key_id=key, aws_secret_access_key=secret)\n else:\n s3 = None\n return {'s3': s3}\n\n\ndef currency_exchange_rates(db):\n if not db:\n return\n return {'currency_exchange_rates': get_currency_exchange_rates(db)}\n\n\nminimal_chain = StateChain(\n version,\n make_sentry_teller,\n database,\n)\n\nfull_chain = StateChain(\n version,\n make_sentry_teller,\n crypto,\n database,\n canonical,\n csp,\n app_conf,\n mail,\n billing,\n stripe,\n username_restrictions,\n load_i18n,\n asset_url_generator,\n accounts_elsewhere,\n load_scss_variables,\n s3,\n trusted_proxies,\n currency_exchange_rates,\n)\n\n\ndef main():\n from os import environ\n environ['RUN_CRON_JOBS'] = 'no'\n from liberapay.main import website\n app_conf, env = website.app_conf, website.env\n if app_conf.missing or app_conf.mistyped or env.missing or env.malformed:\n raise SystemExit('The configuration is incorrect.')\n\n\nif __name__ == '__main__':\n main()\n", "path": "liberapay/wireup.py" } ]
diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 43897bd92b..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM debian:10 - -RUN apt-get update && \ - apt-get install -y build-essential python3-pip \ - libpq-dev libffi-dev python3-dev postgresql-client && \ - rm -rf /var/lib/apt/lists/* - - -COPY requirements_*.txt /tmp/ - -RUN pip3 install --require-hashes -r /tmp/requirements_base.txt \ - -r /tmp/requirements_tests.txt \ - -r /tmp/requirements_dev.txt - -COPY . /app -WORKDIR /app diff --git a/emails/payin_failed.spt b/emails/payin_failed.spt index 0948f9b463..93e1955c47 100644 --- a/emails/payin_failed.spt +++ b/emails/payin_failed.spt @@ -1,7 +1,7 @@ {{ _("Your payment has failed") }} [---] text/html -% if payin.off_session +% if payin.off_session|default(False) <p>{{ _( "The automatic payment of {money_amount} initiated today has failed.", money_amount=payin.amount, diff --git a/emails/payin_succeeded.spt b/emails/payin_succeeded.spt index e125530cbb..c11f3a7bc7 100644 --- a/emails/payin_succeeded.spt +++ b/emails/payin_succeeded.spt @@ -1,7 +1,7 @@ {{ _("Your payment has succeeded") }} [---] text/html -% if payin.off_session +% if payin.off_session|default(False) <p>{{ _( "The automatic payment of {money_amount} initiated today has succeeded.", money_amount=payin.amount, diff --git a/liberapay/wireup.py b/liberapay/wireup.py index 8e36ac7f47..f722ffea0c 100644 --- a/liberapay/wireup.py +++ b/liberapay/wireup.py @@ -265,6 +265,7 @@ class AppConf: smtp_password=str, smtp_use_tls=bool, stripe_callback_secret=str, + stripe_connect_callback_secret=str, stripe_connect_id=str, stripe_publishable_key=str, stripe_secret_key=str, diff --git a/style/base/base.scss b/style/base/base.scss index ebe6295410..5475728c74 100644 --- a/style/base/base.scss +++ b/style/base/base.scss @@ -177,9 +177,24 @@ div.account { } button.close { + margin-left: 5px; padding: 0 5px; } +button.corner-icon { + background: transparent; + border: none; + color: #000; + font-size: 18px; + line-height: 24px; + margin-left: 5px; + opacity: 0.2; + padding: 0 5px; +} +button.corner-icon:hover { + opacity: 0.5; +} + img.platform-icon { height: 16px; } diff --git a/www/%username/payment/index.spt b/www/%username/payment/index.spt index 246d169f09..7d4b9cd922 100644 --- a/www/%username/payment/index.spt +++ b/www/%username/payment/index.spt @@ -8,19 +8,49 @@ participant = get_participant(state, restrict=True) if request.method == 'POST': account_pk = request.body.get_int('account_pk') - account = website.db.one(""" - UPDATE payment_accounts - SET is_current = NULL - WHERE participant = %s - AND pk = %s - RETURNING * - """, (participant.id, account_pk)) - if account and account.provider == 'stripe': - try: - stripe.oauth.OAuth.deauthorize(stripe_user_id=account.id) - except stripe.oauth_error.InvalidClientError as e: - if "This application is not connected to stripe account" not in str(e): - website.warning("unexpected error message: " + str(e)) + action = request.body.get_choice('action', ('disconnect', 'refresh'), default='disconnect') + if action == 'disconnect': + account = website.db.one(""" + UPDATE payment_accounts + SET is_current = NULL + WHERE participant = %s + AND pk = %s + RETURNING * + """, (participant.id, account_pk)) + if account and account.provider == 'stripe': + try: + stripe.oauth.OAuth.deauthorize(stripe_user_id=account.id) + except stripe.oauth_error.InvalidClientError as e: + if "This application is not connected to stripe account" not in str(e): + 
website.warning("unexpected error message: " + str(e)) + elif action == 'refresh': + account = website.db.one(""" + SELECT * + FROM payment_accounts + WHERE participant = %s + AND pk = %s + """, (participant.id, account_pk)) + if not account: + raise response.invalid_input(account_pk, 'account_pk', 'body') + if account.provider == 'stripe': + stripe_account = stripe.Account.retrieve(account.id) + website.db.run(""" + UPDATE payment_accounts + SET country = %(country)s + , default_currency = %(default_currency)s + , charges_enabled = %(charges_enabled)s + , display_name = %(display_name)s + WHERE provider = 'stripe' + AND id = %(account_id)s + """, dict( + country=stripe_account.country, + default_currency=stripe_account.default_currency.upper(), + charges_enabled=stripe_account.charges_enabled, + display_name=stripe_account.settings.dashboard.display_name, + account_id=stripe_account.id, + )) + else: + raise response.error(400, f"refresh isn't implemented for provider {account.provider}") response.redirect(request.path.raw) accounts = website.db.all(""" @@ -99,12 +129,14 @@ subhead = _("Payment Processors") "bank accounts.)" ) }}</p> % if stripe_accounts - <form action="" method="POST"> - <input type="hidden" name="csrf_token" value="{{ csrf_token }}" /> % for account in stripe_accounts - <div class="card card-default"> - <button class="close pull-right" name="account_pk" value="{{ account.pk }}" - title="{{ _('Disconnect') }}">&times;</button> + <form class="card card-default" action="" method="POST"> + <input type="hidden" name="csrf_token" value="{{ csrf_token }}" /> + <input type="hidden" name="account_pk" value="{{ account.pk }}" /> + <button class="corner-icon fa fa-close" name="action" value="disconnect" + title="{{ _('Disconnect') }}"></button> + <button class="corner-icon fa fa-refresh" name="action" value="refresh" + title="{{ _('Refresh') }}"></button> % if account.display_name <h4>{{ account.display_name }}</h4> {{ _("Account ID: {0}", account.id) }}<br> @@ -129,9 +161,8 @@ subhead = _("Payment Processors") fontawesome("external-link") }} {{ _( "Manage this {platform} account", platform="Stripe" ) }}</a> - </div> + </form> % endfor - </form> <br> % elif country in locale.countries % if country in constants.PAYOUT_COUNTRIES['stripe'] diff --git a/www/about/teams.spt b/www/about/teams.spt index 2c5a996f55..a4aa242641 100644 --- a/www/about/teams.spt +++ b/www/about/teams.spt @@ -92,7 +92,7 @@ title = _("Teams") <input type="hidden" name="csrf_token" value="{{ csrf_token }}" /> <div class="form-group"> - <input class="form-control" name="name" size=30 + <input class="form-control" name="name" size=30 maxlength="{{ constants.USERNAME_MAX_SIZE }}" placeholder="{{ _('Name of the team') }}" /> </div> <div class="form-group"> diff --git a/www/callbacks/stripe.spt b/www/callbacks/stripe.spt index d83c9f9027..094cdbcbad 100644 --- a/www/callbacks/stripe.spt +++ b/www/callbacks/stripe.spt @@ -19,10 +19,12 @@ PRODUCTION = website.env.instance_type == 'production' request.allow('POST') payload = request.body_bytes sig = request.headers[b'Stripe-Signature'].decode('ascii', 'replace') +if 'connect' in request.qs: + secret = website.app_conf.stripe_connect_callback_secret +else: + secret = website.app_conf.stripe_callback_secret try: - event = stripe.Webhook.construct_event( - payload, sig, website.app_conf.stripe_callback_secret - ) + event = stripe.Webhook.construct_event(payload, sig, secret) except ValueError as e: raise response.error(400, str(e)) except 
stripe.error.SignatureVerificationError:
rootpy__rootpy-791
Unable to set error_low for a graph
There is a bug in `rootpy.plotting.graph`: setting `exl` or `eyl` through a `GraphPoint` raises an `AttributeError`:
```python
from rootpy.plotting import Graph

g = Graph(name="test", type='asymm')
g[0] = (1, 1)
g[0].y.error_hi = 0.1
g[0].y.error_low = 0.1
```
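A note on the apparent cause, based on the `GraphPoint.Measurement` code captured below: the `error_hi` setter builds the method name `'SetPointE{0}high'`, which exists on ROOT graphs, while the `error_low` setter builds `'voidSetPointE{0}low'`, which does not, so the `getattr` lookup raises the reported `AttributeError`. A minimal PyROOT sketch of the name mismatch (an illustration assuming a working ROOT installation; not code from the report or from the fix):

```python
import ROOT

# One-point graph with asymmetric errors, mirroring Graph(type='asymm').
g = ROOT.TGraphAsymmErrors(1)
g.SetPoint(0, 1.0, 1.0)

# The real ROOT API that the setters presumably intend to call.
g.SetPointEYhigh(0, 0.1)
g.SetPointEYlow(0, 0.1)

# The name built by the error_low setter carries a stray 'void' prefix,
# so looking it up on the graph fails.
print(hasattr(g, 'SetPointEYlow'))      # True
print(hasattr(g, 'voidSetPointEYlow'))  # False
```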
[ { "content": "from __future__ import absolute_import\n\nimport math\nimport numbers\nfrom operator import add, sub\n\nimport ROOT\n\nfrom .. import log; log = log[__name__]\nfrom .. import QROOT\nfrom ..extern.six.moves import range\nfrom ..base import NamelessConstructorObject\nfrom ..decorators import snake_case_methods\nfrom .base import Plottable\n\n__all__ = [\n 'Graph',\n 'Graph1D',\n 'Graph2D',\n]\n\n\nclass _GraphBase(object):\n\n class GraphPoint(object):\n \"\"\"\n\tClass similar to BinProxy for histograms, useful for\n\tgetting single point information\n\t\"\"\"\n class Measurement(object):\n \"\"\"\n\t Generalized measusement class, each graph point\n\t has one for each axis\n\t \"\"\"\n def __init__(self, graph, axis, idx):\n self.isdefault = not hasattr(graph, axis)\n self.axis_ = axis\n self.index_ = idx\n self.graph_ = graph\n\n @property\n def value(self):\n return 0. if self.isdefault else getattr(self.graph_, self.axis_)(self.index_)\n\n @value.setter\n def value(self, value):\n axes = ['x', 'y']\n if hasattr(self.graph_, 'z'):\n axes.append('z')\n vals = []\n for axis in axes:\n if axis == self.axis_:\n vals.append(value)\n else:\n vals.append(\n getattr(\n self.graph_,\n axis)(self.index_)\n )\n self.graph_.SetPoint(self.index_, *vals)\n\n @property\n def error(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}err'.format(self.axis_)\n )(self.index_)\n\n @property\n def error_hi(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}errh'.format(self.axis_)\n )(self.index_)\n\n @error_hi.setter\n def error_hi(self, val):\n if self.isdefault: return\n getattr(\n self.graph_,\n 'SetPointE{0}high'.format(self.axis_.upper())\n )(self.index_, val)\n\n @property\n def error_low(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}errl'.format(self.axis_)\n )(self.index_)\n\n @error_low.setter\n def error_low(self, val):\n if self.isdefault: return\n getattr(\n self.graph_,\n 'voidSetPointE{0}low'.format(self.axis_.upper())\n )(self.index_, val)\n\n\n @property\n def error_avg(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}erravg'.format(self.axis_)\n )(self.index_)\n\n @property\n def error_max(self):\n return 0. 
if self.isdefault else getattr(\n self.graph_,\n '{0}errmax'.format(self.axis_)\n )(self.index_)\n\n def __init__(self, graph, idx):\n self.graph_ = graph\n self.idx_ = idx\n\n @property\n def x(self):\n \"\"\"returns the x coordinate\n \"\"\"\n return _GraphBase.GraphPoint.Measurement(self.graph_, 'x', self.idx_)\n\n @property\n def y(self):\n \"\"\"returns the y coordinate\n \"\"\"\n return _GraphBase.GraphPoint.Measurement(self.graph_, 'y', self.idx_)\n\n @property\n def z(self):\n \"\"\"returns the z coordinate\n \"\"\"\n return _GraphBase.GraphPoint.Measurement(self.graph_, 'z', self.idx_)\n\n @classmethod\n def from_file(cls, filename, sep=' ', name=None, title=None):\n with open(filename, 'r') as gfile:\n lines = gfile.readlines()\n numpoints = len(lines)\n graph = cls(numpoints, name=name, title=title)\n for idx, line in enumerate(lines):\n point = list(map(float, line.rstrip().split(sep)))\n if len(point) != cls.DIM + 1:\n raise ValueError(\n \"line {0:d} does not contain \"\n \"{1:d} values: {2}\".format(\n idx + 1, cls.DIM + 1, line))\n graph.SetPoint(idx, *point)\n graph.Set(numpoints)\n return graph\n\n def __len__(self):\n return self.GetN()\n\n def __iter__(self):\n for index in range(len(self)):\n yield self[index]\n\n @property\n def num_points(self):\n return self.GetN()\n\n @num_points.setter\n def num_points(self, n):\n if n < 0:\n raise ValueError(\"number of points in a graph must \"\n \"be non-negative\")\n # ROOT, why not SetN with GetN?\n self.Set(n)\n\n def x(self, index=None):\n if index is None:\n return (self.GetX()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetX()[index]\n\n def xerr(self, index=None):\n if index is None:\n return ((self.GetEXlow()[i], self.GetEXhigh()[i])\n for i in range(self.GetN()))\n index = index % len(self)\n return (self.GetErrorXlow(index), self.GetErrorXhigh(index))\n\n def xerrh(self, index=None):\n if index is None:\n return (self.GetEXhigh()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetErrorXhigh(index)\n\n def xerrl(self, index=None):\n if index is None:\n return (self.GetEXlow()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetErrorXlow(index)\n\n def xerravg(self, index=None):\n if index is None:\n return (self.xerravg(i) for i in range(self.GetN()))\n index = index % len(self)\n return math.sqrt(self.GetErrorXhigh(index) ** 2 +\n self.GetErrorXlow(index) ** 2)\n\n def xerrmax(self, index=None):\n if index is None:\n return (self.xerravg(i) for i in range(self.GetN()))\n index = index % len(self)\n return max(self.GetErrorXhigh(index),\n self.GetErrorXlow(index))\n\n def y(self, index=None):\n if index is None:\n return (self.GetY()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetY()[index]\n\n def yerr(self, index=None):\n if index is None:\n return (self.yerr(i) for i in range(self.GetN()))\n index = index % len(self)\n return (self.GetErrorYlow(index), self.GetErrorYhigh(index))\n\n def yerrh(self, index=None):\n if index is None:\n return (self.GetEYhigh()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetEYhigh()[index]\n\n def yerrl(self, index=None):\n if index is None:\n return (self.GetEYlow()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetEYlow()[index]\n\n def yerravg(self, index=None):\n if index is None:\n return (self.yerravg()[i] for i in range(self.GetN()))\n index = index % len(self)\n return math.sqrt(self.GetEYhigh()[index] ** 2 +\n 
self.GetEYlow()[index] ** 2)\n\n def yerravg(self, index=None):\n if index is None:\n return (self.yerravg()[i] for i in range(self.GetN()))\n index = index % len(self)\n return max(self.GetEYhigh()[index],\n self.GetEYlow()[index])\n\n def __getitem__(self, idx):\n return _GraphBase.GraphPoint(self, idx)\n\n def __setitem__(self, index, point):\n if not 0 <= index <= self.GetN():\n raise IndexError(\"graph point index out of range\")\n self.SetPoint(index, *point)\n\n\nclass _Graph1DBase(_GraphBase):\n\n @classmethod\n def divide(cls, top, bottom, option='cp'):\n from .hist import Hist\n if isinstance(top, _Graph1DBase):\n top = Hist(top)\n if isinstance(bottom, _Graph1DBase):\n bottom = Hist(bottom)\n ratio = Graph(type='asymm')\n ratio.Divide(top, bottom, option)\n return ratio\n\n def __add__(self, other):\n copy = self.Clone()\n copy += other\n return copy\n\n def __radd__(self, other):\n return self + other\n\n def __sub__(self, other):\n copy = self.Clone()\n copy -= other\n return copy\n\n def __rsub__(self, other):\n return -1 * (self - other)\n\n def __div__(self, other):\n copy = self.Clone()\n copy /= other\n return copy\n\n __truediv__ = __div__\n\n def __mul__(self, other):\n copy = self.Clone()\n copy *= other\n return copy\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n self.SetPoint(index, point.x.value, point.y.value + other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = math.sqrt((self.GetEYlow()[index]) ** 2 +\n (other.GetEYlow()[index]) ** 2)\n yhigh = math.sqrt((self.GetEYhigh()[index]) ** 2 +\n (other.GetEYhigh()[index]) ** 2)\n self.SetPoint(index, mypoint.x.value, mypoint.y.value + otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n def __isub__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n self.SetPoint(index, point.x.value, point.y.value - other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = math.sqrt((self.GetEYlow()[index]) ** 2 +\n (other.GetEYlow()[index]) ** 2)\n yhigh = math.sqrt((self.GetEYhigh()[index]) ** 2 +\n (other.GetEYhigh()[index]) ** 2)\n self.SetPoint(index, mypoint.x.value, mypoint.y.value - otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n def __idiv__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n ylow, yhigh = self.GetEYlow()[index], self.GetEYhigh()[index]\n xlow, xhigh = self.GetEXlow()[index], self.GetEXhigh()[index]\n self.SetPoint(index, point.x.value, point.y.value / other)\n self.SetPointError(index, xlow, xhigh,\n ylow / other, yhigh / other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = (\n (mypoint.y.value / otherpoint.y.value) *\n math.sqrt((self.GetEYlow()[index] / mypoint.y.value) ** 2 +\n (other.GetEYlow()[index] /\n otherpoint.y.value) ** 2))\n yhigh = (\n (mypoint.y.value / otherpoint.y.value) *\n math.sqrt((self.GetEYhigh()[index] / mypoint.y.value) ** 2 +\n (other.GetEYhigh()[index] /\n otherpoint.y.value) ** 2))\n 
self.SetPoint(index, mypoint.x.value, mypoint.y.value / otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n __itruediv__ = __idiv__\n\n def __imul__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n ylow, yhigh = self.GetEYlow()[index], self.GetEYhigh()[index]\n xlow, xhigh = self.GetEXlow()[index], self.GetEXhigh()[index]\n self.SetPoint(index, point.x.value, point.y.value * other)\n self.SetPointError(index, xlow, xhigh,\n ylow * other, yhigh * other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = (\n (mypoint.y.value * otherpoint.y.value) *\n math.sqrt((self.GetEYlow()[index] / mypoint.y.value) ** 2 +\n (other.GetEYlow()[index] / otherpoint.y.value) ** 2))\n yhigh = (\n (mypoint.y.value * otherpoint.y.value) *\n math.sqrt((self.GetEYhigh()[index] / mypoint.y.value) ** 2 +\n (other.GetEYhigh()[index] / otherpoint.y.value) ** 2))\n self.SetPoint(index, mypoint.x.value, mypoint.y.value * otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n def GetMaximum(self, include_error=False):\n if not include_error:\n return self.GetYmax()\n summed = map(add, self.y(), self.yerrh())\n return max(summed)\n\n def GetMinimum(self, include_error=False):\n if not include_error:\n return self.GetYmin()\n summed = map(sub, self.y(), self.yerrl())\n return min(summed)\n\n def GetXmin(self):\n if len(self) == 0:\n raise ValueError(\"Attemping to get xmin of empty graph\")\n return ROOT.TMath.MinElement(self.GetN(), self.GetX())\n\n def GetXmax(self):\n if len(self) == 0:\n raise ValueError(\"Attempting to get xmax of empty graph\")\n return ROOT.TMath.MaxElement(self.GetN(), self.GetX())\n\n def GetYmin(self):\n if len(self) == 0:\n raise ValueError(\"Attempting to get ymin of empty graph\")\n return ROOT.TMath.MinElement(self.GetN(), self.GetY())\n\n def GetYmax(self):\n if len(self) == 0:\n raise ValueError(\"Attempting to get ymax of empty graph!\")\n return ROOT.TMath.MaxElement(self.GetN(), self.GetY())\n\n def GetEXhigh(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEX()\n return super(_Graph1DBase, self).GetEXhigh()\n\n def GetEXlow(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEX()\n return super(_Graph1DBase, self).GetEXlow()\n\n def GetEYhigh(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEY()\n return super(_Graph1DBase, self).GetEYhigh()\n\n def GetEYlow(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEY()\n return super(_Graph1DBase, self).GetEYlow()\n\n def Crop(self, x1, x2, copy=False):\n \"\"\"\n Remove points which lie outside of [x1, x2].\n If x1 and/or x2 is below/above the current lowest/highest\n x-coordinates, additional points are added to the graph using a\n linear interpolation\n \"\"\"\n numPoints = self.GetN()\n if copy:\n cropGraph = self.Clone()\n copyGraph = self\n else:\n cropGraph = self\n copyGraph = self.Clone()\n X = copyGraph.GetX()\n EXlow = copyGraph.GetEXlow()\n EXhigh = copyGraph.GetEXhigh()\n Y = copyGraph.GetY()\n EYlow = copyGraph.GetEYlow()\n EYhigh = copyGraph.GetEYhigh()\n xmin = copyGraph.GetXmin()\n if x1 < xmin:\n cropGraph.Set(numPoints + 1)\n numPoints += 1\n xmax = copyGraph.GetXmax()\n if x2 > xmax:\n cropGraph.Set(numPoints + 1)\n numPoints += 1\n index = 0\n for i in range(numPoints):\n if i == 0 and x1 < xmin:\n 
cropGraph.SetPoint(0, x1, copyGraph.Eval(x1))\n elif i == numPoints - 1 and x2 > xmax:\n cropGraph.SetPoint(i, x2, copyGraph.Eval(x2))\n else:\n cropGraph.SetPoint(i, X[index], Y[index])\n cropGraph.SetPointError(\n i,\n EXlow[index], EXhigh[index],\n EYlow[index], EYhigh[index])\n index += 1\n return cropGraph\n\n def Reverse(self, copy=False):\n \"\"\"\n Reverse the order of the points\n \"\"\"\n numPoints = self.GetN()\n if copy:\n revGraph = self.Clone()\n else:\n revGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n index = numPoints - 1 - i\n revGraph.SetPoint(i, X[index], Y[index])\n revGraph.SetPointError(\n i,\n EXlow[index], EXhigh[index],\n EYlow[index], EYhigh[index])\n return revGraph\n\n def Invert(self, copy=False):\n \"\"\"\n Interchange the x and y coordinates of all points\n \"\"\"\n numPoints = self.GetN()\n if copy:\n invGraph = self.Clone()\n else:\n invGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n invGraph.SetPoint(i, Y[i], X[i])\n invGraph.SetPointError(\n i,\n EYlow[i], EYhigh[i],\n EXlow[i], EXhigh[i])\n return invGraph\n\n def Scale(self, value, copy=False):\n \"\"\"\n Scale the graph vertically by value\n \"\"\"\n numPoints = self.GetN()\n if copy:\n scaleGraph = self.Clone()\n else:\n scaleGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n scaleGraph.SetPoint(i, X[i], Y[i] * value)\n scaleGraph.SetPointError(\n i,\n EXlow[i], EXhigh[i],\n EYlow[i] * value, EYhigh[i] * value)\n return scaleGraph\n\n def Stretch(self, value, copy=False):\n \"\"\"\n Stretch the graph horizontally by a factor of value\n \"\"\"\n numPoints = self.GetN()\n if copy:\n stretchGraph = self.Clone()\n else:\n stretchGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n stretchGraph.SetPoint(i, X[i] * value, Y[i])\n stretchGraph.SetPointError(\n i,\n EXlow[i] * value, EXhigh[i] * value,\n EYlow[i], EYhigh[i])\n return stretchGraph\n\n def Shift(self, value, copy=False):\n \"\"\"\n Shift the graph left or right by value\n \"\"\"\n numPoints = self.GetN()\n if copy:\n shiftGraph = self.Clone()\n else:\n shiftGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n shiftGraph.SetPoint(i, X[i] + value, Y[i])\n shiftGraph.SetPointError(\n i,\n EXlow[i], EXhigh[i],\n EYlow[i], EYhigh[i])\n return shiftGraph\n\n def Integrate(self):\n \"\"\"\n Integrate using the trapazoidal method\n \"\"\"\n area = 0.\n X = self.GetX()\n Y = self.GetY()\n for i in range(self.GetN() - 1):\n area += (X[i + 1] - X[i]) * (Y[i] + Y[i + 1]) / 2.\n return area\n\n def Append(self, other):\n \"\"\"\n Append points from another graph\n \"\"\"\n orig_len = len(self)\n self.Set(orig_len + len(other))\n ipoint = orig_len\n if hasattr(self, 'SetPointError'):\n for point in other:\n self.SetPoint(ipoint, point.x.value, point.y.value)\n self.SetPointError(\n ipoint,\n point.x.error_low, point.x.error_hi,\n point.y.error_low, point.y.error_hi)\n ipoint += 1\n 
else:\n for point in other:\n self.SetPoint(ipoint, point.x.value, point.y.value)\n ipoint += 1\n\n\nclass _Graph2DBase(_GraphBase):\n\n def z(self, index=None):\n if index is None:\n return (self.GetZ()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetZ()[index]\n\n def zerr(self, index=None):\n if index is None:\n return (self.zerr(i) for i in range(self.GetN()))\n index = index % len(self)\n return self.GetErrorZ(index)\n\n\n_GRAPH1D_BASES = {\n 'default': QROOT.TGraph,\n 'asymm': QROOT.TGraphAsymmErrors,\n 'errors': QROOT.TGraphErrors,\n 'benterrors': QROOT.TGraphBentErrors,\n}\n_GRAPH1D_CLASSES = {}\n\n\ndef _Graph_class(base):\n\n class Graph(_Graph1DBase, Plottable, NamelessConstructorObject,\n base):\n _ROOT = base\n DIM = 1\n\n def __init__(self, npoints_or_hist=None,\n name=None, title=None, **kwargs):\n if npoints_or_hist is not None:\n super(Graph, self).__init__(npoints_or_hist,\n name=name, title=title)\n else:\n super(Graph, self).__init__(name=name, title=title)\n self._post_init(**kwargs)\n\n return Graph\n\nfor name, base in _GRAPH1D_BASES.items():\n _GRAPH1D_CLASSES[name] = snake_case_methods(_Graph_class(base))\n\n\nclass Graph(_Graph1DBase, QROOT.TGraph):\n \"\"\"\n Returns a Graph object which inherits from the associated\n ROOT.TGraph* class (TGraph, TGraphErrors, TGraphAsymmErrors)\n \"\"\"\n _ROOT = QROOT.TGraph\n DIM = 1\n\n @classmethod\n def dynamic_cls(cls, type='asymm'):\n return _GRAPH1D_CLASSES[type]\n\n def __new__(cls, *args, **kwargs):\n type = kwargs.pop('type', 'asymm').lower()\n return cls.dynamic_cls(type)(\n *args, **kwargs)\n\n\n# alias Graph1D -> Graph\nGraph1D = Graph\n\n_GRAPH2D_BASES = {\n 'default': QROOT.TGraph2D,\n 'errors': QROOT.TGraph2DErrors,\n}\n_GRAPH2D_CLASSES = {}\n\n\ndef _Graph2D_class(base):\n\n class Graph2D(_Graph2DBase, Plottable, NamelessConstructorObject,\n base):\n _ROOT = base\n DIM = 2\n\n def __init__(self, npoints_or_hist=None,\n name=None, title=None, **kwargs):\n if npoints_or_hist is not None:\n super(Graph2D, self).__init__(npoints_or_hist,\n name=name, title=title)\n else:\n super(Graph2D, self).__init__(name=name, title=title)\n if isinstance(npoints_or_hist, int):\n # ROOT bug in TGraph2D\n self.Set(npoints_or_hist)\n self._post_init(**kwargs)\n\n return Graph2D\n\nfor name, base in _GRAPH2D_BASES.items():\n _GRAPH2D_CLASSES[name] = snake_case_methods(_Graph2D_class(base))\n\n\nclass Graph2D(_Graph2DBase, QROOT.TGraph2D):\n \"\"\"\n Returns a Graph2D object which inherits from the associated\n ROOT.TGraph2D* class (TGraph2D, TGraph2DErrors)\n \"\"\"\n _ROOT = QROOT.TGraph2D\n DIM = 2\n\n @classmethod\n def dynamic_cls(cls, type='errors'):\n return _GRAPH2D_CLASSES[type]\n\n def __new__(cls, *args, **kwargs):\n type = kwargs.pop('type', 'errors').lower()\n return cls.dynamic_cls(type)(\n *args, **kwargs)\n", "path": "rootpy/plotting/graph.py" } ]
[ { "content": "from __future__ import absolute_import\n\nimport math\nimport numbers\nfrom operator import add, sub\n\nimport ROOT\n\nfrom .. import log; log = log[__name__]\nfrom .. import QROOT\nfrom ..extern.six.moves import range\nfrom ..base import NamelessConstructorObject\nfrom ..decorators import snake_case_methods\nfrom .base import Plottable\n\n__all__ = [\n 'Graph',\n 'Graph1D',\n 'Graph2D',\n]\n\n\nclass _GraphBase(object):\n\n class GraphPoint(object):\n \"\"\"\n\tClass similar to BinProxy for histograms, useful for\n\tgetting single point information\n\t\"\"\"\n class Measurement(object):\n \"\"\"\n\t Generalized measusement class, each graph point\n\t has one for each axis\n\t \"\"\"\n def __init__(self, graph, axis, idx):\n self.isdefault = not hasattr(graph, axis)\n self.axis_ = axis\n self.index_ = idx\n self.graph_ = graph\n\n @property\n def value(self):\n return 0. if self.isdefault else getattr(self.graph_, self.axis_)(self.index_)\n\n @value.setter\n def value(self, value):\n axes = ['x', 'y']\n if hasattr(self.graph_, 'z'):\n axes.append('z')\n vals = []\n for axis in axes:\n if axis == self.axis_:\n vals.append(value)\n else:\n vals.append(\n getattr(\n self.graph_,\n axis)(self.index_)\n )\n self.graph_.SetPoint(self.index_, *vals)\n\n @property\n def error(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}err'.format(self.axis_)\n )(self.index_)\n\n @property\n def error_hi(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}errh'.format(self.axis_)\n )(self.index_)\n\n @error_hi.setter\n def error_hi(self, val):\n if self.isdefault: return\n getattr(\n self.graph_,\n 'SetPointE{0}high'.format(self.axis_.upper())\n )(self.index_, val)\n\n @property\n def error_low(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}errl'.format(self.axis_)\n )(self.index_)\n\n @error_low.setter\n def error_low(self, val):\n if self.isdefault: return\n getattr(\n self.graph_,\n 'SetPointE{0}low'.format(self.axis_.upper())\n )(self.index_, val)\n\n\n @property\n def error_avg(self):\n return 0. if self.isdefault else getattr(\n self.graph_,\n '{0}erravg'.format(self.axis_)\n )(self.index_)\n\n @property\n def error_max(self):\n return 0. 
if self.isdefault else getattr(\n self.graph_,\n '{0}errmax'.format(self.axis_)\n )(self.index_)\n\n def __init__(self, graph, idx):\n self.graph_ = graph\n self.idx_ = idx\n\n @property\n def x(self):\n \"\"\"returns the x coordinate\n \"\"\"\n return _GraphBase.GraphPoint.Measurement(self.graph_, 'x', self.idx_)\n\n @property\n def y(self):\n \"\"\"returns the y coordinate\n \"\"\"\n return _GraphBase.GraphPoint.Measurement(self.graph_, 'y', self.idx_)\n\n @property\n def z(self):\n \"\"\"returns the z coordinate\n \"\"\"\n return _GraphBase.GraphPoint.Measurement(self.graph_, 'z', self.idx_)\n\n @classmethod\n def from_file(cls, filename, sep=' ', name=None, title=None):\n with open(filename, 'r') as gfile:\n lines = gfile.readlines()\n numpoints = len(lines)\n graph = cls(numpoints, name=name, title=title)\n for idx, line in enumerate(lines):\n point = list(map(float, line.rstrip().split(sep)))\n if len(point) != cls.DIM + 1:\n raise ValueError(\n \"line {0:d} does not contain \"\n \"{1:d} values: {2}\".format(\n idx + 1, cls.DIM + 1, line))\n graph.SetPoint(idx, *point)\n graph.Set(numpoints)\n return graph\n\n def __len__(self):\n return self.GetN()\n\n def __iter__(self):\n for index in range(len(self)):\n yield self[index]\n\n @property\n def num_points(self):\n return self.GetN()\n\n @num_points.setter\n def num_points(self, n):\n if n < 0:\n raise ValueError(\"number of points in a graph must \"\n \"be non-negative\")\n # ROOT, why not SetN with GetN?\n self.Set(n)\n\n def x(self, index=None):\n if index is None:\n return (self.GetX()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetX()[index]\n\n def xerr(self, index=None):\n if index is None:\n return ((self.GetEXlow()[i], self.GetEXhigh()[i])\n for i in range(self.GetN()))\n index = index % len(self)\n return (self.GetErrorXlow(index), self.GetErrorXhigh(index))\n\n def xerrh(self, index=None):\n if index is None:\n return (self.GetEXhigh()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetErrorXhigh(index)\n\n def xerrl(self, index=None):\n if index is None:\n return (self.GetEXlow()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetErrorXlow(index)\n\n def xerravg(self, index=None):\n if index is None:\n return (self.xerravg(i) for i in range(self.GetN()))\n index = index % len(self)\n return math.sqrt(self.GetErrorXhigh(index) ** 2 +\n self.GetErrorXlow(index) ** 2)\n\n def xerrmax(self, index=None):\n if index is None:\n return (self.xerravg(i) for i in range(self.GetN()))\n index = index % len(self)\n return max(self.GetErrorXhigh(index),\n self.GetErrorXlow(index))\n\n def y(self, index=None):\n if index is None:\n return (self.GetY()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetY()[index]\n\n def yerr(self, index=None):\n if index is None:\n return (self.yerr(i) for i in range(self.GetN()))\n index = index % len(self)\n return (self.GetErrorYlow(index), self.GetErrorYhigh(index))\n\n def yerrh(self, index=None):\n if index is None:\n return (self.GetEYhigh()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetEYhigh()[index]\n\n def yerrl(self, index=None):\n if index is None:\n return (self.GetEYlow()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetEYlow()[index]\n\n def yerravg(self, index=None):\n if index is None:\n return (self.yerravg()[i] for i in range(self.GetN()))\n index = index % len(self)\n return math.sqrt(self.GetEYhigh()[index] ** 2 +\n 
self.GetEYlow()[index] ** 2)\n\n def yerravg(self, index=None):\n if index is None:\n return (self.yerravg()[i] for i in range(self.GetN()))\n index = index % len(self)\n return max(self.GetEYhigh()[index],\n self.GetEYlow()[index])\n\n def __getitem__(self, idx):\n return _GraphBase.GraphPoint(self, idx)\n\n def __setitem__(self, index, point):\n if not 0 <= index <= self.GetN():\n raise IndexError(\"graph point index out of range\")\n self.SetPoint(index, *point)\n\n\nclass _Graph1DBase(_GraphBase):\n\n @classmethod\n def divide(cls, top, bottom, option='cp'):\n from .hist import Hist\n if isinstance(top, _Graph1DBase):\n top = Hist(top)\n if isinstance(bottom, _Graph1DBase):\n bottom = Hist(bottom)\n ratio = Graph(type='asymm')\n ratio.Divide(top, bottom, option)\n return ratio\n\n def __add__(self, other):\n copy = self.Clone()\n copy += other\n return copy\n\n def __radd__(self, other):\n return self + other\n\n def __sub__(self, other):\n copy = self.Clone()\n copy -= other\n return copy\n\n def __rsub__(self, other):\n return -1 * (self - other)\n\n def __div__(self, other):\n copy = self.Clone()\n copy /= other\n return copy\n\n __truediv__ = __div__\n\n def __mul__(self, other):\n copy = self.Clone()\n copy *= other\n return copy\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n self.SetPoint(index, point.x.value, point.y.value + other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = math.sqrt((self.GetEYlow()[index]) ** 2 +\n (other.GetEYlow()[index]) ** 2)\n yhigh = math.sqrt((self.GetEYhigh()[index]) ** 2 +\n (other.GetEYhigh()[index]) ** 2)\n self.SetPoint(index, mypoint.x.value, mypoint.y.value + otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n def __isub__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n self.SetPoint(index, point.x.value, point.y.value - other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = math.sqrt((self.GetEYlow()[index]) ** 2 +\n (other.GetEYlow()[index]) ** 2)\n yhigh = math.sqrt((self.GetEYhigh()[index]) ** 2 +\n (other.GetEYhigh()[index]) ** 2)\n self.SetPoint(index, mypoint.x.value, mypoint.y.value - otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n def __idiv__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n ylow, yhigh = self.GetEYlow()[index], self.GetEYhigh()[index]\n xlow, xhigh = self.GetEXlow()[index], self.GetEXhigh()[index]\n self.SetPoint(index, point.x.value, point.y.value / other)\n self.SetPointError(index, xlow, xhigh,\n ylow / other, yhigh / other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = (\n (mypoint.y.value / otherpoint.y.value) *\n math.sqrt((self.GetEYlow()[index] / mypoint.y.value) ** 2 +\n (other.GetEYlow()[index] /\n otherpoint.y.value) ** 2))\n yhigh = (\n (mypoint.y.value / otherpoint.y.value) *\n math.sqrt((self.GetEYhigh()[index] / mypoint.y.value) ** 2 +\n (other.GetEYhigh()[index] /\n otherpoint.y.value) ** 2))\n 
self.SetPoint(index, mypoint.x.value, mypoint.y.value / otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n __itruediv__ = __idiv__\n\n def __imul__(self, other):\n if isinstance(other, numbers.Real):\n for index in range(len(self)):\n point = self[index]\n ylow, yhigh = self.GetEYlow()[index], self.GetEYhigh()[index]\n xlow, xhigh = self.GetEXlow()[index], self.GetEXhigh()[index]\n self.SetPoint(index, point.x.value, point.y.value * other)\n self.SetPointError(index, xlow, xhigh,\n ylow * other, yhigh * other)\n return self\n for index in range(len(self)):\n mypoint = self[index]\n otherpoint = other[index]\n xlow = self.GetEXlow()[index]\n xhigh = self.GetEXhigh()[index]\n ylow = (\n (mypoint.y.value * otherpoint.y.value) *\n math.sqrt((self.GetEYlow()[index] / mypoint.y.value) ** 2 +\n (other.GetEYlow()[index] / otherpoint.y.value) ** 2))\n yhigh = (\n (mypoint.y.value * otherpoint.y.value) *\n math.sqrt((self.GetEYhigh()[index] / mypoint.y.value) ** 2 +\n (other.GetEYhigh()[index] / otherpoint.y.value) ** 2))\n self.SetPoint(index, mypoint.x.value, mypoint.y.value * otherpoint.y.value)\n self.SetPointError(index, xlow, xhigh, ylow, yhigh)\n return self\n\n def GetMaximum(self, include_error=False):\n if not include_error:\n return self.GetYmax()\n summed = map(add, self.y(), self.yerrh())\n return max(summed)\n\n def GetMinimum(self, include_error=False):\n if not include_error:\n return self.GetYmin()\n summed = map(sub, self.y(), self.yerrl())\n return min(summed)\n\n def GetXmin(self):\n if len(self) == 0:\n raise ValueError(\"Attemping to get xmin of empty graph\")\n return ROOT.TMath.MinElement(self.GetN(), self.GetX())\n\n def GetXmax(self):\n if len(self) == 0:\n raise ValueError(\"Attempting to get xmax of empty graph\")\n return ROOT.TMath.MaxElement(self.GetN(), self.GetX())\n\n def GetYmin(self):\n if len(self) == 0:\n raise ValueError(\"Attempting to get ymin of empty graph\")\n return ROOT.TMath.MinElement(self.GetN(), self.GetY())\n\n def GetYmax(self):\n if len(self) == 0:\n raise ValueError(\"Attempting to get ymax of empty graph!\")\n return ROOT.TMath.MaxElement(self.GetN(), self.GetY())\n\n def GetEXhigh(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEX()\n return super(_Graph1DBase, self).GetEXhigh()\n\n def GetEXlow(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEX()\n return super(_Graph1DBase, self).GetEXlow()\n\n def GetEYhigh(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEY()\n return super(_Graph1DBase, self).GetEYhigh()\n\n def GetEYlow(self):\n if isinstance(self, ROOT.TGraphErrors):\n return self.GetEY()\n return super(_Graph1DBase, self).GetEYlow()\n\n def Crop(self, x1, x2, copy=False):\n \"\"\"\n Remove points which lie outside of [x1, x2].\n If x1 and/or x2 is below/above the current lowest/highest\n x-coordinates, additional points are added to the graph using a\n linear interpolation\n \"\"\"\n numPoints = self.GetN()\n if copy:\n cropGraph = self.Clone()\n copyGraph = self\n else:\n cropGraph = self\n copyGraph = self.Clone()\n X = copyGraph.GetX()\n EXlow = copyGraph.GetEXlow()\n EXhigh = copyGraph.GetEXhigh()\n Y = copyGraph.GetY()\n EYlow = copyGraph.GetEYlow()\n EYhigh = copyGraph.GetEYhigh()\n xmin = copyGraph.GetXmin()\n if x1 < xmin:\n cropGraph.Set(numPoints + 1)\n numPoints += 1\n xmax = copyGraph.GetXmax()\n if x2 > xmax:\n cropGraph.Set(numPoints + 1)\n numPoints += 1\n index = 0\n for i in range(numPoints):\n if i == 0 and x1 < xmin:\n 
cropGraph.SetPoint(0, x1, copyGraph.Eval(x1))\n elif i == numPoints - 1 and x2 > xmax:\n cropGraph.SetPoint(i, x2, copyGraph.Eval(x2))\n else:\n cropGraph.SetPoint(i, X[index], Y[index])\n cropGraph.SetPointError(\n i,\n EXlow[index], EXhigh[index],\n EYlow[index], EYhigh[index])\n index += 1\n return cropGraph\n\n def Reverse(self, copy=False):\n \"\"\"\n Reverse the order of the points\n \"\"\"\n numPoints = self.GetN()\n if copy:\n revGraph = self.Clone()\n else:\n revGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n index = numPoints - 1 - i\n revGraph.SetPoint(i, X[index], Y[index])\n revGraph.SetPointError(\n i,\n EXlow[index], EXhigh[index],\n EYlow[index], EYhigh[index])\n return revGraph\n\n def Invert(self, copy=False):\n \"\"\"\n Interchange the x and y coordinates of all points\n \"\"\"\n numPoints = self.GetN()\n if copy:\n invGraph = self.Clone()\n else:\n invGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n invGraph.SetPoint(i, Y[i], X[i])\n invGraph.SetPointError(\n i,\n EYlow[i], EYhigh[i],\n EXlow[i], EXhigh[i])\n return invGraph\n\n def Scale(self, value, copy=False):\n \"\"\"\n Scale the graph vertically by value\n \"\"\"\n numPoints = self.GetN()\n if copy:\n scaleGraph = self.Clone()\n else:\n scaleGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n scaleGraph.SetPoint(i, X[i], Y[i] * value)\n scaleGraph.SetPointError(\n i,\n EXlow[i], EXhigh[i],\n EYlow[i] * value, EYhigh[i] * value)\n return scaleGraph\n\n def Stretch(self, value, copy=False):\n \"\"\"\n Stretch the graph horizontally by a factor of value\n \"\"\"\n numPoints = self.GetN()\n if copy:\n stretchGraph = self.Clone()\n else:\n stretchGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n stretchGraph.SetPoint(i, X[i] * value, Y[i])\n stretchGraph.SetPointError(\n i,\n EXlow[i] * value, EXhigh[i] * value,\n EYlow[i], EYhigh[i])\n return stretchGraph\n\n def Shift(self, value, copy=False):\n \"\"\"\n Shift the graph left or right by value\n \"\"\"\n numPoints = self.GetN()\n if copy:\n shiftGraph = self.Clone()\n else:\n shiftGraph = self\n X = self.GetX()\n EXlow = self.GetEXlow()\n EXhigh = self.GetEXhigh()\n Y = self.GetY()\n EYlow = self.GetEYlow()\n EYhigh = self.GetEYhigh()\n for i in range(numPoints):\n shiftGraph.SetPoint(i, X[i] + value, Y[i])\n shiftGraph.SetPointError(\n i,\n EXlow[i], EXhigh[i],\n EYlow[i], EYhigh[i])\n return shiftGraph\n\n def Integrate(self):\n \"\"\"\n Integrate using the trapazoidal method\n \"\"\"\n area = 0.\n X = self.GetX()\n Y = self.GetY()\n for i in range(self.GetN() - 1):\n area += (X[i + 1] - X[i]) * (Y[i] + Y[i + 1]) / 2.\n return area\n\n def Append(self, other):\n \"\"\"\n Append points from another graph\n \"\"\"\n orig_len = len(self)\n self.Set(orig_len + len(other))\n ipoint = orig_len\n if hasattr(self, 'SetPointError'):\n for point in other:\n self.SetPoint(ipoint, point.x.value, point.y.value)\n self.SetPointError(\n ipoint,\n point.x.error_low, point.x.error_hi,\n point.y.error_low, point.y.error_hi)\n ipoint += 1\n 
else:\n for point in other:\n self.SetPoint(ipoint, point.x.value, point.y.value)\n ipoint += 1\n\n\nclass _Graph2DBase(_GraphBase):\n\n def z(self, index=None):\n if index is None:\n return (self.GetZ()[i] for i in range(self.GetN()))\n index = index % len(self)\n return self.GetZ()[index]\n\n def zerr(self, index=None):\n if index is None:\n return (self.zerr(i) for i in range(self.GetN()))\n index = index % len(self)\n return self.GetErrorZ(index)\n\n\n_GRAPH1D_BASES = {\n 'default': QROOT.TGraph,\n 'asymm': QROOT.TGraphAsymmErrors,\n 'errors': QROOT.TGraphErrors,\n 'benterrors': QROOT.TGraphBentErrors,\n}\n_GRAPH1D_CLASSES = {}\n\n\ndef _Graph_class(base):\n\n class Graph(_Graph1DBase, Plottable, NamelessConstructorObject,\n base):\n _ROOT = base\n DIM = 1\n\n def __init__(self, npoints_or_hist=None,\n name=None, title=None, **kwargs):\n if npoints_or_hist is not None:\n super(Graph, self).__init__(npoints_or_hist,\n name=name, title=title)\n else:\n super(Graph, self).__init__(name=name, title=title)\n self._post_init(**kwargs)\n\n return Graph\n\nfor name, base in _GRAPH1D_BASES.items():\n _GRAPH1D_CLASSES[name] = snake_case_methods(_Graph_class(base))\n\n\nclass Graph(_Graph1DBase, QROOT.TGraph):\n \"\"\"\n Returns a Graph object which inherits from the associated\n ROOT.TGraph* class (TGraph, TGraphErrors, TGraphAsymmErrors)\n \"\"\"\n _ROOT = QROOT.TGraph\n DIM = 1\n\n @classmethod\n def dynamic_cls(cls, type='asymm'):\n return _GRAPH1D_CLASSES[type]\n\n def __new__(cls, *args, **kwargs):\n type = kwargs.pop('type', 'asymm').lower()\n return cls.dynamic_cls(type)(\n *args, **kwargs)\n\n\n# alias Graph1D -> Graph\nGraph1D = Graph\n\n_GRAPH2D_BASES = {\n 'default': QROOT.TGraph2D,\n 'errors': QROOT.TGraph2DErrors,\n}\n_GRAPH2D_CLASSES = {}\n\n\ndef _Graph2D_class(base):\n\n class Graph2D(_Graph2DBase, Plottable, NamelessConstructorObject,\n base):\n _ROOT = base\n DIM = 2\n\n def __init__(self, npoints_or_hist=None,\n name=None, title=None, **kwargs):\n if npoints_or_hist is not None:\n super(Graph2D, self).__init__(npoints_or_hist,\n name=name, title=title)\n else:\n super(Graph2D, self).__init__(name=name, title=title)\n if isinstance(npoints_or_hist, int):\n # ROOT bug in TGraph2D\n self.Set(npoints_or_hist)\n self._post_init(**kwargs)\n\n return Graph2D\n\nfor name, base in _GRAPH2D_BASES.items():\n _GRAPH2D_CLASSES[name] = snake_case_methods(_Graph2D_class(base))\n\n\nclass Graph2D(_Graph2DBase, QROOT.TGraph2D):\n \"\"\"\n Returns a Graph2D object which inherits from the associated\n ROOT.TGraph2D* class (TGraph2D, TGraph2DErrors)\n \"\"\"\n _ROOT = QROOT.TGraph2D\n DIM = 2\n\n @classmethod\n def dynamic_cls(cls, type='errors'):\n return _GRAPH2D_CLASSES[type]\n\n def __new__(cls, *args, **kwargs):\n type = kwargs.pop('type', 'errors').lower()\n return cls.dynamic_cls(type)(\n *args, **kwargs)\n", "path": "rootpy/plotting/graph.py" } ]
diff --git a/rootpy/plotting/graph.py b/rootpy/plotting/graph.py index 7cd5f0b8..b62ba6eb 100644 --- a/rootpy/plotting/graph.py +++ b/rootpy/plotting/graph.py @@ -93,7 +93,7 @@ def error_low(self, val): if self.isdefault: return getattr( self.graph_, - 'voidSetPointE{0}low'.format(self.axis_.upper()) + 'SetPointE{0}low'.format(self.axis_.upper()) )(self.index_, val)
helmholtz-analytics__heat-406
Recent CI runs failing with NetCDF: HDF error

**Description**
Recent CI (and local) runs of our tests fail with messages like

```
E RuntimeError: NetCDF: HDF error

netCDF4/_netCDF4.pyx:1887: RuntimeError

During handling of the above exception, another exception occurred:

self = <heat.core.tests.test_io.TestIO testMethod=test_save_netcdf>

    def test_save_netcdf(self):
        # netcdf support is optional
        if not ht.io.supports_netcdf():
            return

        # local unsplit data
        local_data = ht.arange(100)
>       ht.save_netcdf(local_data, self.NETCDF_OUT_PATH, self.NETCDF_VARIABLE)

heat/core/tests/test_io.py:373:
```

**To Reproduce**
Steps to reproduce the behavior:

1. Which module/class/function is affected?
   heat/core/tests/test_io.py
2. What are the circumstances under which the bug appears?
   ANY, just run from current master
3. What is the exact error message/erroneous behaviour?
   cf. above.

**Expected behavior**
Tests should run successfully.

**Illustrative**
https://travis-ci.com/helmholtz-analytics/heat/builds/135270829

**Version Info**
Topic branch, but master would suffer from a rebuild.

**Additional comments**
The fix will be to pin the NetCDF dependency to <=1.5.2. Problems start to occur with 1.5.3.
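For illustration, the pin described above would look roughly like this in the package's optional dependencies (a minimal sketch of the relevant `setup()` argument only, not the full setup script):

```
from setuptools import setup

setup(
    name="heat",
    # ...all other metadata and install_requires elided for brevity...
    extras_require={
        # netCDF4 1.5.3 is where the "NetCDF: HDF error" starts to appear,
        # so cap the optional netcdf extra at 1.5.2
        "netcdf": ["netCDF4>=1.4.0,<=1.5.2"],
    },
)
```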
[ { "content": "from setuptools import setup\nimport sys\n\nsys.path.append(\"./heat/core\")\nimport version\n\nprint(version, dir(version))\n\nwith open(\"README.md\", \"r\") as handle:\n long_description = handle.read()\n\n# with open('./heat/core/version.py') as handle:\n# exec(handle.read())\n# print(dir())\n\nsetup(\n name=\"heat\",\n packages=[\"heat\", \"heat.core\", \"heat.ml\", \"heat.ml.cluster\"],\n data_files=[\"README.md\", \"LICENSE\"],\n version=version.__version__,\n description=\"A framework for high performance data analytics and machine learning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Helmholtz Association\",\n author_email=\"[email protected]\",\n url=\"https://github.com/helmholtz-analytics/heat\",\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Programming Language :: Python :: 3.5\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n ],\n install_requires=[\"mpi4py>=3.0.0\", \"numpy>=1.13.0\", \"torch==1.3.0\"],\n extras_require={\n \"hdf5\": [\"h5py>=2.8.0\"],\n \"netcdf\": [\"netCDF4>=1.4.0\"],\n \"dev\": [\"pre-commit>=1.18.3\"],\n },\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup\nimport sys\n\nsys.path.append(\"./heat/core\")\nimport version\n\nprint(version, dir(version))\n\nwith open(\"README.md\", \"r\") as handle:\n long_description = handle.read()\n\n# with open('./heat/core/version.py') as handle:\n# exec(handle.read())\n# print(dir())\n\nsetup(\n name=\"heat\",\n packages=[\"heat\", \"heat.core\", \"heat.ml\", \"heat.ml.cluster\"],\n data_files=[\"README.md\", \"LICENSE\"],\n version=version.__version__,\n description=\"A framework for high performance data analytics and machine learning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Helmholtz Association\",\n author_email=\"[email protected]\",\n url=\"https://github.com/helmholtz-analytics/heat\",\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Programming Language :: Python :: 3.5\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n ],\n install_requires=[\"mpi4py>=3.0.0\", \"numpy>=1.13.0\", \"torch==1.3.0\"],\n extras_require={\n \"hdf5\": [\"h5py>=2.8.0\"],\n \"netcdf\": [\"netCDF4>=1.4.0,<=1.5.2\"],\n \"dev\": [\"pre-commit>=1.18.3\"],\n },\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index bedeb05b67..5ab6519be3 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ install_requires=["mpi4py>=3.0.0", "numpy>=1.13.0", "torch==1.3.0"], extras_require={ "hdf5": ["h5py>=2.8.0"], - "netcdf": ["netCDF4>=1.4.0"], + "netcdf": ["netCDF4>=1.4.0,<=1.5.2"], "dev": ["pre-commit>=1.18.3"], }, )
ansible__awx-13645
Websocket not working at non-root path

### Please confirm the following

- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.

### Summary

Changes from #11342 and #652 are not complete.

### AWX version

21.0.0

### Select the relevant components

- [X] UI
- [ ] API
- [ ] Docs

### Installation method

kubernetes

### Modifications

no

### Ansible version

_No response_

### Operating system

_No response_

### Web browser

_No response_

### Steps to reproduce

Deploy AWX with a custom `ingress_path: /awx`.

### Expected results

The websocket should work.

### Actual results

```
2022-05-17 08:46:41,031 ERROR [-] daphne.ws_protocol [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect
--- <exception caught here> ---
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__
]
2022-05-17 08:46:41,031 ERROR [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect
--- <exception caught here> ---
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__
```

### Additional information

It seems that the issue is in https://github.com/ansible/awx/blob/48b016802c517ff04d1cff4c43e64f17bb77a7a8/awx/main/routing.py

```
websocket_urlpatterns = [
    re_path(r'websocket/$', consumers.EventConsumer),
    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),
]
```

From https://docs.djangoproject.com/en/4.0/ref/urls/: When a route ends with $ the whole requested URL, matching against path_info, must match the regular expression pattern (re.fullmatch() is used).

Replacing with

```
websocket_urlpatterns = [
    re_path(r'websocket/', consumers.EventConsumer),
    re_path(r'websocket/broadcast/', consumers.BroadcastConsumer),
]
```

solves the issue.
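To make the anchoring difference described above concrete, here is a minimal sketch using only Python's built-in `re` module (plain `re` is used purely for illustration; the actual matching happens inside the Django/channels URL routing layer):

```
import re

# The path channels tries to route when AWX is served under a non-root path.
path = "awx/websocket/"

# With a trailing $ the whole path has to match (fullmatch semantics),
# so the leading "awx/" prefix makes the lookup fail.
print(bool(re.fullmatch(r"websocket/$", path)))  # False

# Without full anchoring, the pattern only needs to be found in the path.
print(bool(re.search(r"websocket/", path)))      # True
```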
[ { "content": "import redis\nimport logging\n\nfrom django.conf import settings\nfrom django.urls import re_path\n\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nfrom . import consumers\n\n\nlogger = logging.getLogger('awx.main.routing')\n\n\nclass AWXProtocolTypeRouter(ProtocolTypeRouter):\n def __init__(self, *args, **kwargs):\n try:\n r = redis.Redis.from_url(settings.BROKER_URL)\n for k in r.scan_iter('asgi:*', 500):\n logger.debug(f\"cleaning up Redis key {k}\")\n r.delete(k)\n except redis.exceptions.RedisError as e:\n logger.warning(\"encountered an error communicating with redis.\")\n raise e\n super().__init__(*args, **kwargs)\n\n\nwebsocket_urlpatterns = [\n re_path(r'websocket/', consumers.EventConsumer.as_asgi()),\n re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),\n]\n\napplication = AWXProtocolTypeRouter(\n {\n 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),\n }\n)\n", "path": "awx/main/routing.py" } ]
[ { "content": "import redis\nimport logging\n\nfrom django.conf import settings\nfrom django.urls import re_path\n\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nfrom . import consumers\n\n\nlogger = logging.getLogger('awx.main.routing')\n\n\nclass AWXProtocolTypeRouter(ProtocolTypeRouter):\n def __init__(self, *args, **kwargs):\n try:\n r = redis.Redis.from_url(settings.BROKER_URL)\n for k in r.scan_iter('asgi:*', 500):\n logger.debug(f\"cleaning up Redis key {k}\")\n r.delete(k)\n except redis.exceptions.RedisError as e:\n logger.warning(\"encountered an error communicating with redis.\")\n raise e\n super().__init__(*args, **kwargs)\n\n\nwebsocket_urlpatterns = [\n re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),\n re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),\n]\n\napplication = AWXProtocolTypeRouter(\n {\n 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),\n }\n)\n", "path": "awx/main/routing.py" } ]
diff --git a/awx/main/routing.py b/awx/main/routing.py index c96505b7e120..100347f64e55 100644 --- a/awx/main/routing.py +++ b/awx/main/routing.py @@ -27,8 +27,8 @@ def __init__(self, *args, **kwargs): websocket_urlpatterns = [ - re_path(r'websocket/', consumers.EventConsumer.as_asgi()), - re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()), + re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), + re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()), ] application = AWXProtocolTypeRouter(
encode__httpx-1255
Docs incorrectly reference `HTTPError.message` attribute

### Checklist

<!-- Please make sure you check all these items before submitting your bug report. -->

- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.

### Describe the bug

<!-- A clear and concise description of what the bug is. -->

The documentation indicates that the `message` field can be used on `HTTPError`: https://github.com/encode/httpx/blob/master/httpx/_exceptions.py#L54

```
try:
    response = httpx.get("https://www.example.com")
    response.raise_for_status()
except httpx.HTTPError as exc:
    print(f"HTTP Exception for {exc.request.url} - {exc.message}")
```

But there is no such field:

```
AttributeError: 'HTTPStatusError' object has no attribute 'message'
```

### To reproduce

Execute the example from the doc.

### Expected behavior

Print the string without raising any exceptions.

### Actual behavior

AttributeError is raised.

### Possible fixes

1. Update the documentation to use `str(exc)` instead of `exc.message`, or
2. Set a `self.message` field in `HTTPError`.
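A minimal sketch of the first proposed fix, i.e. printing the exception itself rather than the non-existent `.message` attribute (it mirrors the documented example above with only the f-string changed):

```
import httpx

try:
    response = httpx.get("https://www.example.com")
    response.raise_for_status()
except httpx.HTTPError as exc:
    # str(exc) already carries the error text; there is no .message attribute.
    print(f"HTTP Exception for {exc.request.url} - {exc}")
```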
[ { "content": "\"\"\"\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n · ConnectTimeout\n · ReadTimeout\n · WriteTimeout\n · PoolTimeout\n - NetworkError\n · ConnectError\n · ReadError\n · WriteError\n · CloseError\n - ProtocolError\n · LocalProtocolError\n · RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n + RequestBodyUnavailable\n x HTTPStatusError\n* InvalidURL\n* NotRedirectResponse\n* CookieConflict\n* StreamError\n x StreamConsumed\n x ResponseNotRead\n x RequestNotRead\n x ResponseClosed\n\"\"\"\nimport contextlib\nimport typing\n\nimport httpcore\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: nocover\n\n\nclass HTTPError(Exception):\n \"\"\"\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling `.raise_for_status()`.\n\n For example:\n\n ```\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n ```\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message)\n self.request = request\n\n\nclass RequestError(HTTPError):\n \"\"\"\n Base class for all exceptions that may occur when issuing a `.request()`.\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message, request=request)\n\n\nclass TransportError(RequestError):\n \"\"\"\n Base class for all exceptions that occur at the level of the Transport API.\n\n All of these exceptions also have an equivelent mapping in `httpcore`.\n \"\"\"\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n \"\"\"\n The base class for timeout errors.\n\n An operation has timed out.\n \"\"\"\n\n\nclass ConnectTimeout(TimeoutException):\n \"\"\"\n Timed out while connecting to the host.\n \"\"\"\n\n\nclass ReadTimeout(TimeoutException):\n \"\"\"\n Timed out while receiving data from the host.\n \"\"\"\n\n\nclass WriteTimeout(TimeoutException):\n \"\"\"\n Timed out while sending data to the host.\n \"\"\"\n\n\nclass PoolTimeout(TimeoutException):\n \"\"\"\n Timed out waiting to acquire a connection from the pool.\n \"\"\"\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n \"\"\"\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n \"\"\"\n\n\nclass ReadError(NetworkError):\n \"\"\"\n Failed to receive data from the network.\n \"\"\"\n\n\nclass WriteError(NetworkError):\n \"\"\"\n Failed to send data through the network.\n \"\"\"\n\n\nclass ConnectError(NetworkError):\n \"\"\"\n Failed to establish a connection.\n \"\"\"\n\n\nclass CloseError(NetworkError):\n \"\"\"\n Failed to close a connection.\n \"\"\"\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n \"\"\"\n An error occurred while establishing a proxy connection.\n \"\"\"\n\n\nclass UnsupportedProtocol(TransportError):\n \"\"\"\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n \"\"\"\n\n\nclass ProtocolError(TransportError):\n \"\"\"\n The protocol was violated.\n \"\"\"\n\n\nclass LocalProtocolError(ProtocolError):\n \"\"\"\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to include 
the mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n \"\"\"\n\n\nclass RemoteProtocolError(ProtocolError):\n \"\"\"\n The protocol was violated by the server.\n\n For exaample, returning malformed HTTP.\n \"\"\"\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n \"\"\"\n Decoding of the response failed, due to a malformed encoding.\n \"\"\"\n\n\nclass TooManyRedirects(RequestError):\n \"\"\"\n Too many redirects.\n \"\"\"\n\n\nclass RequestBodyUnavailable(RequestError):\n \"\"\"\n Had to send the request again, but the request body was streaming, and is\n no longer available.\n \"\"\"\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n \"\"\"\n The response had an error HTTP status of 4xx or 5xx.\n\n May be raised when calling `response.raise_for_status()`\n \"\"\"\n\n def __init__(\n self, message: str, *, request: \"Request\", response: \"Response\"\n ) -> None:\n super().__init__(message, request=request)\n self.response = response\n\n\nclass InvalidURL(Exception):\n \"\"\"\n URL is improperly formed or cannot be parsed.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass NotRedirectResponse(Exception):\n \"\"\"\n Response was not a redirect response.\n\n May be raised if `response.next()` is called without first\n properly checking `response.is_redirect`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n \"\"\"\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(Exception):\n \"\"\"\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the content has already\n been streamed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the content has \"\n \"already been streamed.\"\n )\n super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n \"\"\"\n Attempted to access response content, without having called `read()`\n after a streaming response.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to access response content, without having called `read()` \"\n \"after a streaming response.\"\n )\n super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n \"\"\"\n Attempted to access request content, without having called `read()`.\n \"\"\"\n\n def __init__(self) -> None:\n message = \"Attempted to access request content, without having called `read()`.\"\n super().__init__(message)\n\n\nclass ResponseClosed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the request has been\n closed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the request has \"\n \"been closed.\"\n )\n super().__init__(message)\n\n\[email protected]\ndef map_exceptions(\n mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],\n **kwargs: typing.Any,\n) -> 
typing.Iterator[None]:\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in mapping.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None:\n raise\n\n message = str(exc)\n raise mapped_exc(message, **kwargs) from exc # type: ignore\n\n\nHTTPCORE_EXC_MAP = {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.CloseError: CloseError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n}\n", "path": "httpx/_exceptions.py" } ]
[ { "content": "\"\"\"\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n · ConnectTimeout\n · ReadTimeout\n · WriteTimeout\n · PoolTimeout\n - NetworkError\n · ConnectError\n · ReadError\n · WriteError\n · CloseError\n - ProtocolError\n · LocalProtocolError\n · RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n + RequestBodyUnavailable\n x HTTPStatusError\n* InvalidURL\n* NotRedirectResponse\n* CookieConflict\n* StreamError\n x StreamConsumed\n x ResponseNotRead\n x RequestNotRead\n x ResponseClosed\n\"\"\"\nimport contextlib\nimport typing\n\nimport httpcore\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: nocover\n\n\nclass HTTPError(Exception):\n \"\"\"\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling `.raise_for_status()`.\n\n For example:\n\n ```\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc}\")\n ```\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message)\n self.request = request\n\n\nclass RequestError(HTTPError):\n \"\"\"\n Base class for all exceptions that may occur when issuing a `.request()`.\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message, request=request)\n\n\nclass TransportError(RequestError):\n \"\"\"\n Base class for all exceptions that occur at the level of the Transport API.\n\n All of these exceptions also have an equivelent mapping in `httpcore`.\n \"\"\"\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n \"\"\"\n The base class for timeout errors.\n\n An operation has timed out.\n \"\"\"\n\n\nclass ConnectTimeout(TimeoutException):\n \"\"\"\n Timed out while connecting to the host.\n \"\"\"\n\n\nclass ReadTimeout(TimeoutException):\n \"\"\"\n Timed out while receiving data from the host.\n \"\"\"\n\n\nclass WriteTimeout(TimeoutException):\n \"\"\"\n Timed out while sending data to the host.\n \"\"\"\n\n\nclass PoolTimeout(TimeoutException):\n \"\"\"\n Timed out waiting to acquire a connection from the pool.\n \"\"\"\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n \"\"\"\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n \"\"\"\n\n\nclass ReadError(NetworkError):\n \"\"\"\n Failed to receive data from the network.\n \"\"\"\n\n\nclass WriteError(NetworkError):\n \"\"\"\n Failed to send data through the network.\n \"\"\"\n\n\nclass ConnectError(NetworkError):\n \"\"\"\n Failed to establish a connection.\n \"\"\"\n\n\nclass CloseError(NetworkError):\n \"\"\"\n Failed to close a connection.\n \"\"\"\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n \"\"\"\n An error occurred while establishing a proxy connection.\n \"\"\"\n\n\nclass UnsupportedProtocol(TransportError):\n \"\"\"\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n \"\"\"\n\n\nclass ProtocolError(TransportError):\n \"\"\"\n The protocol was violated.\n \"\"\"\n\n\nclass LocalProtocolError(ProtocolError):\n \"\"\"\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to include the 
mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n \"\"\"\n\n\nclass RemoteProtocolError(ProtocolError):\n \"\"\"\n The protocol was violated by the server.\n\n For exaample, returning malformed HTTP.\n \"\"\"\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n \"\"\"\n Decoding of the response failed, due to a malformed encoding.\n \"\"\"\n\n\nclass TooManyRedirects(RequestError):\n \"\"\"\n Too many redirects.\n \"\"\"\n\n\nclass RequestBodyUnavailable(RequestError):\n \"\"\"\n Had to send the request again, but the request body was streaming, and is\n no longer available.\n \"\"\"\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n \"\"\"\n The response had an error HTTP status of 4xx or 5xx.\n\n May be raised when calling `response.raise_for_status()`\n \"\"\"\n\n def __init__(\n self, message: str, *, request: \"Request\", response: \"Response\"\n ) -> None:\n super().__init__(message, request=request)\n self.response = response\n\n\nclass InvalidURL(Exception):\n \"\"\"\n URL is improperly formed or cannot be parsed.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass NotRedirectResponse(Exception):\n \"\"\"\n Response was not a redirect response.\n\n May be raised if `response.next()` is called without first\n properly checking `response.is_redirect`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n \"\"\"\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(Exception):\n \"\"\"\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the content has already\n been streamed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the content has \"\n \"already been streamed.\"\n )\n super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n \"\"\"\n Attempted to access response content, without having called `read()`\n after a streaming response.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to access response content, without having called `read()` \"\n \"after a streaming response.\"\n )\n super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n \"\"\"\n Attempted to access request content, without having called `read()`.\n \"\"\"\n\n def __init__(self) -> None:\n message = \"Attempted to access request content, without having called `read()`.\"\n super().__init__(message)\n\n\nclass ResponseClosed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the request has been\n closed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the request has \"\n \"been closed.\"\n )\n super().__init__(message)\n\n\[email protected]\ndef map_exceptions(\n mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],\n **kwargs: typing.Any,\n) -> 
typing.Iterator[None]:\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in mapping.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None:\n raise\n\n message = str(exc)\n raise mapped_exc(message, **kwargs) from exc # type: ignore\n\n\nHTTPCORE_EXC_MAP = {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.CloseError: CloseError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n}\n", "path": "httpx/_exceptions.py" } ]
diff --git a/httpx/_exceptions.py b/httpx/_exceptions.py
index 4d6837778a..260d14ee5f 100644
--- a/httpx/_exceptions.py
+++ b/httpx/_exceptions.py
@@ -55,7 +55,7 @@ class HTTPError(Exception):
         response = httpx.get("https://www.example.com")
         response.raise_for_status()
     except httpx.HTTPError as exc:
-        print(f"HTTP Exception for {exc.request.url} - {exc.message}")
+        print(f"HTTP Exception for {exc.request.url} - {exc}")
     ```
     """
 
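A minimal usage sketch of the corrected docstring example, not part of the patch itself; it assumes `httpx` is installed, network access is available, and that `https://httpbin.org/status/404` is a reachable endpoint returning an error status. The point of the diff is that exceptions in Python 3 have no `.message` attribute, so the message is obtained by interpolating `exc` (i.e. `str(exc)`):

```python
# Hypothetical sketch (assumes network access): why the docstring switched
# from `exc.message` to `exc`.
import httpx

try:
    response = httpx.get("https://httpbin.org/status/404")  # assumed test endpoint
    response.raise_for_status()  # raises HTTPStatusError, a subclass of HTTPError
except httpx.HTTPError as exc:
    # `exc.message` would raise AttributeError on Python 3; str(exc) holds the text.
    print(f"HTTP Exception for {exc.request.url} - {exc}")
```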
ivy-llc__ivy-23027
solve
### Bug Explanation
The `paddle.linalg.solve` tests are failing. Neither the test nor the frontend function is implemented properly: the test should generate two matrices of shape [*, M, M] and [*, M, K], but it currently generates two matrices of the same shape, and the function's parameter names are mismatched, so the call fails with `TypeError: solve() got an unexpected keyword argument 'x'`.

### Steps to Reproduce Bug
Run: `pytest ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_linalg.py::test_paddle_solve`

### Environment
macOS: 13.5

### Ivy Version
0.0.0.0.0

### Backend
- [ ] NumPy
- [ ] TensorFlow
- [ ] PyTorch
- [ ] JAX

### Device
Mac M1
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"int64\",)}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bincount(x, weights=None, minlength=0, name=None):\n return ivy.bincount(x, weights=weights, minlength=minlength)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# cholesky\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# cholesky_solve\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky_solve(x, y, /, *, upper=False, name=None):\n if upper:\n y = ivy.matrix_transpose(y)\n Y = ivy.solve(y, x)\n return ivy.solve(ivy.matrix_transpose(y), Y)\n\n\n# cond\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cond(x, p=None, name=None):\n ret = ivy.cond(x, p=p, out=name)\n if ret.shape == ():\n ret = ret.reshape((1,))\n return ret\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"float64\", \"float32\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dist(x, y, p=2):\n ret = ivy.vector_norm(ivy.subtract(x, y), ord=p)\n return ivy.reshape(ret, (1,))\n\n\n# dot\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dot(x, y, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n out = ivy.multiply(x, y)\n return ivy.sum(out, axis=ivy.get_num_dims(x) - 1, keepdims=False)\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, n)\n\n\n# norm\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, 
keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n return ret\n\n\n# pinv\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# qr\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef qr(x, mode=\"reduced\", name=None):\n return ivy.qr(x, mode=mode)\n\n\n# solve\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef solve(x1, x2, name=None):\n return ivy.solve(x1, x2)\n\n\n# transpose\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"uint8\", \"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef transpose(x, perm, name=None):\n return ivy.permute_dims(x, axes=perm)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py" } ]
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"int64\",)}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bincount(x, weights=None, minlength=0, name=None):\n return ivy.bincount(x, weights=weights, minlength=minlength)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# cholesky\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# cholesky_solve\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky_solve(x, y, /, *, upper=False, name=None):\n if upper:\n y = ivy.matrix_transpose(y)\n Y = ivy.solve(y, x)\n return ivy.solve(ivy.matrix_transpose(y), Y)\n\n\n# cond\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cond(x, p=None, name=None):\n ret = ivy.cond(x, p=p, out=name)\n if ret.shape == ():\n ret = ret.reshape((1,))\n return ret\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"float64\", \"float32\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dist(x, y, p=2):\n ret = ivy.vector_norm(ivy.subtract(x, y), ord=p)\n return ivy.reshape(ret, (1,))\n\n\n# dot\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dot(x, y, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n out = ivy.multiply(x, y)\n return ivy.sum(out, axis=ivy.get_num_dims(x) - 1, keepdims=False)\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, n)\n\n\n# norm\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, 
keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n return ret\n\n\n# pinv\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# qr\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef qr(x, mode=\"reduced\", name=None):\n return ivy.qr(x, mode=mode)\n\n\n# solve\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef solve(x, y, name=None):\n return ivy.solve(x, y)\n\n\n# transpose\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"uint8\", \"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef transpose(x, perm, name=None):\n return ivy.permute_dims(x, axes=perm)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py" } ]
diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py
index 4ae10e9824324..34eff9474cb1b 100644
--- a/ivy/functional/frontends/paddle/tensor/linalg.py
+++ b/ivy/functional/frontends/paddle/tensor/linalg.py
@@ -183,10 +183,10 @@ def qr(x, mode="reduced", name=None):
 
 
 # solve
-@with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
+@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
 @to_ivy_arrays_and_back
-def solve(x1, x2, name=None):
-    return ivy.solve(x1, x2)
+def solve(x, y, name=None):
+    return ivy.solve(x, y)
 
 
 # transpose
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_linalg.py
index 1290c944bef8d..9712e8fede1d4 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_linalg.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_linalg.py
@@ -12,6 +12,7 @@
 )
 
 from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_linalg import (
+    _get_first_matrix,
     _get_second_matrix,
     _get_cholesky_matrix,
 )
@@ -872,36 +873,35 @@ def test_paddle_qr(
 
 # solve
 @handle_frontend_test(
-    fn_tree="paddle.solve",
-    dtype_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
-        num_arrays=2,
-        shared_dtype=True,
-        min_value=-10,
-        max_value=10,
-    ),
-    aliases=["paddle.tensor.linalg.solve"],
+    fn_tree="paddle.tensor.linalg.solve",
+    aliases=["paddle.linalg.solve"],
+    x=_get_first_matrix(),
+    y=_get_second_matrix(),
     test_with_out=st.just(False),
 )
 def test_paddle_solve(
     *,
-    dtype_x,
+    x,
+    y,
     frontend,
-    test_flags,
     backend_fw,
+    test_flags,
     fn_tree,
     on_device,
 ):
-    input_dtype, x = dtype_x
+    input_dtype1, x1 = x
+    input_dtype2, x2 = y
     helpers.test_frontend_function(
-        input_dtypes=input_dtype,
-        frontend=frontend,
+        input_dtypes=[input_dtype1, input_dtype2],
         backend_to_test=backend_fw,
+        frontend=frontend,
         test_flags=test_flags,
         fn_tree=fn_tree,
         on_device=on_device,
-        x=x[0],
-        y=x[1],
+        rtol=1e-3,
+        atol=1e-3,
+        x=x1,
+        y=x2,
     )
 
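As a sanity check of the shape contract the rewritten test relies on (this is illustrative only and uses NumPy as a stand-in for the ivy/paddle frontend; the test's `_get_first_matrix`/`_get_second_matrix` helpers presumably generate compatible shapes): `solve(x, y)` expects a square coefficient matrix `x` of shape [M, M] and a right-hand side `y` of shape [M, K], not two arrays of the same arbitrary shape.

```python
# Illustrative only: NumPy stands in for ivy/paddle; M and K are arbitrary.
import numpy as np

M, K = 4, 2
x = np.random.rand(M, M) + M * np.eye(M)   # diagonally dominant, hence invertible
y = np.random.rand(M, K)

out = np.linalg.solve(x, y)                # same contract as paddle.linalg.solve(x, y)
assert out.shape == (M, K)
assert np.allclose(x @ out, y)             # the solution satisfies x @ out == y
```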
DataDog__dd-trace-py-5408
Throws exception when reloading module in REPL (specifically with iPython), even when `DD_TRACE_ENABLED=false`
<!--
Thanks for taking the time for reporting an issue!

Before reporting an issue on dd-trace-py, please be sure to provide all
necessary information.

If you're hitting a bug, make sure that you're using the latest version of this
library.
-->

### Summary of problem

The exception includes this stack trace:

```python
[autoreload of my_module failed: Traceback (most recent call last):
  File "/home/ory/.pyenv/versions/3.10.7/envs/my_module/lib/python3.10/site-packages/IPython/extensions/autoreload.py", line 261, in check
    superreload(m, reload, self.old_objects)
  File "/home/ory/.pyenv/versions/3.10.7/envs/my_module/lib/python3.10/site-packages/IPython/extensions/autoreload.py", line 459, in superreload
    module = reload(module)
  File "/home/ory/.pyenv/versions/3.10.7/lib/python3.10/importlib/__init__.py", line 166, in reload
    spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target)
  File "<frozen importlib._bootstrap>", line 945, in _find_spec
  File "/home/ory/.pyenv/versions/3.10.7/envs/my_module/lib/python3.10/site-packages/ddtrace/internal/module.py", line 368, in find_spec
    spec = find_spec(fullname)
  File "/home/ory/.pyenv/versions/3.10.7/lib/python3.10/importlib/util.py", line 111, in find_spec
    raise ValueError('{}.__spec__ is not set'.format(name)) from None
ValueError: my_module.__spec__ is not set
```

### Which version of dd-trace-py are you using?

1.8.0

### Which version of pip are you using?

22.2.2
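The root cause can be reproduced without IPython: `importlib.util.find_spec()` raises `ValueError` for a module that is already present in `sys.modules` but whose `__spec__` is missing or unusable, and that is exactly the call made by `ModuleWatchdog.find_spec` in the file below. A minimal sketch (the module name is made up):

```python
# Hypothetical reproduction: a module registered in sys.modules without a usable __spec__.
import importlib.util
import sys
import types

mod = types.ModuleType("fake_module")
mod.__spec__ = None          # mimic a module whose spec was never recorded
sys.modules["fake_module"] = mod

try:
    importlib.util.find_spec("fake_module")
except ValueError as exc:
    print(exc)               # complains that fake_module.__spec__ is not usable
```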
[ { "content": "from collections import defaultdict\nfrom os.path import abspath\nfrom os.path import expanduser\nfrom os.path import isdir\nfrom os.path import isfile\nfrom os.path import join\nimport sys\nfrom types import ModuleType\nfrom typing import Any\nfrom typing import Callable\nfrom typing import DefaultDict\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import Union\nfrom typing import cast\n\nfrom ddtrace.internal.compat import PY2\nfrom ddtrace.internal.logger import get_logger\nfrom ddtrace.internal.utils import get_argument_value\n\n\nlog = get_logger(__name__)\n\nModuleHookType = Callable[[ModuleType], None]\nPreExecHookType = Callable[[Any, ModuleType], None]\nPreExecHookCond = Union[str, Callable[[str], bool]]\n\n\n_run_code = None\n_post_run_module_hooks = [] # type: List[ModuleHookType]\n\n\ndef _wrapped_run_code(*args, **kwargs):\n # type: (*Any, **Any) -> Dict[str, Any]\n global _run_code, _post_run_module_hooks\n\n # DEV: If we are calling this wrapper then _run_code must have been set to\n # the original runpy._run_code.\n assert _run_code is not None\n\n mod_name = get_argument_value(args, kwargs, 3, \"mod_name\")\n\n try:\n return _run_code(*args, **kwargs)\n finally:\n module = sys.modules[mod_name]\n for hook in _post_run_module_hooks:\n hook(module)\n\n\ndef _patch_run_code():\n # type: () -> None\n global _run_code\n\n if _run_code is None:\n import runpy\n\n _run_code = runpy._run_code # type: ignore[attr-defined]\n runpy._run_code = _wrapped_run_code # type: ignore[attr-defined]\n\n\ndef register_post_run_module_hook(hook):\n # type: (ModuleHookType) -> None\n \"\"\"Register a post run module hook.\n\n The hooks gets called after the module is loaded. For this to work, the\n hook needs to be registered during the interpreter initialization, e.g. 
as\n part of a sitecustomize.py script.\n \"\"\"\n global _run_code, _post_run_module_hooks\n\n _patch_run_code()\n\n _post_run_module_hooks.append(hook)\n\n\ndef unregister_post_run_module_hook(hook):\n # type: (ModuleHookType) -> None\n \"\"\"Unregister a post run module hook.\n\n If the hook was not registered, a ``ValueError`` exception is raised.\n \"\"\"\n global _post_run_module_hooks\n\n _post_run_module_hooks.remove(hook)\n\n\ndef origin(module):\n # type: (ModuleType) -> str\n \"\"\"Get the origin source file of the module.\"\"\"\n try:\n # DEV: Use object.__getattribute__ to avoid potential side-effects.\n orig = abspath(object.__getattribute__(module, \"__file__\"))\n except (AttributeError, TypeError):\n # Module is probably only partially initialised, so we look at its\n # spec instead\n try:\n # DEV: Use object.__getattribute__ to avoid potential side-effects.\n orig = abspath(object.__getattribute__(module, \"__spec__\").origin)\n except (AttributeError, ValueError, TypeError):\n orig = None\n\n if orig is not None and isfile(orig):\n if orig.endswith(\".pyc\"):\n orig = orig[:-1]\n return orig\n\n return \"<unknown origin>\"\n\n\ndef _resolve(path):\n # type: (str) -> Optional[str]\n \"\"\"Resolve a (relative) path with respect to sys.path.\"\"\"\n for base in sys.path:\n if isdir(base):\n resolved_path = abspath(join(base, expanduser(path)))\n if isfile(resolved_path):\n return resolved_path\n return None\n\n\n# Borrowed from the wrapt module\n# https://github.com/GrahamDumpleton/wrapt/blob/df0e62c2740143cceb6cafea4c306dae1c559ef8/src/wrapt/importer.py\n\nif PY2:\n import pkgutil\n\n find_spec = ModuleSpec = None\n Loader = object\n\n find_loader = pkgutil.find_loader\n\nelse:\n from importlib.abc import Loader\n from importlib.machinery import ModuleSpec\n from importlib.util import find_spec\n\n def find_loader(fullname):\n # type: (str) -> Optional[Loader]\n return getattr(find_spec(fullname), \"loader\", None)\n\n\nLEGACY_DICT_COPY = sys.version_info < (3, 6)\n\n\nclass _ImportHookChainedLoader(Loader):\n def __init__(self, loader):\n # type: (Loader) -> None\n self.loader = loader\n self.callbacks = {} # type: Dict[Any, Callable[[ModuleType], None]]\n\n # DEV: load_module is deprecated so we define it at runtime if also\n # defined by the default loader. We also check and define for the\n # methods that are supposed to replace the load_module functionality.\n if hasattr(loader, \"load_module\"):\n self.load_module = self._load_module # type: ignore[assignment]\n if hasattr(loader, \"create_module\"):\n self.create_module = self._create_module # type: ignore[assignment]\n if hasattr(loader, \"exec_module\"):\n self.exec_module = self._exec_module # type: ignore[assignment]\n\n def __getattribute__(self, name):\n if name == \"__class__\":\n # Make isinstance believe that self is also an instance of\n # type(self.loader). This is required, e.g. 
by some tools, like\n # slotscheck, that can handle known loaders only.\n return self.loader.__class__\n\n return super(_ImportHookChainedLoader, self).__getattribute__(name)\n\n def __getattr__(self, name):\n # Proxy any other attribute access to the underlying loader.\n return getattr(self.loader, name)\n\n def add_callback(self, key, callback):\n # type: (Any, Callable[[ModuleType], None]) -> None\n self.callbacks[key] = callback\n\n def _load_module(self, fullname):\n # type: (str) -> ModuleType\n module = self.loader.load_module(fullname)\n for callback in self.callbacks.values():\n callback(module)\n\n return module\n\n def _create_module(self, spec):\n return self.loader.create_module(spec)\n\n def _exec_module(self, module):\n # Collect and run only the first hook that matches the module.\n pre_exec_hook = None\n\n for _ in sys.meta_path:\n if isinstance(_, ModuleWatchdog):\n try:\n for cond, hook in _._pre_exec_module_hooks:\n if isinstance(cond, str) and cond == module.__name__ or cond(module.__name__):\n # Several pre-exec hooks could match, we keep the first one\n pre_exec_hook = hook\n break\n except Exception:\n log.debug(\"Exception happened while processing pre_exec_module_hooks\", exc_info=True)\n\n if pre_exec_hook is not None:\n break\n\n if pre_exec_hook:\n pre_exec_hook(self, module)\n else:\n self.loader.exec_module(module)\n\n for callback in self.callbacks.values():\n callback(module)\n\n\nclass ModuleWatchdog(dict):\n \"\"\"Module watchdog.\n\n Replace the standard ``sys.modules`` dictionary to detect when modules are\n loaded/unloaded. This is also responsible for triggering any registered\n import hooks.\n\n Subclasses might customize the default behavior by overriding the\n ``after_import`` method, which is triggered on every module import, once\n the subclass is installed.\n \"\"\"\n\n _instance = None # type: Optional[ModuleWatchdog]\n\n def __init__(self):\n # type: () -> None\n self._hook_map = defaultdict(list) # type: DefaultDict[str, List[ModuleHookType]]\n self._om = None # type: Optional[Dict[str, ModuleType]]\n self._modules = sys.modules # type: Union[dict, ModuleWatchdog]\n self._finding = set() # type: Set[str]\n self._pre_exec_module_hooks = [] # type: List[Tuple[PreExecHookCond, PreExecHookType]]\n\n def __getitem__(self, item):\n # type: (str) -> ModuleType\n return self._modules.__getitem__(item)\n\n def __setitem__(self, name, module):\n # type: (str, ModuleType) -> None\n self._modules.__setitem__(name, module)\n\n @property\n def _origin_map(self):\n # type: () -> Dict[str, ModuleType]\n if self._om is None:\n try:\n self._om = {origin(module): module for module in sys.modules.values()}\n except RuntimeError:\n # The state of sys.modules might have been mutated by another\n # thread. 
We try to build the full mapping at the next occasion.\n # For now we take the more expensive route of building a list of\n # the current values, which might be incomplete.\n return {origin(module): module for module in list(sys.modules.values())}\n\n return self._om\n\n def _add_to_meta_path(self):\n # type: () -> None\n sys.meta_path.insert(0, self) # type: ignore[arg-type]\n\n @classmethod\n def _find_in_meta_path(cls):\n # type: () -> Optional[int]\n for i, meta_path in enumerate(sys.meta_path):\n if type(meta_path) is cls:\n return i\n return None\n\n @classmethod\n def _remove_from_meta_path(cls):\n # type: () -> None\n i = cls._find_in_meta_path()\n if i is not None:\n sys.meta_path.pop(i)\n\n def after_import(self, module):\n # type: (ModuleType) -> None\n path = origin(module)\n self._origin_map[path] = module\n\n # Collect all hooks by module origin and name\n hooks = []\n if path in self._hook_map:\n hooks.extend(self._hook_map[path])\n if module.__name__ in self._hook_map:\n hooks.extend(self._hook_map[module.__name__])\n\n if hooks:\n log.debug(\"Calling %d registered hooks on import of module '%s'\", len(hooks), module.__name__)\n for hook in hooks:\n hook(module)\n\n @classmethod\n def get_by_origin(cls, _origin):\n # type: (str) -> Optional[ModuleType]\n \"\"\"Lookup a module by its origin.\"\"\"\n cls._check_installed()\n\n instance = cast(ModuleWatchdog, cls._instance)\n\n path = _resolve(_origin)\n if path is not None:\n module = instance._origin_map.get(path)\n if module is not None:\n return module\n\n # Check if this is the __main__ module\n main_module = sys.modules.get(\"__main__\")\n if main_module is not None and origin(main_module) == path:\n # Register for future lookups\n instance._origin_map[path] = main_module\n\n return main_module\n\n return None\n\n def __delitem__(self, name):\n # type: (str) -> None\n try:\n path = origin(sys.modules[name])\n # Drop the module reference to reclaim memory\n del self._origin_map[path]\n except KeyError:\n pass\n\n self._modules.__delitem__(name)\n\n def __getattribute__(self, name):\n # type: (str) -> Any\n if LEGACY_DICT_COPY and name == \"keys\":\n # This is a potential attempt to make a copy of sys.modules using\n # dict(sys.modules) on a Python version that uses the C API to\n # perform the operation. Since we are an instance of a dict, this\n # means that we will end up looking like the empty dict, so we take\n # this chance to actually look like sys.modules.\n # NOTE: This is a potential source of memory leaks. 
However, we\n # expect this to occur only on defunct Python versions, and only\n # during special code executions, like test runs.\n super(ModuleWatchdog, self).clear()\n super(ModuleWatchdog, self).update(self._modules)\n\n try:\n return super(ModuleWatchdog, self).__getattribute__(\"_modules\").__getattribute__(name)\n except AttributeError:\n return super(ModuleWatchdog, self).__getattribute__(name)\n\n def __contains__(self, name):\n # type: (object) -> bool\n return self._modules.__contains__(name)\n\n def __len__(self):\n # type: () -> int\n return self._modules.__len__()\n\n def __iter__(self):\n # type: () -> Iterator\n return self._modules.__iter__()\n\n def find_module(self, fullname, path=None):\n # type: (str, Optional[str]) -> Union[Loader, None]\n if fullname in self._finding:\n return None\n\n self._finding.add(fullname)\n\n try:\n loader = find_loader(fullname)\n if loader is not None:\n if not isinstance(loader, _ImportHookChainedLoader):\n loader = _ImportHookChainedLoader(loader)\n\n if PY2:\n # With Python 2 we don't get all the finders invoked, so we\n # make sure we register all the callbacks at the earliest\n # opportunity.\n for finder in sys.meta_path:\n if isinstance(finder, ModuleWatchdog):\n loader.add_callback(type(finder), finder.after_import)\n else:\n loader.add_callback(type(self), self.after_import)\n\n return loader\n\n finally:\n self._finding.remove(fullname)\n\n return None\n\n def find_spec(self, fullname, path=None, target=None):\n # type: (str, Optional[str], Optional[ModuleType]) -> Optional[ModuleSpec]\n if fullname in self._finding:\n return None\n\n self._finding.add(fullname)\n\n try:\n spec = find_spec(fullname)\n if spec is None:\n return None\n\n loader = getattr(spec, \"loader\", None)\n\n if loader is not None:\n if not isinstance(loader, _ImportHookChainedLoader):\n spec.loader = _ImportHookChainedLoader(loader)\n\n cast(_ImportHookChainedLoader, spec.loader).add_callback(type(self), self.after_import)\n\n return spec\n\n finally:\n self._finding.remove(fullname)\n\n @classmethod\n def register_origin_hook(cls, origin, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Register a hook to be called when the module with the given origin is\n imported.\n\n The hook will be called with the module object as argument.\n \"\"\"\n cls._check_installed()\n\n # DEV: Under the hypothesis that this is only ever called by the probe\n # poller thread, there are no further actions to take. Should this ever\n # change, then thread-safety might become a concern.\n path = _resolve(origin)\n if path is None:\n raise ValueError(\"Cannot resolve module origin %s\" % origin)\n\n log.debug(\"Registering hook '%r' on path '%s'\", hook, path)\n instance = cast(ModuleWatchdog, cls._instance)\n instance._hook_map[path].append(hook)\n try:\n module = instance._origin_map[path]\n except KeyError:\n # The module is not loaded yet. 
Nothing more we can do.\n return\n\n # The module was already imported so we invoke the hook straight-away\n log.debug(\"Calling hook '%r' on already imported module '%s'\", hook, module.__name__)\n hook(module)\n\n @classmethod\n def unregister_origin_hook(cls, origin, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Unregister the hook registered with the given module origin and\n argument.\n \"\"\"\n cls._check_installed()\n\n path = _resolve(origin)\n if path is None:\n raise ValueError(\"Module origin %s cannot be resolved\", origin)\n\n instance = cast(ModuleWatchdog, cls._instance)\n if path not in instance._hook_map:\n raise ValueError(\"No hooks registered for origin %s\" % origin)\n\n try:\n if path in instance._hook_map:\n hooks = instance._hook_map[path]\n hooks.remove(hook)\n if not hooks:\n del instance._hook_map[path]\n except ValueError:\n raise ValueError(\"Hook %r not registered for origin %s\" % (hook, origin))\n\n @classmethod\n def register_module_hook(cls, module, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Register a hook to be called when the module with the given name is\n imported.\n\n The hook will be called with the module object as argument.\n \"\"\"\n cls._check_installed()\n\n log.debug(\"Registering hook '%r' on module '%s'\", hook, module)\n instance = cast(ModuleWatchdog, cls._instance)\n instance._hook_map[module].append(hook)\n try:\n module_object = instance[module]\n except KeyError:\n # The module is not loaded yet. Nothing more we can do.\n return\n\n # The module was already imported so we invoke the hook straight-away\n log.debug(\"Calling hook '%r' on already imported module '%s'\", hook, module)\n hook(module_object)\n\n @classmethod\n def unregister_module_hook(cls, module, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Unregister the hook registered with the given module name and\n argument.\n \"\"\"\n cls._check_installed()\n\n instance = cast(ModuleWatchdog, cls._instance)\n if module not in instance._hook_map:\n raise ValueError(\"No hooks registered for module %s\" % module)\n\n try:\n if module in instance._hook_map:\n hooks = instance._hook_map[module]\n hooks.remove(hook)\n if not hooks:\n del instance._hook_map[module]\n except ValueError:\n raise ValueError(\"Hook %r not registered for module %r\" % (hook, module))\n\n @classmethod\n def register_pre_exec_module_hook(cls, cond, hook):\n # type: (Type[ModuleWatchdog], PreExecHookCond, PreExecHookType) -> None\n \"\"\"Register a hook to execute before/instead of exec_module.\n\n The pre exec_module hook is executed before the module is executed\n to allow for changed modules to be executed as needed. 
To ensure\n that the hook is applied only to the modules that are required,\n the condition is evaluated against the module name.\n \"\"\"\n cls._check_installed()\n\n log.debug(\"Registering pre_exec module hook '%r' on condition '%s'\", hook, cond)\n instance = cast(ModuleWatchdog, cls._instance)\n instance._pre_exec_module_hooks.append((cond, hook))\n\n @classmethod\n def _check_installed(cls):\n # type: () -> None\n if not cls.is_installed():\n raise RuntimeError(\"%s is not installed\" % cls.__name__)\n\n @classmethod\n def install(cls):\n # type: () -> None\n \"\"\"Install the module watchdog.\"\"\"\n if cls.is_installed():\n raise RuntimeError(\"%s is already installed\" % cls.__name__)\n\n cls._instance = sys.modules = cls()\n sys.modules._add_to_meta_path()\n log.debug(\"%s installed\", cls)\n\n @classmethod\n def is_installed(cls):\n \"\"\"Check whether this module watchdog class is installed.\"\"\"\n return cls._instance is not None and type(cls._instance) is cls\n\n @classmethod\n def uninstall(cls):\n # type: () -> None\n \"\"\"Uninstall the module watchdog.\n\n This will uninstall only the most recently installed instance of this\n class.\n \"\"\"\n cls._check_installed()\n\n parent, current = None, sys.modules\n while isinstance(current, ModuleWatchdog):\n if type(current) is cls:\n cls._remove_from_meta_path()\n if parent is not None:\n setattr(parent, \"_modules\", getattr(current, \"_modules\"))\n else:\n sys.modules = getattr(current, \"_modules\")\n cls._instance = None\n log.debug(\"%s uninstalled\", cls)\n return\n parent = current\n current = current._modules\n", "path": "ddtrace/internal/module.py" } ]
[ { "content": "from collections import defaultdict\nfrom os.path import abspath\nfrom os.path import expanduser\nfrom os.path import isdir\nfrom os.path import isfile\nfrom os.path import join\nimport sys\nfrom types import ModuleType\nfrom typing import Any\nfrom typing import Callable\nfrom typing import DefaultDict\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import Union\nfrom typing import cast\n\nfrom ddtrace.internal.compat import PY2\nfrom ddtrace.internal.logger import get_logger\nfrom ddtrace.internal.utils import get_argument_value\n\n\nlog = get_logger(__name__)\n\nModuleHookType = Callable[[ModuleType], None]\nPreExecHookType = Callable[[Any, ModuleType], None]\nPreExecHookCond = Union[str, Callable[[str], bool]]\n\n\n_run_code = None\n_post_run_module_hooks = [] # type: List[ModuleHookType]\n\n\ndef _wrapped_run_code(*args, **kwargs):\n # type: (*Any, **Any) -> Dict[str, Any]\n global _run_code, _post_run_module_hooks\n\n # DEV: If we are calling this wrapper then _run_code must have been set to\n # the original runpy._run_code.\n assert _run_code is not None\n\n mod_name = get_argument_value(args, kwargs, 3, \"mod_name\")\n\n try:\n return _run_code(*args, **kwargs)\n finally:\n module = sys.modules[mod_name]\n for hook in _post_run_module_hooks:\n hook(module)\n\n\ndef _patch_run_code():\n # type: () -> None\n global _run_code\n\n if _run_code is None:\n import runpy\n\n _run_code = runpy._run_code # type: ignore[attr-defined]\n runpy._run_code = _wrapped_run_code # type: ignore[attr-defined]\n\n\ndef register_post_run_module_hook(hook):\n # type: (ModuleHookType) -> None\n \"\"\"Register a post run module hook.\n\n The hooks gets called after the module is loaded. For this to work, the\n hook needs to be registered during the interpreter initialization, e.g. 
as\n part of a sitecustomize.py script.\n \"\"\"\n global _run_code, _post_run_module_hooks\n\n _patch_run_code()\n\n _post_run_module_hooks.append(hook)\n\n\ndef unregister_post_run_module_hook(hook):\n # type: (ModuleHookType) -> None\n \"\"\"Unregister a post run module hook.\n\n If the hook was not registered, a ``ValueError`` exception is raised.\n \"\"\"\n global _post_run_module_hooks\n\n _post_run_module_hooks.remove(hook)\n\n\ndef origin(module):\n # type: (ModuleType) -> str\n \"\"\"Get the origin source file of the module.\"\"\"\n try:\n # DEV: Use object.__getattribute__ to avoid potential side-effects.\n orig = abspath(object.__getattribute__(module, \"__file__\"))\n except (AttributeError, TypeError):\n # Module is probably only partially initialised, so we look at its\n # spec instead\n try:\n # DEV: Use object.__getattribute__ to avoid potential side-effects.\n orig = abspath(object.__getattribute__(module, \"__spec__\").origin)\n except (AttributeError, ValueError, TypeError):\n orig = None\n\n if orig is not None and isfile(orig):\n if orig.endswith(\".pyc\"):\n orig = orig[:-1]\n return orig\n\n return \"<unknown origin>\"\n\n\ndef _resolve(path):\n # type: (str) -> Optional[str]\n \"\"\"Resolve a (relative) path with respect to sys.path.\"\"\"\n for base in sys.path:\n if isdir(base):\n resolved_path = abspath(join(base, expanduser(path)))\n if isfile(resolved_path):\n return resolved_path\n return None\n\n\n# Borrowed from the wrapt module\n# https://github.com/GrahamDumpleton/wrapt/blob/df0e62c2740143cceb6cafea4c306dae1c559ef8/src/wrapt/importer.py\n\nif PY2:\n import pkgutil\n\n find_spec = ModuleSpec = None\n Loader = object\n\n find_loader = pkgutil.find_loader\n\nelse:\n from importlib.abc import Loader\n from importlib.machinery import ModuleSpec\n from importlib.util import find_spec\n\n def find_loader(fullname):\n # type: (str) -> Optional[Loader]\n return getattr(find_spec(fullname), \"loader\", None)\n\n\nLEGACY_DICT_COPY = sys.version_info < (3, 6)\n\n\nclass _ImportHookChainedLoader(Loader):\n def __init__(self, loader):\n # type: (Loader) -> None\n self.loader = loader\n self.callbacks = {} # type: Dict[Any, Callable[[ModuleType], None]]\n\n # DEV: load_module is deprecated so we define it at runtime if also\n # defined by the default loader. We also check and define for the\n # methods that are supposed to replace the load_module functionality.\n if hasattr(loader, \"load_module\"):\n self.load_module = self._load_module # type: ignore[assignment]\n if hasattr(loader, \"create_module\"):\n self.create_module = self._create_module # type: ignore[assignment]\n if hasattr(loader, \"exec_module\"):\n self.exec_module = self._exec_module # type: ignore[assignment]\n\n def __getattribute__(self, name):\n if name == \"__class__\":\n # Make isinstance believe that self is also an instance of\n # type(self.loader). This is required, e.g. 
by some tools, like\n # slotscheck, that can handle known loaders only.\n return self.loader.__class__\n\n return super(_ImportHookChainedLoader, self).__getattribute__(name)\n\n def __getattr__(self, name):\n # Proxy any other attribute access to the underlying loader.\n return getattr(self.loader, name)\n\n def add_callback(self, key, callback):\n # type: (Any, Callable[[ModuleType], None]) -> None\n self.callbacks[key] = callback\n\n def _load_module(self, fullname):\n # type: (str) -> ModuleType\n module = self.loader.load_module(fullname)\n for callback in self.callbacks.values():\n callback(module)\n\n return module\n\n def _create_module(self, spec):\n return self.loader.create_module(spec)\n\n def _exec_module(self, module):\n # Collect and run only the first hook that matches the module.\n pre_exec_hook = None\n\n for _ in sys.meta_path:\n if isinstance(_, ModuleWatchdog):\n try:\n for cond, hook in _._pre_exec_module_hooks:\n if isinstance(cond, str) and cond == module.__name__ or cond(module.__name__):\n # Several pre-exec hooks could match, we keep the first one\n pre_exec_hook = hook\n break\n except Exception:\n log.debug(\"Exception happened while processing pre_exec_module_hooks\", exc_info=True)\n\n if pre_exec_hook is not None:\n break\n\n if pre_exec_hook:\n pre_exec_hook(self, module)\n else:\n self.loader.exec_module(module)\n\n for callback in self.callbacks.values():\n callback(module)\n\n\nclass ModuleWatchdog(dict):\n \"\"\"Module watchdog.\n\n Replace the standard ``sys.modules`` dictionary to detect when modules are\n loaded/unloaded. This is also responsible for triggering any registered\n import hooks.\n\n Subclasses might customize the default behavior by overriding the\n ``after_import`` method, which is triggered on every module import, once\n the subclass is installed.\n \"\"\"\n\n _instance = None # type: Optional[ModuleWatchdog]\n\n def __init__(self):\n # type: () -> None\n self._hook_map = defaultdict(list) # type: DefaultDict[str, List[ModuleHookType]]\n self._om = None # type: Optional[Dict[str, ModuleType]]\n self._modules = sys.modules # type: Union[dict, ModuleWatchdog]\n self._finding = set() # type: Set[str]\n self._pre_exec_module_hooks = [] # type: List[Tuple[PreExecHookCond, PreExecHookType]]\n\n def __getitem__(self, item):\n # type: (str) -> ModuleType\n return self._modules.__getitem__(item)\n\n def __setitem__(self, name, module):\n # type: (str, ModuleType) -> None\n self._modules.__setitem__(name, module)\n\n @property\n def _origin_map(self):\n # type: () -> Dict[str, ModuleType]\n if self._om is None:\n try:\n self._om = {origin(module): module for module in sys.modules.values()}\n except RuntimeError:\n # The state of sys.modules might have been mutated by another\n # thread. 
We try to build the full mapping at the next occasion.\n # For now we take the more expensive route of building a list of\n # the current values, which might be incomplete.\n return {origin(module): module for module in list(sys.modules.values())}\n\n return self._om\n\n def _add_to_meta_path(self):\n # type: () -> None\n sys.meta_path.insert(0, self) # type: ignore[arg-type]\n\n @classmethod\n def _find_in_meta_path(cls):\n # type: () -> Optional[int]\n for i, meta_path in enumerate(sys.meta_path):\n if type(meta_path) is cls:\n return i\n return None\n\n @classmethod\n def _remove_from_meta_path(cls):\n # type: () -> None\n i = cls._find_in_meta_path()\n if i is not None:\n sys.meta_path.pop(i)\n\n def after_import(self, module):\n # type: (ModuleType) -> None\n path = origin(module)\n self._origin_map[path] = module\n\n # Collect all hooks by module origin and name\n hooks = []\n if path in self._hook_map:\n hooks.extend(self._hook_map[path])\n if module.__name__ in self._hook_map:\n hooks.extend(self._hook_map[module.__name__])\n\n if hooks:\n log.debug(\"Calling %d registered hooks on import of module '%s'\", len(hooks), module.__name__)\n for hook in hooks:\n hook(module)\n\n @classmethod\n def get_by_origin(cls, _origin):\n # type: (str) -> Optional[ModuleType]\n \"\"\"Lookup a module by its origin.\"\"\"\n cls._check_installed()\n\n instance = cast(ModuleWatchdog, cls._instance)\n\n path = _resolve(_origin)\n if path is not None:\n module = instance._origin_map.get(path)\n if module is not None:\n return module\n\n # Check if this is the __main__ module\n main_module = sys.modules.get(\"__main__\")\n if main_module is not None and origin(main_module) == path:\n # Register for future lookups\n instance._origin_map[path] = main_module\n\n return main_module\n\n return None\n\n def __delitem__(self, name):\n # type: (str) -> None\n try:\n path = origin(sys.modules[name])\n # Drop the module reference to reclaim memory\n del self._origin_map[path]\n except KeyError:\n pass\n\n self._modules.__delitem__(name)\n\n def __getattribute__(self, name):\n # type: (str) -> Any\n if LEGACY_DICT_COPY and name == \"keys\":\n # This is a potential attempt to make a copy of sys.modules using\n # dict(sys.modules) on a Python version that uses the C API to\n # perform the operation. Since we are an instance of a dict, this\n # means that we will end up looking like the empty dict, so we take\n # this chance to actually look like sys.modules.\n # NOTE: This is a potential source of memory leaks. 
However, we\n # expect this to occur only on defunct Python versions, and only\n # during special code executions, like test runs.\n super(ModuleWatchdog, self).clear()\n super(ModuleWatchdog, self).update(self._modules)\n\n try:\n return super(ModuleWatchdog, self).__getattribute__(\"_modules\").__getattribute__(name)\n except AttributeError:\n return super(ModuleWatchdog, self).__getattribute__(name)\n\n def __contains__(self, name):\n # type: (object) -> bool\n return self._modules.__contains__(name)\n\n def __len__(self):\n # type: () -> int\n return self._modules.__len__()\n\n def __iter__(self):\n # type: () -> Iterator\n return self._modules.__iter__()\n\n def find_module(self, fullname, path=None):\n # type: (str, Optional[str]) -> Union[Loader, None]\n if fullname in self._finding:\n return None\n\n self._finding.add(fullname)\n\n try:\n loader = find_loader(fullname)\n if loader is not None:\n if not isinstance(loader, _ImportHookChainedLoader):\n loader = _ImportHookChainedLoader(loader)\n\n if PY2:\n # With Python 2 we don't get all the finders invoked, so we\n # make sure we register all the callbacks at the earliest\n # opportunity.\n for finder in sys.meta_path:\n if isinstance(finder, ModuleWatchdog):\n loader.add_callback(type(finder), finder.after_import)\n else:\n loader.add_callback(type(self), self.after_import)\n\n return loader\n\n finally:\n self._finding.remove(fullname)\n\n return None\n\n def find_spec(self, fullname, path=None, target=None):\n # type: (str, Optional[str], Optional[ModuleType]) -> Optional[ModuleSpec]\n if fullname in self._finding:\n return None\n\n self._finding.add(fullname)\n\n try:\n try:\n # Best effort\n spec = find_spec(fullname)\n except Exception:\n return None\n\n if spec is None:\n return None\n\n loader = getattr(spec, \"loader\", None)\n\n if loader is not None:\n if not isinstance(loader, _ImportHookChainedLoader):\n spec.loader = _ImportHookChainedLoader(loader)\n\n cast(_ImportHookChainedLoader, spec.loader).add_callback(type(self), self.after_import)\n\n return spec\n\n finally:\n self._finding.remove(fullname)\n\n @classmethod\n def register_origin_hook(cls, origin, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Register a hook to be called when the module with the given origin is\n imported.\n\n The hook will be called with the module object as argument.\n \"\"\"\n cls._check_installed()\n\n # DEV: Under the hypothesis that this is only ever called by the probe\n # poller thread, there are no further actions to take. Should this ever\n # change, then thread-safety might become a concern.\n path = _resolve(origin)\n if path is None:\n raise ValueError(\"Cannot resolve module origin %s\" % origin)\n\n log.debug(\"Registering hook '%r' on path '%s'\", hook, path)\n instance = cast(ModuleWatchdog, cls._instance)\n instance._hook_map[path].append(hook)\n try:\n module = instance._origin_map[path]\n except KeyError:\n # The module is not loaded yet. 
Nothing more we can do.\n return\n\n # The module was already imported so we invoke the hook straight-away\n log.debug(\"Calling hook '%r' on already imported module '%s'\", hook, module.__name__)\n hook(module)\n\n @classmethod\n def unregister_origin_hook(cls, origin, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Unregister the hook registered with the given module origin and\n argument.\n \"\"\"\n cls._check_installed()\n\n path = _resolve(origin)\n if path is None:\n raise ValueError(\"Module origin %s cannot be resolved\", origin)\n\n instance = cast(ModuleWatchdog, cls._instance)\n if path not in instance._hook_map:\n raise ValueError(\"No hooks registered for origin %s\" % origin)\n\n try:\n if path in instance._hook_map:\n hooks = instance._hook_map[path]\n hooks.remove(hook)\n if not hooks:\n del instance._hook_map[path]\n except ValueError:\n raise ValueError(\"Hook %r not registered for origin %s\" % (hook, origin))\n\n @classmethod\n def register_module_hook(cls, module, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Register a hook to be called when the module with the given name is\n imported.\n\n The hook will be called with the module object as argument.\n \"\"\"\n cls._check_installed()\n\n log.debug(\"Registering hook '%r' on module '%s'\", hook, module)\n instance = cast(ModuleWatchdog, cls._instance)\n instance._hook_map[module].append(hook)\n try:\n module_object = instance[module]\n except KeyError:\n # The module is not loaded yet. Nothing more we can do.\n return\n\n # The module was already imported so we invoke the hook straight-away\n log.debug(\"Calling hook '%r' on already imported module '%s'\", hook, module)\n hook(module_object)\n\n @classmethod\n def unregister_module_hook(cls, module, hook):\n # type: (str, ModuleHookType) -> None\n \"\"\"Unregister the hook registered with the given module name and\n argument.\n \"\"\"\n cls._check_installed()\n\n instance = cast(ModuleWatchdog, cls._instance)\n if module not in instance._hook_map:\n raise ValueError(\"No hooks registered for module %s\" % module)\n\n try:\n if module in instance._hook_map:\n hooks = instance._hook_map[module]\n hooks.remove(hook)\n if not hooks:\n del instance._hook_map[module]\n except ValueError:\n raise ValueError(\"Hook %r not registered for module %r\" % (hook, module))\n\n @classmethod\n def register_pre_exec_module_hook(cls, cond, hook):\n # type: (Type[ModuleWatchdog], PreExecHookCond, PreExecHookType) -> None\n \"\"\"Register a hook to execute before/instead of exec_module.\n\n The pre exec_module hook is executed before the module is executed\n to allow for changed modules to be executed as needed. 
To ensure\n that the hook is applied only to the modules that are required,\n the condition is evaluated against the module name.\n \"\"\"\n cls._check_installed()\n\n log.debug(\"Registering pre_exec module hook '%r' on condition '%s'\", hook, cond)\n instance = cast(ModuleWatchdog, cls._instance)\n instance._pre_exec_module_hooks.append((cond, hook))\n\n @classmethod\n def _check_installed(cls):\n # type: () -> None\n if not cls.is_installed():\n raise RuntimeError(\"%s is not installed\" % cls.__name__)\n\n @classmethod\n def install(cls):\n # type: () -> None\n \"\"\"Install the module watchdog.\"\"\"\n if cls.is_installed():\n raise RuntimeError(\"%s is already installed\" % cls.__name__)\n\n cls._instance = sys.modules = cls()\n sys.modules._add_to_meta_path()\n log.debug(\"%s installed\", cls)\n\n @classmethod\n def is_installed(cls):\n \"\"\"Check whether this module watchdog class is installed.\"\"\"\n return cls._instance is not None and type(cls._instance) is cls\n\n @classmethod\n def uninstall(cls):\n # type: () -> None\n \"\"\"Uninstall the module watchdog.\n\n This will uninstall only the most recently installed instance of this\n class.\n \"\"\"\n cls._check_installed()\n\n parent, current = None, sys.modules\n while isinstance(current, ModuleWatchdog):\n if type(current) is cls:\n cls._remove_from_meta_path()\n if parent is not None:\n setattr(parent, \"_modules\", getattr(current, \"_modules\"))\n else:\n sys.modules = getattr(current, \"_modules\")\n cls._instance = None\n log.debug(\"%s uninstalled\", cls)\n return\n parent = current\n current = current._modules\n", "path": "ddtrace/internal/module.py" } ]
diff --git a/ddtrace/internal/module.py b/ddtrace/internal/module.py
index e608e648487..b0668518ffc 100644
--- a/ddtrace/internal/module.py
+++ b/ddtrace/internal/module.py
@@ -404,7 +404,12 @@ def find_spec(self, fullname, path=None, target=None):
         self._finding.add(fullname)
 
         try:
-            spec = find_spec(fullname)
+            try:
+                # Best effort
+                spec = find_spec(fullname)
+            except Exception:
+                return None
+
             if spec is None:
                 return None
 
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt
index 2289730a2aa..37e5cdb4051 100644
--- a/docs/spelling_wordlist.txt
+++ b/docs/spelling_wordlist.txt
@@ -1,5 +1,7 @@
 AArch
 AnyCallable
+autoreload
+autoreloading
 CPython
 Fargate
 Firehose
diff --git a/releasenotes/notes/fix-internal-module-spec-best-effort-adbb7c32399d7317.yaml b/releasenotes/notes/fix-internal-module-spec-best-effort-adbb7c32399d7317.yaml
new file mode 100644
index 00000000000..5975bb9514e
--- /dev/null
+++ b/releasenotes/notes/fix-internal-module-spec-best-effort-adbb7c32399d7317.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Prevent exceptions when autoreloading modules that directly or indirectly
+    import ddtrace with the iPython autoreload extension.
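The shape of the fix is a general "best effort" pattern: any failure while looking up a spec is treated as "no spec found", so the rest of the import machinery takes over instead of the reload crashing. A standalone sketch of that pattern (the helper name here is made up and is not part of ddtrace):

```python
# Hypothetical standalone version of the guard added in the diff above.
import importlib.util
from importlib.machinery import ModuleSpec
from typing import Optional


def best_effort_find_spec(fullname: str) -> Optional[ModuleSpec]:
    """Return the module spec, or None if it cannot be determined for any reason."""
    try:
        return importlib.util.find_spec(fullname)
    except Exception:
        # e.g. the ValueError raised for partially initialised or reloaded modules;
        # returning None lets the other sys.meta_path finders handle the import.
        return None


print(best_effort_find_spec("json"))          # a normal module resolves as usual
print(best_effort_find_spec("no_such_mod"))   # a missing top-level module -> None
```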
pytorch__pytorch-4656
Bug in CosineAnnealingLR in Python 2 (?)

The learning rate only ever takes one of two values, `base_lr` or `eta_min`, if the `T_max` parameter passed to the constructor is an integer. This is probably because `self.last_epoch / self.T_max` evaluates to an integer (floor division) in Python 2.
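A quick illustration of the suspected cause (the values are arbitrary, and the `//` line simulates Python 2's behaviour for `int / int`): when the ratio is truncated, the cosine argument only ever reaches 0 or pi, so the schedule jumps between `base_lr` and `eta_min` instead of annealing smoothly.

```python
# Illustration only: reproduces the truncation Python 2 applies to int / int.
import math

T_max = 10
for last_epoch in range(T_max + 1):
    py2_ratio = last_epoch // T_max        # what `last_epoch / T_max` yields in Python 2
    py3_ratio = last_epoch / float(T_max)  # the intended fractional progress
    print(last_epoch,
          round(0.5 * (1 + math.cos(py2_ratio * math.pi)), 3),   # stuck at 1.0 until the final step
          round(0.5 * (1 + math.cos(py3_ratio * math.pi)), 3))   # smooth cosine annealing
```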
[ { "content": "import math\nfrom bisect import bisect_right\nfrom .optimizer import Optimizer\n\n\nclass _LRScheduler(object):\n def __init__(self, optimizer, last_epoch=-1):\n if not isinstance(optimizer, Optimizer):\n raise TypeError('{} is not an Optimizer'.format(\n type(optimizer).__name__))\n self.optimizer = optimizer\n if last_epoch == -1:\n for group in optimizer.param_groups:\n group.setdefault('initial_lr', group['lr'])\n else:\n for i, group in enumerate(optimizer.param_groups):\n if 'initial_lr' not in group:\n raise KeyError(\"param 'initial_lr' is not specified \"\n \"in param_groups[{}] when resuming an optimizer\".format(i))\n self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))\n self.step(last_epoch + 1)\n self.last_epoch = last_epoch\n\n def get_lr(self):\n raise NotImplementedError\n\n def step(self, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n self.last_epoch = epoch\n for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n param_group['lr'] = lr\n\n\nclass LambdaLR(_LRScheduler):\n \"\"\"Sets the learning rate of each parameter group to the initial lr\n times a given function. When last_epoch=-1, sets initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n lr_lambda (function or list): A function which computes a multiplicative\n factor given an integer parameter epoch, or a list of such\n functions, one for each group in optimizer.param_groups.\n last_epoch (int): The index of last epoch. Default: -1.\n\n Example:\n >>> # Assuming optimizer has two groups.\n >>> lambda1 = lambda epoch: epoch // 30\n >>> lambda2 = lambda epoch: 0.95 ** epoch\n >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])\n >>> for epoch in range(100):\n >>> scheduler.step()\n >>> train(...)\n >>> validate(...)\n \"\"\"\n def __init__(self, optimizer, lr_lambda, last_epoch=-1):\n self.optimizer = optimizer\n if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):\n self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)\n else:\n if len(lr_lambda) != len(optimizer.param_groups):\n raise ValueError(\"Expected {} lr_lambdas, but got {}\".format(\n len(optimizer.param_groups), len(lr_lambda)))\n self.lr_lambdas = list(lr_lambda)\n self.last_epoch = last_epoch\n super(LambdaLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * lmbda(self.last_epoch)\n for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]\n\n\nclass StepLR(_LRScheduler):\n \"\"\"Sets the learning rate of each parameter group to the initial lr\n decayed by gamma every step_size epochs. When last_epoch=-1, sets\n initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n step_size (int): Period of learning rate decay.\n gamma (float): Multiplicative factor of learning rate decay.\n Default: 0.1.\n last_epoch (int): The index of last epoch. 
Default: -1.\n\n Example:\n >>> # Assuming optimizer uses lr = 0.05 for all groups\n >>> # lr = 0.05 if epoch < 30\n >>> # lr = 0.005 if 30 <= epoch < 60\n >>> # lr = 0.0005 if 60 <= epoch < 90\n >>> # ...\n >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)\n >>> for epoch in range(100):\n >>> scheduler.step()\n >>> train(...)\n >>> validate(...)\n \"\"\"\n\n def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1):\n self.step_size = step_size\n self.gamma = gamma\n super(StepLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * self.gamma ** (self.last_epoch // self.step_size)\n for base_lr in self.base_lrs]\n\n\nclass MultiStepLR(_LRScheduler):\n \"\"\"Set the learning rate of each parameter group to the initial lr decayed\n by gamma once the number of epoch reaches one of the milestones. When\n last_epoch=-1, sets initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n milestones (list): List of epoch indices. Must be increasing.\n gamma (float): Multiplicative factor of learning rate decay.\n Default: 0.1.\n last_epoch (int): The index of last epoch. Default: -1.\n\n Example:\n >>> # Assuming optimizer uses lr = 0.05 for all groups\n >>> # lr = 0.05 if epoch < 30\n >>> # lr = 0.005 if 30 <= epoch < 80\n >>> # lr = 0.0005 if epoch >= 80\n >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)\n >>> for epoch in range(100):\n >>> scheduler.step()\n >>> train(...)\n >>> validate(...)\n \"\"\"\n\n def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):\n if not list(milestones) == sorted(milestones):\n raise ValueError('Milestones should be a list of'\n ' increasing integers. Got {}', milestones)\n self.milestones = milestones\n self.gamma = gamma\n super(MultiStepLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * self.gamma ** bisect_right(self.milestones, self.last_epoch)\n for base_lr in self.base_lrs]\n\n\nclass ExponentialLR(_LRScheduler):\n \"\"\"Set the learning rate of each parameter group to the initial lr decayed\n by gamma every epoch. When last_epoch=-1, sets initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n gamma (float): Multiplicative factor of learning rate decay.\n last_epoch (int): The index of last epoch. Default: -1.\n \"\"\"\n\n def __init__(self, optimizer, gamma, last_epoch=-1):\n self.gamma = gamma\n super(ExponentialLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * self.gamma ** self.last_epoch\n for base_lr in self.base_lrs]\n\n\nclass CosineAnnealingLR(_LRScheduler):\n \"\"\"Set the learning rate of each parameter group using a cosine annealing\n schedule, where :math:`\\eta_{max}` is set to the initial lr and\n :math:`T_{cur}` is the number of epochs since the last restart in SGDR:\n\n .. math::\n\n \\eta_t = \\eta_{min} + \\frac{1}{2}(\\eta_{max} - \\eta_{min})(1 +\n \\cos(\\frac{T_{cur}}{T_{max}}\\pi))\n\n When last_epoch=-1, sets initial lr as lr.\n\n It has been proposed in\n `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only\n implements the cosine annealing part of SGDR, and not the restarts.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n T_max (int): Maximum number of iterations.\n eta_min (float): Minimum learning rate. Default: 0.\n last_epoch (int): The index of last epoch. Default: -1.\n\n .. 
_SGDR\\: Stochastic Gradient Descent with Warm Restarts:\n https://arxiv.org/abs/1608.03983\n \"\"\"\n\n def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):\n self.T_max = T_max\n self.eta_min = eta_min\n super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [self.eta_min + (base_lr - self.eta_min) *\n (1 + math.cos(self.last_epoch / self.T_max * math.pi)) / 2\n for base_lr in self.base_lrs]\n\n\nclass ReduceLROnPlateau(object):\n \"\"\"Reduce learning rate when a metric has stopped improving.\n Models often benefit from reducing the learning rate by a factor\n of 2-10 once learning stagnates. This scheduler reads a metrics\n quantity and if no improvement is seen for a 'patience' number\n of epochs, the learning rate is reduced.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n mode (str): One of `min`, `max`. In `min` mode, lr will\n be reduced when the quantity monitored has stopped\n decreasing; in `max` mode it will be reduced when the\n quantity monitored has stopped increasing. Default: 'min'.\n factor (float): Factor by which the learning rate will be\n reduced. new_lr = lr * factor. Default: 0.1.\n patience (int): Number of epochs with no improvement after\n which learning rate will be reduced. Default: 10.\n verbose (bool): If ``True``, prints a message to stdout for\n each update. Default: ``False``.\n threshold (float): Threshold for measuring the new optimum,\n to only focus on significant changes. Default: 1e-4.\n threshold_mode (str): One of `rel`, `abs`. In `rel` mode,\n dynamic_threshold = best * ( 1 + threshold ) in 'max'\n mode or best * ( 1 - threshold ) in `min` mode.\n In `abs` mode, dynamic_threshold = best + threshold in\n `max` mode or best - threshold in `min` mode. Default: 'rel'.\n cooldown (int): Number of epochs to wait before resuming\n normal operation after lr has been reduced. Default: 0.\n min_lr (float or list): A scalar or a list of scalars. A\n lower bound on the learning rate of all param groups\n or each group respectively. Default: 0.\n eps (float): Minimal decay applied to lr. If the difference\n between new and old lr is smaller than eps, the update is\n ignored. 
Default: 1e-8.\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> scheduler = ReduceLROnPlateau(optimizer, 'min')\n >>> for epoch in range(10):\n >>> train(...)\n >>> val_loss = validate(...)\n >>> # Note that step should be called after validate()\n >>> scheduler.step(val_loss)\n \"\"\"\n\n def __init__(self, optimizer, mode='min', factor=0.1, patience=10,\n verbose=False, threshold=1e-4, threshold_mode='rel',\n cooldown=0, min_lr=0, eps=1e-8):\n\n if factor >= 1.0:\n raise ValueError('Factor should be < 1.0.')\n self.factor = factor\n\n if not isinstance(optimizer, Optimizer):\n raise TypeError('{} is not an Optimizer'.format(\n type(optimizer).__name__))\n self.optimizer = optimizer\n\n if isinstance(min_lr, list) or isinstance(min_lr, tuple):\n if len(min_lr) != len(optimizer.param_groups):\n raise ValueError(\"expected {} min_lrs, got {}\".format(\n len(optimizer.param_groups), len(min_lr)))\n self.min_lrs = list(min_lr)\n else:\n self.min_lrs = [min_lr] * len(optimizer.param_groups)\n\n self.patience = patience\n self.verbose = verbose\n self.cooldown = cooldown\n self.cooldown_counter = 0\n self.mode = mode\n self.threshold = threshold\n self.threshold_mode = threshold_mode\n self.best = None\n self.num_bad_epochs = None\n self.mode_worse = None # the worse value for the chosen mode\n self.is_better = None\n self.eps = eps\n self.last_epoch = -1\n self._init_is_better(mode=mode, threshold=threshold,\n threshold_mode=threshold_mode)\n self._reset()\n\n def _reset(self):\n \"\"\"Resets num_bad_epochs counter and cooldown counter.\"\"\"\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0\n\n def step(self, metrics, epoch=None):\n current = metrics\n if epoch is None:\n epoch = self.last_epoch = self.last_epoch + 1\n self.last_epoch = epoch\n\n if self.is_better(current, self.best):\n self.best = current\n self.num_bad_epochs = 0\n else:\n self.num_bad_epochs += 1\n\n if self.in_cooldown:\n self.cooldown_counter -= 1\n self.num_bad_epochs = 0 # ignore any bad epochs in cooldown\n\n if self.num_bad_epochs > self.patience:\n self._reduce_lr(epoch)\n self.cooldown_counter = self.cooldown\n self.num_bad_epochs = 0\n\n def _reduce_lr(self, epoch):\n for i, param_group in enumerate(self.optimizer.param_groups):\n old_lr = float(param_group['lr'])\n new_lr = max(old_lr * self.factor, self.min_lrs[i])\n if old_lr - new_lr > self.eps:\n param_group['lr'] = new_lr\n if self.verbose:\n print('Epoch {:5d}: reducing learning rate'\n ' of group {} to {:.4e}.'.format(epoch, i, new_lr))\n\n @property\n def in_cooldown(self):\n return self.cooldown_counter > 0\n\n def _init_is_better(self, mode, threshold, threshold_mode):\n if mode not in {'min', 'max'}:\n raise ValueError('mode ' + mode + ' is unknown!')\n if threshold_mode not in {'rel', 'abs'}:\n raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')\n if mode == 'min' and threshold_mode == 'rel':\n rel_epsilon = 1. 
- threshold\n self.is_better = lambda a, best: a < best * rel_epsilon\n self.mode_worse = float('Inf')\n elif mode == 'min' and threshold_mode == 'abs':\n self.is_better = lambda a, best: a < best - threshold\n self.mode_worse = float('Inf')\n elif mode == 'max' and threshold_mode == 'rel':\n rel_epsilon = threshold + 1.\n self.is_better = lambda a, best: a > best * rel_epsilon\n self.mode_worse = -float('Inf')\n else: # mode == 'max' and epsilon_mode == 'abs':\n self.is_better = lambda a, best: a > best + threshold\n self.mode_worse = -float('Inf')\n", "path": "torch/optim/lr_scheduler.py" } ]
[ { "content": "import math\nfrom bisect import bisect_right\nfrom .optimizer import Optimizer\n\n\nclass _LRScheduler(object):\n def __init__(self, optimizer, last_epoch=-1):\n if not isinstance(optimizer, Optimizer):\n raise TypeError('{} is not an Optimizer'.format(\n type(optimizer).__name__))\n self.optimizer = optimizer\n if last_epoch == -1:\n for group in optimizer.param_groups:\n group.setdefault('initial_lr', group['lr'])\n else:\n for i, group in enumerate(optimizer.param_groups):\n if 'initial_lr' not in group:\n raise KeyError(\"param 'initial_lr' is not specified \"\n \"in param_groups[{}] when resuming an optimizer\".format(i))\n self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))\n self.step(last_epoch + 1)\n self.last_epoch = last_epoch\n\n def get_lr(self):\n raise NotImplementedError\n\n def step(self, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n self.last_epoch = epoch\n for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n param_group['lr'] = lr\n\n\nclass LambdaLR(_LRScheduler):\n \"\"\"Sets the learning rate of each parameter group to the initial lr\n times a given function. When last_epoch=-1, sets initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n lr_lambda (function or list): A function which computes a multiplicative\n factor given an integer parameter epoch, or a list of such\n functions, one for each group in optimizer.param_groups.\n last_epoch (int): The index of last epoch. Default: -1.\n\n Example:\n >>> # Assuming optimizer has two groups.\n >>> lambda1 = lambda epoch: epoch // 30\n >>> lambda2 = lambda epoch: 0.95 ** epoch\n >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])\n >>> for epoch in range(100):\n >>> scheduler.step()\n >>> train(...)\n >>> validate(...)\n \"\"\"\n def __init__(self, optimizer, lr_lambda, last_epoch=-1):\n self.optimizer = optimizer\n if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):\n self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)\n else:\n if len(lr_lambda) != len(optimizer.param_groups):\n raise ValueError(\"Expected {} lr_lambdas, but got {}\".format(\n len(optimizer.param_groups), len(lr_lambda)))\n self.lr_lambdas = list(lr_lambda)\n self.last_epoch = last_epoch\n super(LambdaLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * lmbda(self.last_epoch)\n for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]\n\n\nclass StepLR(_LRScheduler):\n \"\"\"Sets the learning rate of each parameter group to the initial lr\n decayed by gamma every step_size epochs. When last_epoch=-1, sets\n initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n step_size (int): Period of learning rate decay.\n gamma (float): Multiplicative factor of learning rate decay.\n Default: 0.1.\n last_epoch (int): The index of last epoch. 
Default: -1.\n\n Example:\n >>> # Assuming optimizer uses lr = 0.05 for all groups\n >>> # lr = 0.05 if epoch < 30\n >>> # lr = 0.005 if 30 <= epoch < 60\n >>> # lr = 0.0005 if 60 <= epoch < 90\n >>> # ...\n >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)\n >>> for epoch in range(100):\n >>> scheduler.step()\n >>> train(...)\n >>> validate(...)\n \"\"\"\n\n def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1):\n self.step_size = step_size\n self.gamma = gamma\n super(StepLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * self.gamma ** (self.last_epoch // self.step_size)\n for base_lr in self.base_lrs]\n\n\nclass MultiStepLR(_LRScheduler):\n \"\"\"Set the learning rate of each parameter group to the initial lr decayed\n by gamma once the number of epoch reaches one of the milestones. When\n last_epoch=-1, sets initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n milestones (list): List of epoch indices. Must be increasing.\n gamma (float): Multiplicative factor of learning rate decay.\n Default: 0.1.\n last_epoch (int): The index of last epoch. Default: -1.\n\n Example:\n >>> # Assuming optimizer uses lr = 0.05 for all groups\n >>> # lr = 0.05 if epoch < 30\n >>> # lr = 0.005 if 30 <= epoch < 80\n >>> # lr = 0.0005 if epoch >= 80\n >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)\n >>> for epoch in range(100):\n >>> scheduler.step()\n >>> train(...)\n >>> validate(...)\n \"\"\"\n\n def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):\n if not list(milestones) == sorted(milestones):\n raise ValueError('Milestones should be a list of'\n ' increasing integers. Got {}', milestones)\n self.milestones = milestones\n self.gamma = gamma\n super(MultiStepLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * self.gamma ** bisect_right(self.milestones, self.last_epoch)\n for base_lr in self.base_lrs]\n\n\nclass ExponentialLR(_LRScheduler):\n \"\"\"Set the learning rate of each parameter group to the initial lr decayed\n by gamma every epoch. When last_epoch=-1, sets initial lr as lr.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n gamma (float): Multiplicative factor of learning rate decay.\n last_epoch (int): The index of last epoch. Default: -1.\n \"\"\"\n\n def __init__(self, optimizer, gamma, last_epoch=-1):\n self.gamma = gamma\n super(ExponentialLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [base_lr * self.gamma ** self.last_epoch\n for base_lr in self.base_lrs]\n\n\nclass CosineAnnealingLR(_LRScheduler):\n \"\"\"Set the learning rate of each parameter group using a cosine annealing\n schedule, where :math:`\\eta_{max}` is set to the initial lr and\n :math:`T_{cur}` is the number of epochs since the last restart in SGDR:\n\n .. math::\n\n \\eta_t = \\eta_{min} + \\frac{1}{2}(\\eta_{max} - \\eta_{min})(1 +\n \\cos(\\frac{T_{cur}}{T_{max}}\\pi))\n\n When last_epoch=-1, sets initial lr as lr.\n\n It has been proposed in\n `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only\n implements the cosine annealing part of SGDR, and not the restarts.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n T_max (int): Maximum number of iterations.\n eta_min (float): Minimum learning rate. Default: 0.\n last_epoch (int): The index of last epoch. Default: -1.\n\n .. 
_SGDR\\: Stochastic Gradient Descent with Warm Restarts:\n https://arxiv.org/abs/1608.03983\n \"\"\"\n\n def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):\n self.T_max = T_max\n self.eta_min = eta_min\n super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n return [self.eta_min + (base_lr - self.eta_min) *\n (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2\n for base_lr in self.base_lrs]\n\n\nclass ReduceLROnPlateau(object):\n \"\"\"Reduce learning rate when a metric has stopped improving.\n Models often benefit from reducing the learning rate by a factor\n of 2-10 once learning stagnates. This scheduler reads a metrics\n quantity and if no improvement is seen for a 'patience' number\n of epochs, the learning rate is reduced.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n mode (str): One of `min`, `max`. In `min` mode, lr will\n be reduced when the quantity monitored has stopped\n decreasing; in `max` mode it will be reduced when the\n quantity monitored has stopped increasing. Default: 'min'.\n factor (float): Factor by which the learning rate will be\n reduced. new_lr = lr * factor. Default: 0.1.\n patience (int): Number of epochs with no improvement after\n which learning rate will be reduced. Default: 10.\n verbose (bool): If ``True``, prints a message to stdout for\n each update. Default: ``False``.\n threshold (float): Threshold for measuring the new optimum,\n to only focus on significant changes. Default: 1e-4.\n threshold_mode (str): One of `rel`, `abs`. In `rel` mode,\n dynamic_threshold = best * ( 1 + threshold ) in 'max'\n mode or best * ( 1 - threshold ) in `min` mode.\n In `abs` mode, dynamic_threshold = best + threshold in\n `max` mode or best - threshold in `min` mode. Default: 'rel'.\n cooldown (int): Number of epochs to wait before resuming\n normal operation after lr has been reduced. Default: 0.\n min_lr (float or list): A scalar or a list of scalars. A\n lower bound on the learning rate of all param groups\n or each group respectively. Default: 0.\n eps (float): Minimal decay applied to lr. If the difference\n between new and old lr is smaller than eps, the update is\n ignored. 
Default: 1e-8.\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> scheduler = ReduceLROnPlateau(optimizer, 'min')\n >>> for epoch in range(10):\n >>> train(...)\n >>> val_loss = validate(...)\n >>> # Note that step should be called after validate()\n >>> scheduler.step(val_loss)\n \"\"\"\n\n def __init__(self, optimizer, mode='min', factor=0.1, patience=10,\n verbose=False, threshold=1e-4, threshold_mode='rel',\n cooldown=0, min_lr=0, eps=1e-8):\n\n if factor >= 1.0:\n raise ValueError('Factor should be < 1.0.')\n self.factor = factor\n\n if not isinstance(optimizer, Optimizer):\n raise TypeError('{} is not an Optimizer'.format(\n type(optimizer).__name__))\n self.optimizer = optimizer\n\n if isinstance(min_lr, list) or isinstance(min_lr, tuple):\n if len(min_lr) != len(optimizer.param_groups):\n raise ValueError(\"expected {} min_lrs, got {}\".format(\n len(optimizer.param_groups), len(min_lr)))\n self.min_lrs = list(min_lr)\n else:\n self.min_lrs = [min_lr] * len(optimizer.param_groups)\n\n self.patience = patience\n self.verbose = verbose\n self.cooldown = cooldown\n self.cooldown_counter = 0\n self.mode = mode\n self.threshold = threshold\n self.threshold_mode = threshold_mode\n self.best = None\n self.num_bad_epochs = None\n self.mode_worse = None # the worse value for the chosen mode\n self.is_better = None\n self.eps = eps\n self.last_epoch = -1\n self._init_is_better(mode=mode, threshold=threshold,\n threshold_mode=threshold_mode)\n self._reset()\n\n def _reset(self):\n \"\"\"Resets num_bad_epochs counter and cooldown counter.\"\"\"\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0\n\n def step(self, metrics, epoch=None):\n current = metrics\n if epoch is None:\n epoch = self.last_epoch = self.last_epoch + 1\n self.last_epoch = epoch\n\n if self.is_better(current, self.best):\n self.best = current\n self.num_bad_epochs = 0\n else:\n self.num_bad_epochs += 1\n\n if self.in_cooldown:\n self.cooldown_counter -= 1\n self.num_bad_epochs = 0 # ignore any bad epochs in cooldown\n\n if self.num_bad_epochs > self.patience:\n self._reduce_lr(epoch)\n self.cooldown_counter = self.cooldown\n self.num_bad_epochs = 0\n\n def _reduce_lr(self, epoch):\n for i, param_group in enumerate(self.optimizer.param_groups):\n old_lr = float(param_group['lr'])\n new_lr = max(old_lr * self.factor, self.min_lrs[i])\n if old_lr - new_lr > self.eps:\n param_group['lr'] = new_lr\n if self.verbose:\n print('Epoch {:5d}: reducing learning rate'\n ' of group {} to {:.4e}.'.format(epoch, i, new_lr))\n\n @property\n def in_cooldown(self):\n return self.cooldown_counter > 0\n\n def _init_is_better(self, mode, threshold, threshold_mode):\n if mode not in {'min', 'max'}:\n raise ValueError('mode ' + mode + ' is unknown!')\n if threshold_mode not in {'rel', 'abs'}:\n raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')\n if mode == 'min' and threshold_mode == 'rel':\n rel_epsilon = 1. 
- threshold\n self.is_better = lambda a, best: a < best * rel_epsilon\n self.mode_worse = float('Inf')\n elif mode == 'min' and threshold_mode == 'abs':\n self.is_better = lambda a, best: a < best - threshold\n self.mode_worse = float('Inf')\n elif mode == 'max' and threshold_mode == 'rel':\n rel_epsilon = threshold + 1.\n self.is_better = lambda a, best: a > best * rel_epsilon\n self.mode_worse = -float('Inf')\n else: # mode == 'max' and epsilon_mode == 'abs':\n self.is_better = lambda a, best: a > best + threshold\n self.mode_worse = -float('Inf')\n", "path": "torch/optim/lr_scheduler.py" } ]
diff --git a/test/test_optim.py b/test/test_optim.py index 66a78fd8119c42..f05b219a42fccb 100644 --- a/test/test_optim.py +++ b/test/test_optim.py @@ -503,7 +503,7 @@ def test_cos_anneal_lr(self): epochs = 10 eta_min = 1e-10 single_targets = [eta_min + (0.05 - eta_min) * - (1 + math.cos(x / epochs * math.pi)) / 2 + (1 + math.cos(math.pi * x / epochs)) / 2 for x in range(epochs)] targets = [single_targets, list(map(lambda x: x * epochs, single_targets))] scheduler = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min) diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py index 33b7ebf34db103..0b125c459c6658 100644 --- a/torch/optim/lr_scheduler.py +++ b/torch/optim/lr_scheduler.py @@ -194,7 +194,7 @@ def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1): def get_lr(self): return [self.eta_min + (base_lr - self.eta_min) * - (1 + math.cos(self.last_epoch / self.T_max * math.pi)) / 2 + (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 for base_lr in self.base_lrs]
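For reference, the expression the diff above settles on is the standard SGDR cosine-annealing formula; the change only reorders the operands (`math.pi * x / T_max` rather than `x / T_max * math.pi`) so that the implementation and the test expectation are written identically. A minimal standalone sketch of the schedule being evaluated, using the same toy values as the test in the diff (base lr 0.05, `T_max = 10`, `eta_min = 1e-10`) and no torch dependency, looks like this:

```python
import math

# Values taken from the test in the diff above: base lr 0.05, T_max = 10 epochs,
# eta_min = 1e-10. This evaluates the closed-form schedule only, independent of torch.
base_lr, t_max, eta_min = 0.05, 10, 1e-10

def cosine_annealed_lr(epoch):
    # eta_t = eta_min + (eta_max - eta_min) * (1 + cos(pi * t / T_max)) / 2
    return eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * epoch / t_max)) / 2

expected = [cosine_annealed_lr(epoch) for epoch in range(t_max)]
print(expected)  # starts at 0.05 and decays toward eta_min as epoch approaches T_max
```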
Qiskit__qiskit-2448
No module named 'vcr': requirement is missing (vcrpy)

### Information

- **Qiskit Terra version**: 0.10.1
- **Python version**: 3.7.3
- **Operating system**: Windows 10

### What is the current behavior?

Fresh Qiskit installation inside a new environment on Windows 10. In one of the Terra tutorials (using_the_transpiler), `from qiskit.test.mock import FakeTokyo` fails with `ModuleNotFoundError: No module named 'vcr'`.

### Suggested solutions

Run `pip install vcrpy`; `vcrpy` needs to be added to the requirements.
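A self-contained reproduction of the failure described above might look like the sketch below; the module path and class name are taken from the report, and the workaround in the comment is the reporter's suggestion, not the fix that was ultimately merged.

```python
# Hypothetical reproduction of the report: importing qiskit.test.mock pulls in the
# VCR-based HTTP recorder, which requires the optional 'vcr' (vcrpy) package.
try:
    from qiskit.test.mock import FakeTokyo  # noqa: F401  (name taken from the report)
except ModuleNotFoundError as err:
    # Reporter's suggested workaround: install the missing test dependency,
    #     pip install vcrpy
    # The merged fix below instead removes the vcrpy dependency altogether.
    print(f"import failed: {err}")
```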
[ { "content": "# -*- coding: utf-8 -*-\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Common utilities for Qiskit.\"\"\"\n\nimport platform\nimport re\nimport socket\nimport sys\nimport warnings\n\nimport psutil\nfrom marshmallow.warnings import ChangedInMarshmallow3Warning\n\n\ndef _check_python_version():\n \"\"\"Check for Python version 3.5+.\"\"\"\n if sys.version_info < (3, 5):\n raise Exception('Qiskit requires Python version 3.5 or greater.')\n\n\ndef _filter_deprecation_warnings():\n \"\"\"Apply filters to deprecation warnings.\n\n Force the `DeprecationWarning` warnings to be displayed for the qiskit\n module, overriding the system configuration as they are ignored by default\n [1] for end-users. Additionally, silence the `ChangedInMarshmallow3Warning`\n messages.\n\n TODO: on Python 3.7, this might not be needed due to PEP-0565 [2].\n\n [1] https://docs.python.org/3/library/warnings.html#default-warning-filters\n [2] https://www.python.org/dev/peps/pep-0565/\n \"\"\"\n deprecation_filter = ('always', None, DeprecationWarning,\n re.compile(r'^qiskit\\.*', re.UNICODE), 0)\n\n # Instead of using warnings.simple_filter() directly, the internal\n # _add_filter() function is used for being able to match against the\n # module.\n try:\n warnings._add_filter(*deprecation_filter, append=False)\n except AttributeError:\n # ._add_filter is internal and not available in some Python versions.\n pass\n\n # Add a filter for ignoring ChangedInMarshmallow3Warning, as we depend on\n # marhsmallow 2 explicitly. 2.17.0 introduced new deprecation warnings that\n # are useful for eventually migrating, but too verbose for our purposes.\n warnings.simplefilter('ignore', category=ChangedInMarshmallow3Warning)\n\n\n_check_python_version()\n_filter_deprecation_warnings()\n\n\ndef local_hardware_info():\n \"\"\"Basic hardware information about the local machine.\n\n Gives actual number of CPU's in the machine, even when hyperthreading is\n turned on. CPU count defaults to 1 when true count can't be determined.\n\n Returns:\n dict: The hardware information.\n \"\"\"\n results = {\n 'os': platform.system(),\n 'memory': psutil.virtual_memory().total / (1024 ** 3),\n 'cpus': psutil.cpu_count(logical=False) or 1\n }\n return results\n\n\ndef _has_connection(hostname, port):\n \"\"\"Checks if internet connection exists to host via specified port.\n\n If any exception is raised while trying to open a socket this will return\n false.\n\n Args:\n hostname (str): Hostname to connect to.\n port (int): Port to connect to\n\n Returns:\n bool: Has connection or not\n\n \"\"\"\n try:\n host = socket.gethostbyname(hostname)\n socket.create_connection((host, port), 2)\n return True\n except Exception: # pylint: disable=broad-except\n return False\n", "path": "qiskit/util.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Common utilities for Qiskit.\"\"\"\n\nimport platform\nimport re\nimport socket\nimport sys\nimport warnings\n\nimport psutil\nfrom marshmallow.warnings import ChangedInMarshmallow3Warning\n\n\ndef _check_python_version():\n \"\"\"Check for Python version 3.5+.\"\"\"\n if sys.version_info < (3, 5):\n raise Exception('Qiskit requires Python version 3.5 or greater.')\n\n\ndef _filter_deprecation_warnings():\n \"\"\"Apply filters to deprecation warnings.\n\n Force the `DeprecationWarning` warnings to be displayed for the qiskit\n module, overriding the system configuration as they are ignored by default\n [1] for end-users. Additionally, silence the `ChangedInMarshmallow3Warning`\n messages.\n\n TODO: on Python 3.7, this might not be needed due to PEP-0565 [2].\n\n [1] https://docs.python.org/3/library/warnings.html#default-warning-filters\n [2] https://www.python.org/dev/peps/pep-0565/\n \"\"\"\n deprecation_filter = ('always', None, DeprecationWarning,\n re.compile(r'^qiskit\\.*', re.UNICODE), 0)\n\n # Instead of using warnings.simple_filter() directly, the internal\n # _add_filter() function is used for being able to match against the\n # module.\n try:\n warnings._add_filter(*deprecation_filter, append=False)\n except AttributeError:\n # ._add_filter is internal and not available in some Python versions.\n pass\n\n # Add a filter for ignoring ChangedInMarshmallow3Warning, as we depend on\n # marhsmallow 2 explicitly. 2.17.0 introduced new deprecation warnings that\n # are useful for eventually migrating, but too verbose for our purposes.\n warnings.simplefilter('ignore', category=ChangedInMarshmallow3Warning)\n\n\n_check_python_version()\n_filter_deprecation_warnings()\n\n\ndef local_hardware_info():\n \"\"\"Basic hardware information about the local machine.\n\n Gives actual number of CPU's in the machine, even when hyperthreading is\n turned on. CPU count defaults to 1 when true count can't be determined.\n\n Returns:\n dict: The hardware information.\n \"\"\"\n results = {\n 'os': platform.system(),\n 'memory': psutil.virtual_memory().total / (1024 ** 3),\n 'cpus': psutil.cpu_count(logical=False) or 1\n }\n return results\n\n\ndef _has_connection(hostname, port):\n \"\"\"Checks if internet connection exists to host via specified port.\n\n If any exception is raised while trying to open a socket this will return\n false.\n\n Args:\n hostname (str): Hostname to connect to.\n port (int): Port to connect to\n\n Returns:\n bool: Has connection or not\n\n \"\"\"\n try:\n host = socket.gethostbyname(hostname)\n socket.create_connection((host, port), 2).close()\n return True\n except Exception: # pylint: disable=broad-except\n return False\n", "path": "qiskit/util.py" } ]
diff --git a/.pylintrc b/.pylintrc index ae9061c82f2a..6040fbfed61f 100644 --- a/.pylintrc +++ b/.pylintrc @@ -21,7 +21,7 @@ persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins=pylint.extensions.docparams, # enable checking of docstring args - pylint.extensions.docstyle, # basic docstring stle checks + pylint.extensions.docstyle, # basic docstring style checks pylintfileheader # Check license comments file-header=(?:(?:#[^\n]*)?\n)*# This code is part of Qiskit.\n#\n# \(C\) Copyright IBM [0-9, -]*.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d9344065b780..2d495534781e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -22,7 +22,8 @@ The format is based on `Keep a Changelog`_. Deprecated ---------- -- The gates `U` and `CX` are being deprecated in favor of `u3` and `cx`. +- The gates ``U`` and ``CX`` are being deprecated in favor of ``u3`` and ``cx``. +- The decorator ``requires_qe_access`` is being deprecated in favor of ``online_test``. Added ----- @@ -40,6 +41,8 @@ Changed - When adding a register to a circuit, an error will now be raised if a register of the same name is already present. Previously, an error would only be raised if the same register was added twice. +- Qubits and classical bits are not represented as a tuples anymore, but as + instances of ``Qubit`` and ``Clbit`` respectively. Removed ------- diff --git a/Makefile b/Makefile index 79399d949809..4dabcdaae082 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ else CONCURRENCY := $(shell echo "$(NPROCS) 2" | awk '{printf "%.0f", $$1 / $$2}') endif -.PHONY: env lint test test_record test_mock test_ci +.PHONY: env lint test test_ci # Dependencies need to be installed on the Anaconda virtual environment. 
env: @@ -55,13 +55,6 @@ style: test: python3 -m unittest discover -s test -v -test_mock: - env QISKIT_TESTS=mock_online python3 -m unittest discover -s test -v - -test_recording: - -rm test/cassettes/* - env QISKIT_TESTS=rec python3 -m unittest discover -s test -v - test_ci: echo "Detected $(NPROCS) CPUs running with $(CONCURRENCY) workers" stestr run --concurrency $(CONCURRENCY) diff --git a/qiskit/test/__init__.py b/qiskit/test/__init__.py index 820df145f4e0..4eb20ffa2326 100644 --- a/qiskit/test/__init__.py +++ b/qiskit/test/__init__.py @@ -15,6 +15,6 @@ """Functionality and helpers for testing Qiskit.""" from .base import QiskitTestCase -from .decorators import requires_aer_provider, requires_qe_access, slow_test +from .decorators import requires_aer_provider, online_test, slow_test, requires_qe_access from .reference_circuits import ReferenceCircuits from .utils import Path diff --git a/qiskit/test/decorators.py b/qiskit/test/decorators.py index e55cf681de48..bf8917cca37e 100644 --- a/qiskit/test/decorators.py +++ b/qiskit/test/decorators.py @@ -18,11 +18,13 @@ import os import sys import unittest +from warnings import warn -from .utils import Path -from .http_recorder import http_recorder +from qiskit.util import _has_connection from .testing_options import get_test_options +HAS_NET_CONNECTION = None + def is_aer_provider_available(): """Check if the C++ simulator can be instantiated. @@ -137,7 +139,27 @@ def _get_credentials(test_object, test_options): def requires_qe_access(func): - """Decorator that signals that the test uses the online API: + """Deprecated in favor of `online_test`""" + warn("`requires_qe_access` is going to be replaced in favor of `online_test`", + DeprecationWarning) + + @functools.wraps(func) + def _wrapper(self, *args, **kwargs): + if TEST_OPTIONS['skip_online']: + raise unittest.SkipTest('Skipping online tests') + + credentials = _get_credentials(self, TEST_OPTIONS) + self.using_ibmq_credentials = credentials.is_ibmq() + kwargs.update({'qe_token': credentials.token, + 'qe_url': credentials.url}) + + return func(self, *args, **kwargs) + + return _wrapper + + +def online_test(func): + """Decorator that signals that the test uses the network (and the online API): It involves: * determines if the test should be skipped by checking environment @@ -159,23 +181,24 @@ def requires_qe_access(func): @functools.wraps(func) def _wrapper(self, *args, **kwargs): + # To avoid checking the connection in each test + global HAS_NET_CONNECTION # pylint: disable=global-statement + if TEST_OPTIONS['skip_online']: raise unittest.SkipTest('Skipping online tests') + if HAS_NET_CONNECTION is None: + HAS_NET_CONNECTION = _has_connection('qiskit.org', 443) + + if not HAS_NET_CONNECTION: + raise unittest.SkipTest("Test requires internet connection.") + credentials = _get_credentials(self, TEST_OPTIONS) self.using_ibmq_credentials = credentials.is_ibmq() kwargs.update({'qe_token': credentials.token, 'qe_url': credentials.url}) - decorated_func = func - if TEST_OPTIONS['rec'] or TEST_OPTIONS['mock_online']: - # For recording or for replaying existing cassettes, the test - # should be decorated with @use_cassette. 
- vcr_mode = 'new_episodes' if TEST_OPTIONS['rec'] else 'none' - decorated_func = http_recorder( - vcr_mode, Path.CASSETTES.value).use_cassette()(decorated_func) - - return decorated_func(self, *args, **kwargs) + return func(self, *args, **kwargs) return _wrapper diff --git a/qiskit/test/http_recorder.py b/qiskit/test/http_recorder.py deleted file mode 100644 index 83a18880c459..000000000000 --- a/qiskit/test/http_recorder.py +++ /dev/null @@ -1,289 +0,0 @@ -# -*- coding: utf-8 -*- - -# This code is part of Qiskit. -# -# (C) Copyright IBM 2017, 2018. -# -# This code is licensed under the Apache License, Version 2.0. You may -# obtain a copy of this license in the LICENSE.txt file in the root directory -# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. -# -# Any modifications or derivative works of this code must retain this -# copyright notice, and modified files need to carry a notice indicating -# that they have been altered from the originals. - -"""Utilities (based on VCRpy) to record remote requests and allow testing offline/cached.""" - -import json -from contextlib import suppress -from vcr.persisters.filesystem import FilesystemPersister -from vcr import VCR - - -class IdRemoverPersister(FilesystemPersister): - """VCR Persister for Qiskit. - - IdRemoverPersister is a VCR persister. This is, it implements a way to save and load cassettes. - This persister in particular inherits load_cassette from FilesystemPersister (basically, it - loads a standard cassette in the standard way from the file system). On the saving side, it - replaces some fields in the JSON content of the responses with dummy values. - """ - - @staticmethod - def get_responses_with(string_to_find, cassette_dict): - """Filters the requests from cassette_dict - - Args: - string_to_find (str): request path - cassette_dict (dict): a VCR cassette dictionary - - Returns: - Request: VCR's representation of a request. - """ - return [response for response, request in - zip(cassette_dict['responses'], cassette_dict['requests']) - if string_to_find in request.path] - - @staticmethod - def get_new_id(field, path, id_tracker, type_=str): - """Creates a new dummy id (or value) for replacing an existing id (or value). - - Args: - field (str): field name is used, in same cases, to create a dummy value. - path (str): path of the request is used, in same cases, to create a dummy value. - id_tracker (dict): a map of already assigned ids and generated ids. - type_ (type): type of the value. - - Returns: - str: that is used to replace a value. - """ - - if type_ == float: - return 0.42 - if type_ == int: - return 42 - dummy_name = 'dummy%s%s' % (path.replace('/', ''), field) - count = len(list(filter(lambda x: str(x).startswith(dummy_name), id_tracker.values()))) - return "%s%02d" % (dummy_name, count + 1) - - @staticmethod - def get_matching_dicts(data_dict, map_list): - """Find subdicts that are described in map_list. - - Args: - data_dict (dict): in which the map_list is going to be searched. - map_list (list): the list of nested keys to find in the data_dict - - Returns: - list: a list of dictionaries, each of them matches map_list. 
- """ - ret = [] - if not map_list: - return ret - if isinstance(data_dict, list): - for sub_data_dict in data_dict: - ret.extend(IdRemoverPersister.get_matching_dicts(sub_data_dict, map_list)) - if isinstance(data_dict, dict): - if map_list[0] in data_dict.keys(): - if len(map_list) == 1: - return [data_dict] - else: - ret.extend( - IdRemoverPersister.get_matching_dicts(data_dict[map_list[0]], map_list[1:])) - return ret - - @staticmethod - def remove_id_in_a_json(jsonobj, field, path, id_tracker): - """Replaces ids with dummy values in a json. - - Replaces in jsonobj (in-place) the field with dummy value (which is constructed with - id_tracker, if it was already replaced, or path, if it needs to be created). - - Args: - jsonobj (dict): json dictionary from the response body - field (str): string with the field in the response to by replaced - path (str): request path - id_tracker (dict): a dictionary of the ids already assigned. - """ - - map_list = field.split('.') - for matching_dict in IdRemoverPersister.get_matching_dicts(jsonobj, map_list): - with suppress(KeyError): - old_id = matching_dict[map_list[-1]] - if old_id not in id_tracker: - new_id = IdRemoverPersister.get_new_id(field, path, id_tracker, type(old_id)) - id_tracker[old_id] = new_id - matching_dict[map_list[-1]] = id_tracker[old_id] - - @staticmethod - def remove_ids_in_a_response(response, fields, path, id_tracker): - """Replaces ids with dummy values in a response. - - Replaces in response (in-place) the fields with dummy values (which is constructed with - id_tracker, if it was already replaced, or path, if it needs to be created). - - Args: - response (dict): dictionary of the response body - fields (list): list of fields in the response to by replaced - path (str): request path - id_tracker (dict): a dictionary of the ids already assigned. - """ - body = json.loads(response['body']['string'].decode('utf-8')) - for field in fields: - IdRemoverPersister.remove_id_in_a_json(body, field, path, id_tracker) - response['body']['string'] = json.dumps(body).encode('utf-8') - - @staticmethod - def remove_ids(ids2remove, cassette_dict): - """Replaces ids with dummy values in a cassette. - - Replaces in cassette_dict (in-place) the fields defined by ids2remove with dummy values. - Internally, it used a map (id_tracker) between real values and dummy values to keep - consistency during the renaming. - - Args: - ids2remove (dict): {request_path: [json_fields]} - cassette_dict (dict): a VCR cassette dictionary. - """ - - id_tracker = {} # {old_id: new_id} - for path, fields in ids2remove.items(): - responses = IdRemoverPersister.get_responses_with(path, cassette_dict) - for response in responses: - IdRemoverPersister.remove_ids_in_a_response(response, fields, path, id_tracker) - for old_id, new_id in id_tracker.items(): - if isinstance(old_id, str): - for request in cassette_dict['requests']: - request.uri = request.uri.replace(old_id, new_id) - - @staticmethod - def save_cassette(cassette_path, cassette_dict, serializer): - """Extends FilesystemPersister.save_cassette - - Extends FilesystemPersister.save_cassette. Replaces particular values (defined by - ids2remove) which are replaced by a dummy value. The full manipulation is in - cassette_dict, before saving it using FilesystemPersister.save_cassette - - Args: - cassette_path (str): the file location where the cassette will be saved. - cassette_dict (dict): a VCR cassette dictionary. This is the information that will - be dump in cassette_path, using serializer. 
- serializer (callable): the serializer for dumping cassette_dict in cassette_path. - """ - ids2remove = {'/api/users/loginWithToken': ['id', - 'userId', - 'created'], - '/api/Jobs': ['id', - 'userId', - 'creationDate', - 'qasms.executionId', - 'qasms.result.date', - 'qasms.result.data.time', - 'qasms.result.data.additionalData.seed'], - '/api/Backends': ['internalId', - 'topologyId'], - '/api/Backends/ibmqx5/queue/status': ['lengthQueue'], - '/api/Backends/ibmqx4/queue/status': ['lengthQueue']} - IdRemoverPersister.remove_ids(ids2remove, cassette_dict) - super(IdRemoverPersister, IdRemoverPersister).save_cassette(cassette_path, - cassette_dict, - serializer) - - -def http_recorder(vcr_mode, cassette_dir): - """Creates a VCR object in vcr_mode mode. - - Args: - vcr_mode (string): the parameter for record_mode. - cassette_dir (string): path to the cassettes. - - Returns: - VCR: a VCR object. - """ - my_vcr = VCR( - cassette_library_dir=cassette_dir, - record_mode=vcr_mode, - match_on=['method', 'scheme', 'host', 'port', 'path', 'unordered_query'], - filter_headers=['x-qx-client-application', 'User-Agent'], - filter_query_parameters=[('access_token', 'dummyapiusersloginWithTokenid01')], - filter_post_data_parameters=[('apiToken', 'apiToken_dummy')], - decode_compressed_response=True, - before_record_response=_purge_headers_cb(['Date', - ('Set-Cookie', 'dummy_cookie'), - 'X-Global-Transaction-ID', - 'Etag', - 'Content-Security-Policy', - 'X-Content-Security-Policy', - 'X-Webkit-Csp', - 'content-length'])) - my_vcr.register_matcher('unordered_query', _unordered_query_matcher) - my_vcr.register_persister(IdRemoverPersister) - return my_vcr - - -def _purge_headers_cb(headers): - """Remove headers from the response. - - Args: - headers (list): headers to remove from the response - - Returns: - callable: for been used in before_record_response VCR constructor. - """ - header_list = [] - for item in headers: - if not isinstance(item, tuple): - item = (item, None) - header_list.append(item[0:2]) # ensure the tuple is a pair - - def before_record_response_cb(response): - """Purge headers from response. - - Args: - response (dict): a VCR response - - Returns: - dict: a VCR response - """ - for (header, value) in header_list: - with suppress(KeyError): - if value: - response['headers'][header] = value - else: - del response['headers'][header] - return response - - return before_record_response_cb - - -def _unordered_query_matcher(request1, request2): - """A VCR matcher that ignores the order of values in the query string. - - A VCR matcher (a la VCR.matcher) that ignores the order of the values in the query string. - Useful for filter params, for example. - - Args: - request1 (Request): a VCR request - request2 (Request): a VCR request - - Returns: - bool: True if they match. 
- """ - if request1.query == request2.query: - return True - - dict1 = dict(request1.query) - dict2 = dict(request2.query) - - if dict1 == dict2: - return True - - if dict1.keys() != dict2.keys(): - return False - - for key, value in dict1.items(): - with suppress(ValueError): - dict1[key] = json.loads(value) - dict2[key] = json.loads(dict2[key]) - - return dict1 == dict2 diff --git a/qiskit/test/utils.py b/qiskit/test/utils.py index a861fa7a7ca2..929e3a6bddda 100644 --- a/qiskit/test/utils.py +++ b/qiskit/test/utils.py @@ -33,8 +33,6 @@ class Path(Enum): EXAMPLES = os.path.normpath(os.path.join(SDK, '..', 'examples')) # Schemas path: qiskit/schemas SCHEMAS = os.path.normpath(os.path.join(SDK, 'schemas')) - # VCR cassettes path: qiskit/test/cassettes/ - CASSETTES = os.path.normpath(os.path.join(TEST, '..', 'cassettes')) # Sample QASMs path: qiskit/test/python/qasm QASMS = os.path.normpath(os.path.join(TEST, 'qasm')) diff --git a/qiskit/util.py b/qiskit/util.py index ca6998629203..90ad9f30abf8 100644 --- a/qiskit/util.py +++ b/qiskit/util.py @@ -97,7 +97,7 @@ def _has_connection(hostname, port): """ try: host = socket.gethostbyname(hostname) - socket.create_connection((host, port), 2) + socket.create_connection((host, port), 2).close() return True except Exception: # pylint: disable=broad-except return False diff --git a/requirements-dev.txt b/requirements-dev.txt index 737f11814f22..4dedc3cd967f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,7 +9,6 @@ pydot pylint>=2.3,<2.4 pylintfileheader>=0.0.2 stestr>=2.0.0 -vcrpy PyGithub wheel cython>=0.27.1 diff --git a/test/cassettes/test_backend_monitor b/test/cassettes/test_backend_monitor deleted file mode 100644 index 087f4fcf861f..000000000000 --- a/test/cassettes/test_backend_monitor +++ /dev/null @@ -1,381 +0,0 @@ -interactions: -- request: - body: apiToken=apiToken_dummy - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - Content-Length: ['137'] - Content-Type: [application/x-www-form-urlencoded] - method: POST - uri: https://quantumexperience.ng.bluemix.net/api/users/loginWithToken - response: - body: {string: '{"created": "dummyapiusersloginWithTokencreated01", "userId": - "dummyapiusersloginWithTokenuserId01", "id": "dummyapiusersloginWithTokenid01", - "ttl": 1209600}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": 
"1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: 
['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 
3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmqx4/queue/status - response: - body: {string: '{"state": true, "status": "active", "backend_version": "1.0.0", - "lengthQueue": 42}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { 
U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - 
Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmqx4/properties?access_token=dummyapiusersloginWithTokenid01&version=1 - response: - body: {string: '{"backend_name": "ibmqx4", "backend_version": "1.0.0", "qubits": - [[{"date": "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": - 40.23580728497557}, {"date": "2019-02-05T02:16:42Z", "unit": "\u00b5s", "name": - "T2", "value": 28.053796455696148}, {"date": "2019-02-05T02:54:28Z", "unit": - "GHz", "name": "frequency", "value": 5.2498546876946115}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.08450000000000002}], [{"date": - "2019-02-04T10:14:24Z", "unit": "\u00b5s", "name": "T1", "value": 67.39179431370326}, - {"date": "2019-02-05T02:17:25Z", "unit": "\u00b5s", "name": "T2", "value": - 10.763705758897595}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.295772109235333}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.07925000000000004}], [{"date": - "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": 43.393200515665946}, - {"date": "2019-02-05T02:18:06Z", "unit": "\u00b5s", "name": "T2", "value": - 27.3542760576606}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.3533332743669355}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.03699999999999992}], [{"date": - "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": 55.541546751266985}, - {"date": "2019-02-05T02:17:25Z", "unit": "\u00b5s", "name": "T2", "value": - 14.846271380938276}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.434936834513384}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.03875000000000006}], [{"date": - "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": 53.860484623965284}, - {"date": "2019-02-05T02:16:42Z", "unit": "\u00b5s", "name": "T2", "value": - 4.983364732947786}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.175855462568935}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.26875000000000004}]], "gates": - [{"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", - "value": 0}], "qubits": [0], "gate": "u1"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", - "unit": "", "name": "gate_error", "value": 0.0006867731322012238}], "qubits": - [0], "gate": "u2"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": - "", "name": "gate_error", "value": 0.0013735462644024476}], "qubits": [0], - "gate": "u3"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", - "name": "gate_error", "value": 0}], "qubits": [1], "gate": "u1"}, {"parameters": - [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": - 0.00128782749692391}], "qubits": [1], "gate": "u2"}, {"parameters": [{"date": - "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": 0.00257565499384782}], - "qubits": [1], "gate": "u3"}, 
{"parameters": [{"date": "2019-02-05T10:57:11Z", - "unit": "", "name": "gate_error", "value": 0}], "qubits": [2], "gate": "u1"}, - {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", - "value": 0.00128782749692391}], "qubits": [2], "gate": "u2"}, {"parameters": - [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": - 0.00257565499384782}], "qubits": [2], "gate": "u3"}, {"parameters": [{"date": - "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": 0}], "qubits": - [3], "gate": "u1"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": - "", "name": "gate_error", "value": 0.001803112096824766}], "qubits": [3], - "gate": "u2"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", - "name": "gate_error", "value": 0.003606224193649532}], "qubits": [3], "gate": - "u3"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": - "gate_error", "value": 0}], "qubits": [4], "gate": "u1"}, {"parameters": [{"date": - "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": 0.006444645993361475}], - "qubits": [4], "gate": "u2"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", - "unit": "", "name": "gate_error", "value": 0.01288929198672295}], "qubits": - [4], "gate": "u3"}, {"parameters": [{"date": "2019-02-05T02:25:32Z", "unit": - "", "name": "gate_error", "value": 0.03594617578113263}], "qubits": [1, 0], - "name": "CX1_0", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:31:04Z", - "unit": "", "name": "gate_error", "value": 0.03205473341614962}], "qubits": - [2, 0], "name": "CX2_0", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:36:21Z", - "unit": "", "name": "gate_error", "value": 0.048500617566183984}], "qubits": - [2, 1], "name": "CX2_1", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:41:40Z", - "unit": "", "name": "gate_error", "value": 0.07474221943376097}], "qubits": - [3, 2], "name": "CX3_2", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:47:44Z", - "unit": "", "name": "gate_error", "value": 0.07660114123887399}], "qubits": - [3, 4], "name": "CX3_4", "gate": "cx"}, {"parameters": [{"date": "2019-02-04T10:53:35Z", - "unit": "", "name": "gate_error", "value": 0.06824929220587475}], "qubits": - [4, 2], "name": "CX4_2", "gate": "cx"}], "last_update_date": "2019-02-05T02:54:28.000Z", - "general": []}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -version: 1 diff --git a/test/cassettes/test_backend_overview b/test/cassettes/test_backend_overview deleted file mode 100644 index af512f726732..000000000000 --- a/test/cassettes/test_backend_overview +++ /dev/null @@ -1,976 +0,0 @@ -interactions: -- request: - body: apiToken=apiToken_dummy - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - Content-Length: ['137'] - Content-Type: [application/x-www-form-urlencoded] - method: POST - uri: 
https://quantumexperience.ng.bluemix.net/api/users/loginWithToken - response: - body: {string: '{"created": "dummyapiusersloginWithTokencreated01", "userId": - "dummyapiusersloginWithTokenuserId01", "id": "dummyapiusersloginWithTokenid01", - "ttl": 1209600}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], 
[13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": 
true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmqx4/queue/status - response: - body: {string: '{"state": true, "status": "active", "backend_version": "1.0.0", - "lengthQueue": 42}'} - headers: - Access-Control-Allow-Credentials: 
['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - 
"coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmq_16_melbourne/queue/status - response: - body: {string: '{"state": true, "status": "active", "backend_version": "1.0.0", - "lengthQueue": 40}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], 
"name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie 
- Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmq_16_melbourne/queue/status - response: - body: {string: '{"state": true, "status": "active", "backend_version": "1.0.0", - "lengthQueue": 40}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], 
[9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmqx4/queue/status - response: - body: {string: '{"state": true, "status": "active", "backend_version": "1.0.0", - "lengthQueue": 42}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - 
Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": 
"gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmq_16_melbourne/properties?access_token=dummyapiusersloginWithTokenid01&version=1 - response: - body: {string: '{"backend_name": "ibmq_16_melbourne", "backend_version": "1.0.0", - "qubits": [[{"date": "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", - "value": 75.96106177842839}, {"date": "2019-02-08T07:26:00Z", "unit": "\u00b5s", - "name": "T2", "value": 20.8057158459146}, {"date": "2019-02-08T09:22:04Z", - "unit": "GHz", "name": "frequency", "value": 5.1000759392113855}, {"date": - "2019-02-08T07:24:20Z", "unit": "", "name": "readout_error", "value": 0.0867}], - [{"date": "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": - 48.35498967351742}, {"date": "2019-02-08T07:27:01Z", "unit": "\u00b5s", "name": - "T2", "value": 106.19700447796879}, {"date": "2019-02-08T09:22:04Z", "unit": - "GHz", "name": "frequency", "value": 5.238659501129794}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.07750000000000001}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 83.33016929069103}, - {"date": "2019-02-08T07:28:02Z", "unit": "\u00b5s", "name": "T2", "value": - 143.9968400517331}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 5.03300771358076}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.08389999999999997}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 61.35153237405397}, - {"date": "2019-02-08T07:29:05Z", "unit": "\u00b5s", "name": "T2", "value": - 59.591728676307696}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.896170097816411}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.2329}], [{"date": "2019-02-08T07:24:46Z", - "unit": "\u00b5s", "name": "T1", "value": 58.37651453120168}, {"date": "2019-02-08T07:26:00Z", - "unit": "\u00b5s", "name": "T2", "value": 39.26840578288146}, {"date": "2019-02-08T09:22:04Z", - "unit": "GHz", "name": "frequency", "value": 5.0272302387915655}, {"date": - "2019-02-08T07:24:20Z", "unit": "", "name": "readout_error", "value": 0.02300000000000002}], - [{"date": "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": - 22.72871799765391}, {"date": 
"2019-02-08T07:27:01Z", "unit": "\u00b5s", "name": - "T2", "value": 37.79789351697824}, {"date": "2019-02-08T09:22:04Z", "unit": - "GHz", "name": "frequency", "value": 5.067144859702533}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.05010000000000003}], [{"date": - "2019-02-07T07:31:24Z", "unit": "\u00b5s", "name": "T1", "value": 55.96731254275887}, - {"date": "2019-02-08T07:28:02Z", "unit": "\u00b5s", "name": "T2", "value": - 50.2573329320607}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.92380186054606}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.03970000000000007}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 43.23735783723179}, - {"date": "2019-02-08T07:29:05Z", "unit": "\u00b5s", "name": "T2", "value": - 97.48037449118453}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.974517320613159}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.053300000000000014}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 56.847309252980956}, - {"date": "2019-02-08T07:26:00Z", "unit": "\u00b5s", "name": "T2", "value": - 92.92386230529024}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.7397852983601725}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.06499999999999995}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 33.31011991217951}, - {"date": "2019-02-08T07:28:02Z", "unit": "\u00b5s", "name": "T2", "value": - 75.07465442414373}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.963366733569522}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.12109999999999999}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 40.028390636704025}, - {"date": "2019-02-08T07:27:01Z", "unit": "\u00b5s", "name": "T2", "value": - 49.201566672903326}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.945087520892438}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.05059999999999998}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 65.67291748177534}, - {"date": "2019-02-08T07:28:02Z", "unit": "\u00b5s", "name": "T2", "value": - 119.6563074422006}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 5.005281828430722}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.10749999999999993}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 78.93072850344592}, - {"date": "2019-02-08T07:27:01Z", "unit": "\u00b5s", "name": "T2", "value": - 75.83757478264616}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.760146329316313}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.05049999999999999}], [{"date": - "2019-02-08T07:24:46Z", "unit": "\u00b5s", "name": "T1", "value": 27.14948595011101}, - {"date": "2019-02-08T07:26:00Z", "unit": "\u00b5s", "name": "T2", "value": - 53.26526727648976}, {"date": "2019-02-08T09:22:04Z", "unit": "GHz", "name": - "frequency", "value": 4.9684746329685225}, {"date": "2019-02-08T07:24:20Z", - "unit": "", "name": "readout_error", "value": 0.04730000000000001}]], "gates": - [{"parameters": [{"date": 
"2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", - "value": 0}], "qubits": [0], "gate": "u1"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", - "unit": "", "name": "gate_error", "value": 0.001679428505927727}], "qubits": - [0], "gate": "u2"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": - "", "name": "gate_error", "value": 0.003358857011855454}], "qubits": [0], - "gate": "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", - "name": "gate_error", "value": 0}], "qubits": [1], "gate": "u1"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.006080477938646023}], "qubits": [1], "gate": "u2"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0.012160955877292046}], - "qubits": [1], "gate": "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", - "unit": "", "name": "gate_error", "value": 0}], "qubits": [2], "gate": "u1"}, - {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", - "value": 0.004801001301362573}], "qubits": [2], "gate": "u2"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.009602002602725146}], "qubits": [2], "gate": "u3"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0}], "qubits": - [3], "gate": "u1"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": - "", "name": "gate_error", "value": 0.0019100464965977615}], "qubits": [3], - "gate": "u2"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", - "name": "gate_error", "value": 0.003820092993195523}], "qubits": [3], "gate": - "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": - "gate_error", "value": 0}], "qubits": [4], "gate": "u1"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0.0016789859784202}], - "qubits": [4], "gate": "u2"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", - "unit": "", "name": "gate_error", "value": 0.0033579719568404}], "qubits": - [4], "gate": "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": - "", "name": "gate_error", "value": 0}], "qubits": [5], "gate": "u1"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.0024183102041727134}], "qubits": [5], "gate": "u2"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0.004836620408345427}], - "qubits": [5], "gate": "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", - "unit": "", "name": "gate_error", "value": 0}], "qubits": [6], "gate": "u1"}, - {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", - "value": 0.0014604602446032233}], "qubits": [6], "gate": "u2"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.0029209204892064466}], "qubits": [6], "gate": "u3"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0}], "qubits": - [7], "gate": "u1"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": - "", "name": "gate_error", "value": 0.0031350322639342454}], "qubits": [7], - "gate": "u2"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", - "name": "gate_error", "value": 0.006270064527868491}], "qubits": [7], "gate": - "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": - "gate_error", "value": 0}], "qubits": [8], "gate": "u1"}, 
{"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0.0024551154718702173}], - "qubits": [8], "gate": "u2"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", - "unit": "", "name": "gate_error", "value": 0.0049102309437404346}], "qubits": - [8], "gate": "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": - "", "name": "gate_error", "value": 0}], "qubits": [9], "gate": "u1"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.003478010867496828}], "qubits": [9], "gate": "u2"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0.006956021734993656}], - "qubits": [9], "gate": "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", - "unit": "", "name": "gate_error", "value": 0}], "qubits": [10], "gate": "u1"}, - {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", - "value": 0.002305088055136073}], "qubits": [10], "gate": "u2"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.004610176110272146}], "qubits": [10], "gate": "u3"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0}], "qubits": - [11], "gate": "u1"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": - "", "name": "gate_error", "value": 0.0014647255665026226}], "qubits": [11], - "gate": "u2"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", - "name": "gate_error", "value": 0.0029294511330052453}], "qubits": [11], "gate": - "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": - "gate_error", "value": 0}], "qubits": [12], "gate": "u1"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.0034429776451645466}], "qubits": [12], "gate": "u2"}, {"parameters": [{"date": - "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": 0.006885955290329093}], - "qubits": [12], "gate": "u3"}, {"parameters": [{"date": "2019-02-09T07:42:54Z", - "unit": "", "name": "gate_error", "value": 0}], "qubits": [13], "gate": "u1"}, - {"parameters": [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", - "value": 0.006289145077929659}], "qubits": [13], "gate": "u2"}, {"parameters": - [{"date": "2019-02-09T07:42:54Z", "unit": "", "name": "gate_error", "value": - 0.012578290155859317}], "qubits": [13], "gate": "u3"}, {"parameters": [{"date": - "2019-02-08T08:22:17Z", "unit": "", "name": "gate_error", "value": 0.03992150159559102}], - "qubits": [1, 0], "name": "CX1_0", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:25:30Z", "unit": "", "name": "gate_error", "value": 0.02898713750680537}], - "qubits": [1, 2], "name": "CX1_2", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:29:06Z", "unit": "", "name": "gate_error", "value": 0.03636241333882234}], - "qubits": [2, 3], "name": "CX2_3", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:32:22Z", "unit": "", "name": "gate_error", "value": 0.030334963393280623}], - "qubits": [4, 3], "name": "CX4_3", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:35:36Z", "unit": "", "name": "gate_error", "value": 0.03885429984423519}], - "qubits": [4, 10], "name": "CX4_10", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:38:49Z", "unit": "", "name": "gate_error", "value": 0.05486455397851017}], - "qubits": [5, 4], "name": "CX5_4", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:42:15Z", "unit": "", "name": 
"gate_error", "value": 0.0637315588872944}], - "qubits": [5, 6], "name": "CX5_6", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:46:04Z", "unit": "", "name": "gate_error", "value": 0.06629596672877747}], - "qubits": [5, 9], "name": "CX5_9", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:49:36Z", "unit": "", "name": "gate_error", "value": 0.038006868775274066}], - "qubits": [6, 8], "name": "CX6_8", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:53:04Z", "unit": "", "name": "gate_error", "value": 0.033951635142591974}], - "qubits": [7, 8], "name": "CX7_8", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T08:56:16Z", "unit": "", "name": "gate_error", "value": 0.041215682504731455}], - "qubits": [9, 8], "name": "CX9_8", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T09:00:12Z", "unit": "", "name": "gate_error", "value": 0.046416114232033484}], - "qubits": [9, 10], "name": "CX9_10", "gate": "cx"}, {"parameters": [{"date": - "2019-02-06T09:11:14Z", "unit": "", "name": "gate_error", "value": 0.04309999280284493}], - "qubits": [11, 3], "name": "CX11_3", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T09:03:39Z", "unit": "", "name": "gate_error", "value": 0.034499732994308}], - "qubits": [11, 10], "name": "CX11_10", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T09:06:56Z", "unit": "", "name": "gate_error", "value": 0.0574353496208172}], - "qubits": [11, 12], "name": "CX11_12", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T09:13:31Z", "unit": "", "name": "gate_error", "value": 0.05926881901365755}], - "qubits": [12, 2], "name": "CX12_2", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T09:18:08Z", "unit": "", "name": "gate_error", "value": 0.1110388544989424}], - "qubits": [13, 1], "name": "CX13_1", "gate": "cx"}, {"parameters": [{"date": - "2019-02-08T09:22:04Z", "unit": "", "name": "gate_error", "value": 0.04073863017965598}], - "qubits": [13, 12], "name": "CX13_12", "gate": "cx"}], "last_update_date": - "2019-02-08T09:22:04.000Z", "general": []}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/v/1?access_token=dummyapiusersloginWithTokenid01 - response: - body: {string: '[{"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmqx4", - "max_shots": 8192, "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "simulator": false, "sample_name": "raven", "max_experiments": 75, - "local": false, "backend_version": "1.0.0", "n_qubits": 5, "basis_gates": - ["u1", "u2", "u3", "cx", "id"], "conditional": false, "url": "None", "gates": - [{"qasm_def": "gate id q { U(0,0,0) q; }", "parameters": [], "coupling_map": - [[0], [1], [2], [3], [4]], "name": "id"}, {"qasm_def": "gate u1(lambda) q - { 
U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], - [4]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) q { U(theta,phi,lambda) - q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": [[0], [1], - [2], [3], [4]], "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", - "parameters": [], "coupling_map": [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], - [4, 2]], "name": "cx"}], "description": "5 qubit device", "open_pulse": false, - "memory": true, "credits_required": true, "allow_q_object": true, "n_registers": - 1}, {"online_date": "2018-11-06T05:00:00Z", "backend_name": "ibmq_16_melbourne", - "max_shots": 8192, "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], - [5, 4], [5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], - [11, 12], [12, 2], [13, 1], [13, 12]], "simulator": false, "sample_name": - "albatross", "max_experiments": 75, "local": false, "backend_version": "1.0.0", - "n_qubits": 14, "basis_gates": ["u1", "u2", "u3", "cx", "id"], "conditional": - false, "url": "None", "gates": [{"qasm_def": "gate id q { U(0,0,0) q; }", - "parameters": [], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "id"}, {"qasm_def": "gate u1(lambda) - q { U(0,0,lambda) q; }", "parameters": ["lambda"], "coupling_map": [[0], [1], - [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], "name": "u1"}, - {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }", "parameters": - ["phi", "lambda"], "coupling_map": [[0], [1], [2], [3], [4], [5], [6], [7], - [8], [9], [10], [11], [12], [13]], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "coupling_map": - [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13]], - "name": "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], - "coupling_map": [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], - [5, 9], [6, 8], [7, 8], [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, - 2], [13, 1], [13, 12]], "name": "cx"}], "description": "14 qubit device", - "open_pulse": false, "memory": false, "credits_required": true, "allow_q_object": - true, "n_registers": 1}, {"backend_name": "ibmq_qasm_simulator", "backend_version": - "0.1.547", "simulator": true, "max_experiments": 300, "local": false, "max_shots": - 8192, "n_qubits": 32, "basis_gates": ["u1", "u2", "u3", "cx"], "conditional": - true, "gates": [{"qasm_def": "gate u1(lambda) q { U(0,0,lambda) q; }", "parameters": - ["lambda"], "name": "u1"}, {"qasm_def": "gate u2(phi,lambda) q { U(pi/2,phi,lambda) - q; }", "parameters": ["phi", "lambda"], "name": "u2"}, {"qasm_def": "u3(theta,phi,lambda) - q { U(theta,phi,lambda) q; }", "parameters": ["theta", "phi", "lambda"], "name": - "u3"}, {"qasm_def": "gate cx q1,q2 { CX q1,q2; }", "parameters": [], "name": - "cx"}], "open_pulse": false, "memory": true, "allow_q_object": true}]'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - 
Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: ['*/*'] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - method: GET - uri: https://quantumexperience.ng.bluemix.net/api/Backends/ibmqx4/properties?access_token=dummyapiusersloginWithTokenid01&version=1 - response: - body: {string: '{"backend_name": "ibmqx4", "backend_version": "1.0.0", "qubits": - [[{"date": "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": - 40.23580728497557}, {"date": "2019-02-05T02:16:42Z", "unit": "\u00b5s", "name": - "T2", "value": 28.053796455696148}, {"date": "2019-02-05T02:54:28Z", "unit": - "GHz", "name": "frequency", "value": 5.2498546876946115}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.08450000000000002}], [{"date": - "2019-02-04T10:14:24Z", "unit": "\u00b5s", "name": "T1", "value": 67.39179431370326}, - {"date": "2019-02-05T02:17:25Z", "unit": "\u00b5s", "name": "T2", "value": - 10.763705758897595}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.295772109235333}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.07925000000000004}], [{"date": - "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": 43.393200515665946}, - {"date": "2019-02-05T02:18:06Z", "unit": "\u00b5s", "name": "T2", "value": - 27.3542760576606}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.3533332743669355}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.03699999999999992}], [{"date": - "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": 55.541546751266985}, - {"date": "2019-02-05T02:17:25Z", "unit": "\u00b5s", "name": "T2", "value": - 14.846271380938276}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.434936834513384}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.03875000000000006}], [{"date": - "2019-02-05T02:15:59Z", "unit": "\u00b5s", "name": "T1", "value": 53.860484623965284}, - {"date": "2019-02-05T02:16:42Z", "unit": "\u00b5s", "name": "T2", "value": - 4.983364732947786}, {"date": "2019-02-05T02:54:28Z", "unit": "GHz", "name": - "frequency", "value": 5.175855462568935}, {"date": "2019-02-05T02:15:40Z", - "unit": "", "name": "readout_error", "value": 0.26875000000000004}]], "gates": - [{"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", - "value": 0}], "qubits": [0], "gate": "u1"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", - "unit": "", "name": "gate_error", "value": 0.0006867731322012238}], "qubits": - [0], "gate": "u2"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": - "", "name": "gate_error", "value": 0.0013735462644024476}], "qubits": [0], - "gate": "u3"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", - "name": "gate_error", "value": 0}], "qubits": [1], "gate": "u1"}, {"parameters": - [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": - 0.00128782749692391}], "qubits": [1], "gate": "u2"}, {"parameters": [{"date": - "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": 0.00257565499384782}], - "qubits": [1], "gate": "u3"}, 
{"parameters": [{"date": "2019-02-05T10:57:11Z", - "unit": "", "name": "gate_error", "value": 0}], "qubits": [2], "gate": "u1"}, - {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", - "value": 0.00128782749692391}], "qubits": [2], "gate": "u2"}, {"parameters": - [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": - 0.00257565499384782}], "qubits": [2], "gate": "u3"}, {"parameters": [{"date": - "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": 0}], "qubits": - [3], "gate": "u1"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": - "", "name": "gate_error", "value": 0.001803112096824766}], "qubits": [3], - "gate": "u2"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", - "name": "gate_error", "value": 0.003606224193649532}], "qubits": [3], "gate": - "u3"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", "unit": "", "name": - "gate_error", "value": 0}], "qubits": [4], "gate": "u1"}, {"parameters": [{"date": - "2019-02-05T10:57:11Z", "unit": "", "name": "gate_error", "value": 0.006444645993361475}], - "qubits": [4], "gate": "u2"}, {"parameters": [{"date": "2019-02-05T10:57:11Z", - "unit": "", "name": "gate_error", "value": 0.01288929198672295}], "qubits": - [4], "gate": "u3"}, {"parameters": [{"date": "2019-02-05T02:25:32Z", "unit": - "", "name": "gate_error", "value": 0.03594617578113263}], "qubits": [1, 0], - "name": "CX1_0", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:31:04Z", - "unit": "", "name": "gate_error", "value": 0.03205473341614962}], "qubits": - [2, 0], "name": "CX2_0", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:36:21Z", - "unit": "", "name": "gate_error", "value": 0.048500617566183984}], "qubits": - [2, 1], "name": "CX2_1", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:41:40Z", - "unit": "", "name": "gate_error", "value": 0.07474221943376097}], "qubits": - [3, 2], "name": "CX3_2", "gate": "cx"}, {"parameters": [{"date": "2019-02-05T02:47:44Z", - "unit": "", "name": "gate_error", "value": 0.07660114123887399}], "qubits": - [3, 4], "name": "CX3_4", "gate": "cx"}, {"parameters": [{"date": "2019-02-04T10:53:35Z", - "unit": "", "name": "gate_error", "value": 0.06824929220587475}], "qubits": - [4, 2], "name": "CX4_2", "gate": "cx"}], "last_update_date": "2019-02-05T02:54:28.000Z", - "general": []}'} - headers: - Access-Control-Allow-Credentials: ['true'] - Access-Control-Allow-Origin: ['https://quantumexperience.mybluemix.net/'] - Cache-Control: ['no-store, no-cache, must-revalidate, proxy-revalidate'] - Connection: [Keep-Alive] - Content-Type: [application/json; charset=utf-8] - Expires: ['0'] - Pragma: [no-cache] - Set-Cookie: dummy_cookie - Strict-Transport-Security: [max-age=86400] - Surrogate-Control: [no-store] - Transfer-Encoding: [chunked] - Vary: ['Origin, Accept-Encoding'] - X-Backside-Transport: [OK OK] - X-Content-Type-Options: [nosniff] - X-Download-Options: [noopen] - X-Frame-Options: [SAMEORIGIN] - X-Xss-Protection: [1; mode=block] - status: {code: 200, message: OK} -version: 1 diff --git a/test/python/compiler/test_transpiler.py b/test/python/compiler/test_transpiler.py index d0b26f63c358..1f4939656d1e 100644 --- a/test/python/compiler/test_transpiler.py +++ b/test/python/compiler/test_transpiler.py @@ -16,6 +16,7 @@ import math import unittest +from unittest.mock import patch from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit from qiskit import BasicAer @@ -33,8 +34,6 @@ class TestTranspile(QiskitTestCase): """Test transpile 
function.""" - barrier_pass = BarrierBeforeFinalMeasurements() - def test_pass_manager_none(self): """Test passing the default (None) pass manager to the transpiler. @@ -430,16 +429,16 @@ def test_parameterized_circuit_for_device(self): self.assertEqual(expected_qc, transpiled_qc) - @unittest.mock.patch.object(BarrierBeforeFinalMeasurements, 'run', wraps=barrier_pass.run) - def test_final_measurement_barrier_for_devices(self, mock_pass): + def test_final_measurement_barrier_for_devices(self): """Verify BarrierBeforeFinalMeasurements pass is called in default pipeline for devices.""" circ = QuantumCircuit.from_qasm_file(self._get_resource_path('example.qasm', Path.QASMS)) layout = Layout.generate_trivial_layout(*circ.qregs) - transpile(circ, coupling_map=FakeRueschlikon().configuration().coupling_map, - initial_layout=layout) - - self.assertTrue(mock_pass.called) + orig_pass = BarrierBeforeFinalMeasurements() + with patch.object(BarrierBeforeFinalMeasurements, 'run', wraps=orig_pass.run) as mock_pass: + transpile(circ, coupling_map=FakeRueschlikon().configuration().coupling_map, + initial_layout=layout) + self.assertTrue(mock_pass.called) def test_do_not_run_cxdirection_with_symmetric_cm(self): """When the coupling map is symmetric, do not run CXDirection.""" @@ -451,8 +450,8 @@ def test_do_not_run_cxdirection_with_symmetric_cm(self): coupling_map.append([node1, node2]) coupling_map.append([node2, node1]) - cxdir_pass = CXDirection(CouplingMap(coupling_map)) - with unittest.mock.patch.object(CXDirection, 'run', wraps=cxdir_pass.run) as mock_pass: + orig_pass = CXDirection(CouplingMap(coupling_map)) + with patch.object(CXDirection, 'run', wraps=orig_pass.run) as mock_pass: transpile(circ, coupling_map=coupling_map, initial_layout=layout) self.assertFalse(mock_pass.called) diff --git a/test/python/tools/jupyter/test_notebooks.py b/test/python/tools/jupyter/test_notebooks.py index 72176874b02c..f985e383e9e4 100644 --- a/test/python/tools/jupyter/test_notebooks.py +++ b/test/python/tools/jupyter/test_notebooks.py @@ -20,7 +20,7 @@ import nbformat from nbconvert.preprocessors import ExecutePreprocessor from qiskit.tools.visualization import HAS_MATPLOTLIB -from qiskit.test import (Path, QiskitTestCase, requires_qe_access, slow_test) +from qiskit.test import (Path, QiskitTestCase, online_test, slow_test) # Timeout (in seconds) for a single notebook. 
@@ -64,7 +64,7 @@ def test_jupyter_jobs_pbars(self): 'notebooks/test_pbar_status.ipynb')) @unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.') - @requires_qe_access + @online_test @slow_test def test_backend_tools(self, qe_token, qe_url): """Test Jupyter backend tools.""" diff --git a/test/python/tools/monitor/test_backend_monitor.py b/test/python/tools/monitor/test_backend_monitor.py index d17a14f95633..cb0c6b92e862 100644 --- a/test/python/tools/monitor/test_backend_monitor.py +++ b/test/python/tools/monitor/test_backend_monitor.py @@ -19,17 +19,13 @@ from io import StringIO from qiskit.tools.monitor import backend_overview, backend_monitor -from qiskit.test import QiskitTestCase, requires_qe_access -from qiskit.util import _has_connection -# Check if internet connection exists -HAS_NET_CONNECTION = _has_connection('qiskit.org', 443) +from qiskit.test import QiskitTestCase, online_test class TestBackendOverview(QiskitTestCase): """Tools test case.""" - @unittest.skipIf(not HAS_NET_CONNECTION, "requries internet connection.") - @requires_qe_access + @online_test def test_backend_overview(self, qe_token, qe_url): """Test backend_overview""" from qiskit import IBMQ # pylint: disable: import-error @@ -42,8 +38,7 @@ def test_backend_overview(self, qe_token, qe_url): self.assertIn('Avg. T1:', stdout) self.assertIn('Num. Qubits:', stdout) - @unittest.skipIf(not HAS_NET_CONNECTION, "requries internet connection.") - @requires_qe_access + @online_test def test_backend_monitor(self, qe_token, qe_url): """Test backend_monitor""" from qiskit import IBMQ # pylint: disable: import-error diff --git a/tox.ini b/tox.ini index b546a4dcfa1c..ac2dcb16792f 100644 --- a/tox.ini +++ b/tox.ini @@ -17,14 +17,6 @@ deps = -r{toxinidir}/requirements.txt commands = stestr run {posargs} -[testenv:online-mock] -setenv = {[testenv]setenv} - QISKIT_TESTS=mock_online - -[testenv:recording] -setenv = {[testenv]setenv} - QISKIT_TESTS=rec - [testenv:lint] commands = pycodestyle --max-line-length=100 qiskit test
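The test changes above swap a class-level `@unittest.mock.patch.object(...)` decorator for an inline `patch.object(..., wraps=...)` context manager, so the wrapped pass is built inside the test itself. A self-contained sketch of that mocking pattern (the `Greeter` class is made up purely for illustration):

```python
from unittest.mock import patch


class Greeter:
    def greet(self):
        return 'hello'


# Wrapping keeps the real behaviour: calls pass through to the original
# bound method while the mock records that it was invoked.
original = Greeter()
with patch.object(Greeter, 'greet', wraps=original.greet) as mock_greet:
    result = Greeter().greet()

assert result == 'hello'
assert mock_greet.called
```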
scrapy__scrapy-2929
LinkExtractor is not ignoring the .m4v extension (video) by default

By chance I found out that LinkExtractor does not ignore the video extension .m4v the way it ignores other video formats.

https://en.wikipedia.org/wiki/M4V
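Until the default list covers it, a caller can pass an extended deny list explicitly; a minimal sketch, assuming a standard Scrapy install where `IGNORED_EXTENSIONS` is importable from `scrapy.linkextractors`:

```python
from scrapy.linkextractors import LinkExtractor, IGNORED_EXTENSIONS

# Workaround sketch: extend the default deny list so links ending in .m4v
# are filtered out like the other video formats.
extractor = LinkExtractor(deny_extensions=IGNORED_EXTENSIONS + ['m4v'])
```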
[ { "content": "\"\"\"\nscrapy.linkextractors\n\nThis package contains a collection of Link Extractors.\n\nFor more info see docs/topics/link-extractors.rst\n\"\"\"\nimport re\n\nfrom six.moves.urllib.parse import urlparse\nfrom parsel.csstranslator import HTMLTranslator\nfrom w3lib.url import canonicalize_url\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.url import (\n url_is_from_any_domain, url_has_any_extension,\n)\n\n\n# common file extensions that are not followed if they occur in links\nIGNORED_EXTENSIONS = [\n # images\n 'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',\n 'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',\n\n # audio\n 'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',\n\n # video\n '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',\n 'm4a',\n\n # office suites\n 'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',\n 'odp',\n\n # other\n 'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',\n]\n\n\n_re_type = type(re.compile(\"\", 0))\n_matches = lambda url, regexs: any(r.search(url) for r in regexs)\n_is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https', 'file'}\n\n\nclass FilteringLinkExtractor(object):\n\n _csstranslator = HTMLTranslator()\n\n def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,\n restrict_xpaths, canonicalize, deny_extensions, restrict_css):\n\n self.link_extractor = link_extractor\n\n self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(allow)]\n self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(deny)]\n\n self.allow_domains = set(arg_to_iter(allow_domains))\n self.deny_domains = set(arg_to_iter(deny_domains))\n\n self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))\n self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,\n arg_to_iter(restrict_css)))\n\n self.canonicalize = canonicalize\n if deny_extensions is None:\n deny_extensions = IGNORED_EXTENSIONS\n self.deny_extensions = {'.' 
+ e for e in arg_to_iter(deny_extensions)}\n\n def _link_allowed(self, link):\n if not _is_valid_url(link.url):\n return False\n if self.allow_res and not _matches(link.url, self.allow_res):\n return False\n if self.deny_res and _matches(link.url, self.deny_res):\n return False\n parsed_url = urlparse(link.url)\n if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):\n return False\n if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):\n return False\n return True\n\n def matches(self, url):\n\n if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):\n return False\n\n allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]\n denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []\n return any(allowed) and not any(denied)\n\n def _process_links(self, links):\n links = [x for x in links if self._link_allowed(x)]\n if self.canonicalize:\n for link in links:\n link.url = canonicalize_url(link.url)\n links = self.link_extractor._process_links(links)\n return links\n\n def _extract_links(self, *args, **kwargs):\n return self.link_extractor._extract_links(*args, **kwargs)\n\n\n# Top-level imports\nfrom .lxmlhtml import LxmlLinkExtractor as LinkExtractor\n", "path": "scrapy/linkextractors/__init__.py" } ]
[ { "content": "\"\"\"\nscrapy.linkextractors\n\nThis package contains a collection of Link Extractors.\n\nFor more info see docs/topics/link-extractors.rst\n\"\"\"\nimport re\n\nfrom six.moves.urllib.parse import urlparse\nfrom parsel.csstranslator import HTMLTranslator\nfrom w3lib.url import canonicalize_url\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.url import (\n url_is_from_any_domain, url_has_any_extension,\n)\n\n\n# common file extensions that are not followed if they occur in links\nIGNORED_EXTENSIONS = [\n # images\n 'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',\n 'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',\n\n # audio\n 'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',\n\n # video\n '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',\n 'm4a', 'm4v',\n\n # office suites\n 'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',\n 'odp',\n\n # other\n 'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',\n]\n\n\n_re_type = type(re.compile(\"\", 0))\n_matches = lambda url, regexs: any(r.search(url) for r in regexs)\n_is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https', 'file'}\n\n\nclass FilteringLinkExtractor(object):\n\n _csstranslator = HTMLTranslator()\n\n def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,\n restrict_xpaths, canonicalize, deny_extensions, restrict_css):\n\n self.link_extractor = link_extractor\n\n self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(allow)]\n self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(deny)]\n\n self.allow_domains = set(arg_to_iter(allow_domains))\n self.deny_domains = set(arg_to_iter(deny_domains))\n\n self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))\n self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,\n arg_to_iter(restrict_css)))\n\n self.canonicalize = canonicalize\n if deny_extensions is None:\n deny_extensions = IGNORED_EXTENSIONS\n self.deny_extensions = {'.' 
+ e for e in arg_to_iter(deny_extensions)}\n\n def _link_allowed(self, link):\n if not _is_valid_url(link.url):\n return False\n if self.allow_res and not _matches(link.url, self.allow_res):\n return False\n if self.deny_res and _matches(link.url, self.deny_res):\n return False\n parsed_url = urlparse(link.url)\n if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):\n return False\n if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):\n return False\n return True\n\n def matches(self, url):\n\n if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):\n return False\n\n allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]\n denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []\n return any(allowed) and not any(denied)\n\n def _process_links(self, links):\n links = [x for x in links if self._link_allowed(x)]\n if self.canonicalize:\n for link in links:\n link.url = canonicalize_url(link.url)\n links = self.link_extractor._process_links(links)\n return links\n\n def _extract_links(self, *args, **kwargs):\n return self.link_extractor._extract_links(*args, **kwargs)\n\n\n# Top-level imports\nfrom .lxmlhtml import LxmlLinkExtractor as LinkExtractor\n", "path": "scrapy/linkextractors/__init__.py" } ]
diff --git a/scrapy/linkextractors/__init__.py b/scrapy/linkextractors/__init__.py index 8676c3b926d..2d7115cc504 100644 --- a/scrapy/linkextractors/__init__.py +++ b/scrapy/linkextractors/__init__.py @@ -28,7 +28,7 @@ # video '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv', - 'm4a', + 'm4a', 'm4v', # office suites 'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
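After the one-line change above, `m4v` sits in the default deny list alongside the other video extensions. A quick sanity check one could run against a Scrapy build containing the change (hypothetical snippet, not part of the PR):

```python
from scrapy.linkextractors import IGNORED_EXTENSIONS

# With the patch applied, *.m4v links are dropped by LinkExtractor by default.
assert 'm4v' in IGNORED_EXTENSIONS
```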
open-mmlab__mmocr-663
locals() should not be modified

```python
args = locals()
[args.pop(x, None) for x in ['kwargs', 'self']]
```

https://github.com/open-mmlab/mmocr/blob/b04775fd78ac89e32d38ef6fbd5493dedbfd76f4/mmocr/utils/ocr.py#L414
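CPython documents the mapping returned by `locals()` as one that should not be modified, and the list comprehension here is evaluated only for its side effects. A minimal sketch of a safer pattern (the `Reader` class and method name are illustrative, not MMOCR's API; the actual fix below keeps the comprehension but operates on a copy):

```python
class Reader:
    def readtext_sketch(self, img, output=None, details=False, **kwargs):
        # Copy first: the mapping returned by locals() must be treated as
        # read-only, so mutate a copy rather than the mapping itself.
        args = locals().copy()
        # A plain loop makes the side effect explicit, instead of a list
        # comprehension evaluated only for its side effects.
        for key in ('self', 'kwargs'):
            args.pop(key, None)
        return args


print(Reader().readtext_sketch('demo.jpg', details=True))
# {'img': 'demo.jpg', 'output': None, 'details': False}
```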
[ { "content": "#!/usr/bin/env python\n# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os\nimport warnings\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.image.misc import tensor2imgs\nfrom mmcv.runner import load_checkpoint\nfrom mmcv.utils.config import Config\n\nfrom mmocr.apis import init_detector\nfrom mmocr.apis.inference import model_inference\nfrom mmocr.core.visualize import det_recog_show_result\nfrom mmocr.datasets.kie_dataset import KIEDataset\nfrom mmocr.datasets.pipelines.crop import crop_img\nfrom mmocr.models import build_detector\nfrom mmocr.utils.box_util import stitch_boxes_into_lines\nfrom mmocr.utils.fileio import list_from_file\nfrom mmocr.utils.model import revert_sync_batchnorm\n\n\n# Parse CLI arguments\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument(\n 'img', type=str, help='Input image file or folder path.')\n parser.add_argument(\n '--output',\n type=str,\n default='',\n help='Output file/folder name for visualization')\n parser.add_argument(\n '--det',\n type=str,\n default='PANet_IC15',\n help='Pretrained text detection algorithm')\n parser.add_argument(\n '--det-config',\n type=str,\n default='',\n help='Path to the custom config file of the selected det model. It '\n 'overrides the settings in det')\n parser.add_argument(\n '--det-ckpt',\n type=str,\n default='',\n help='Path to the custom checkpoint file of the selected det model. '\n 'It overrides the settings in det')\n parser.add_argument(\n '--recog',\n type=str,\n default='SEG',\n help='Pretrained text recognition algorithm')\n parser.add_argument(\n '--recog-config',\n type=str,\n default='',\n help='Path to the custom config file of the selected recog model. It'\n 'overrides the settings in recog')\n parser.add_argument(\n '--recog-ckpt',\n type=str,\n default='',\n help='Path to the custom checkpoint file of the selected recog model. '\n 'It overrides the settings in recog')\n parser.add_argument(\n '--kie',\n type=str,\n default='',\n help='Pretrained key information extraction algorithm')\n parser.add_argument(\n '--kie-config',\n type=str,\n default='',\n help='Path to the custom config file of the selected kie model. It'\n 'overrides the settings in kie')\n parser.add_argument(\n '--kie-ckpt',\n type=str,\n default='',\n help='Path to the custom checkpoint file of the selected kie model. '\n 'It overrides the settings in kie')\n parser.add_argument(\n '--config-dir',\n type=str,\n default=os.path.join(str(Path.cwd()), 'configs/'),\n help='Path to the config directory where all the config files '\n 'are located. 
Defaults to \"configs/\"')\n parser.add_argument(\n '--batch-mode',\n action='store_true',\n help='Whether use batch mode for inference')\n parser.add_argument(\n '--recog-batch-size',\n type=int,\n default=0,\n help='Batch size for text recognition')\n parser.add_argument(\n '--det-batch-size',\n type=int,\n default=0,\n help='Batch size for text detection')\n parser.add_argument(\n '--single-batch-size',\n type=int,\n default=0,\n help='Batch size for separate det/recog inference')\n parser.add_argument(\n '--device', default='cuda:0', help='Device used for inference.')\n parser.add_argument(\n '--export',\n type=str,\n default='',\n help='Folder where the results of each image are exported')\n parser.add_argument(\n '--export-format',\n type=str,\n default='json',\n help='Format of the exported result file(s)')\n parser.add_argument(\n '--details',\n action='store_true',\n help='Whether include the text boxes coordinates and confidence values'\n )\n parser.add_argument(\n '--imshow',\n action='store_true',\n help='Whether show image with OpenCV.')\n parser.add_argument(\n '--print-result',\n action='store_true',\n help='Prints the recognised text')\n parser.add_argument(\n '--merge', action='store_true', help='Merge neighboring boxes')\n parser.add_argument(\n '--merge-xdist',\n type=float,\n default=20,\n help='The maximum x-axis distance to merge boxes')\n args = parser.parse_args()\n if args.det == 'None':\n args.det = None\n if args.recog == 'None':\n args.recog = None\n # Warnings\n if args.merge and not (args.det and args.recog):\n warnings.warn(\n 'Box merging will not work if the script is not'\n ' running in detection + recognition mode.', UserWarning)\n if not os.path.samefile(args.config_dir, os.path.join(str(\n Path.cwd()))) and (args.det_config != ''\n or args.recog_config != ''):\n warnings.warn(\n 'config_dir will be overridden by det-config or recog-config.',\n UserWarning)\n return args\n\n\nclass MMOCR:\n\n def __init__(self,\n det='PANet_IC15',\n det_config='',\n det_ckpt='',\n recog='SEG',\n recog_config='',\n recog_ckpt='',\n kie='',\n kie_config='',\n kie_ckpt='',\n config_dir=os.path.join(str(Path.cwd()), 'configs/'),\n device='cuda:0',\n **kwargs):\n\n textdet_models = {\n 'DB_r18': {\n 'config':\n 'dbnet/dbnet_r18_fpnc_1200e_icdar2015.py',\n 'ckpt':\n 'dbnet/'\n 'dbnet_r18_fpnc_sbn_1200e_icdar2015_20210329-ba3ab597.pth'\n },\n 'DB_r50': {\n 'config':\n 'dbnet/dbnet_r50dcnv2_fpnc_1200e_icdar2015.py',\n 'ckpt':\n 'dbnet/'\n 'dbnet_r50dcnv2_fpnc_sbn_1200e_icdar2015_20211025-9fe3b590.pth'\n },\n 'DRRG': {\n 'config':\n 'drrg/drrg_r50_fpn_unet_1200e_ctw1500.py',\n 'ckpt':\n 'drrg/drrg_r50_fpn_unet_1200e_ctw1500_20211022-fb30b001.pth'\n },\n 'FCE_IC15': {\n 'config':\n 'fcenet/fcenet_r50_fpn_1500e_icdar2015.py',\n 'ckpt':\n 'fcenet/fcenet_r50_fpn_1500e_icdar2015_20211022-daefb6ed.pth'\n },\n 'FCE_CTW_DCNv2': {\n 'config':\n 'fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py',\n 'ckpt':\n 'fcenet/' +\n 'fcenet_r50dcnv2_fpn_1500e_ctw1500_20211022-e326d7ec.pth'\n },\n 'MaskRCNN_CTW': {\n 'config':\n 'maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py',\n 'ckpt':\n 'maskrcnn/'\n 'mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.pth'\n },\n 'MaskRCNN_IC15': {\n 'config':\n 'maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py',\n 'ckpt':\n 'maskrcnn/'\n 'mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.pth'\n },\n 'MaskRCNN_IC17': {\n 'config':\n 'maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017.py',\n 'ckpt':\n 'maskrcnn/'\n 'mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.pth'\n },\n 
'PANet_CTW': {\n 'config':\n 'panet/panet_r18_fpem_ffm_600e_ctw1500.py',\n 'ckpt':\n 'panet/'\n 'panet_r18_fpem_ffm_sbn_600e_ctw1500_20210219-3b3a9aa3.pth'\n },\n 'PANet_IC15': {\n 'config':\n 'panet/panet_r18_fpem_ffm_600e_icdar2015.py',\n 'ckpt':\n 'panet/'\n 'panet_r18_fpem_ffm_sbn_600e_icdar2015_20210219-42dbe46a.pth'\n },\n 'PS_CTW': {\n 'config': 'psenet/psenet_r50_fpnf_600e_ctw1500.py',\n 'ckpt':\n 'psenet/psenet_r50_fpnf_600e_ctw1500_20210401-216fed50.pth'\n },\n 'PS_IC15': {\n 'config':\n 'psenet/psenet_r50_fpnf_600e_icdar2015.py',\n 'ckpt':\n 'psenet/psenet_r50_fpnf_600e_icdar2015_pretrain-eefd8fe6.pth'\n },\n 'TextSnake': {\n 'config':\n 'textsnake/textsnake_r50_fpn_unet_1200e_ctw1500.py',\n 'ckpt':\n 'textsnake/textsnake_r50_fpn_unet_1200e_ctw1500-27f65b64.pth'\n }\n }\n\n textrecog_models = {\n 'CRNN': {\n 'config': 'crnn/crnn_academic_dataset.py',\n 'ckpt': 'crnn/crnn_academic-a723a1c5.pth'\n },\n 'SAR': {\n 'config': 'sar/sar_r31_parallel_decoder_academic.py',\n 'ckpt': 'sar/sar_r31_parallel_decoder_academic-dba3a4a3.pth'\n },\n 'SAR_CN': {\n 'config':\n 'sar/sar_r31_parallel_decoder_chinese.py',\n 'ckpt':\n 'sar/sar_r31_parallel_decoder_chineseocr_20210507-b4be8214.pth'\n },\n 'NRTR_1/16-1/8': {\n 'config': 'nrtr/nrtr_r31_1by16_1by8_academic.py',\n 'ckpt':\n 'nrtr/nrtr_r31_1by16_1by8_academic_20211124-f60cebf4.pth'\n },\n 'NRTR_1/8-1/4': {\n 'config': 'nrtr/nrtr_r31_1by8_1by4_academic.py',\n 'ckpt':\n 'nrtr/nrtr_r31_1by8_1by4_academic_20211123-e1fdb322.pth'\n },\n 'RobustScanner': {\n 'config': 'robust_scanner/robustscanner_r31_academic.py',\n 'ckpt': 'robustscanner/robustscanner_r31_academic-5f05874f.pth'\n },\n 'SATRN': {\n 'config': 'satrn/satrn_academic.py',\n 'ckpt': 'satrn/satrn_academic_20211009-cb8b1580.pth'\n },\n 'SATRN_sm': {\n 'config': 'satrn/satrn_small.py',\n 'ckpt': 'satrn/satrn_small_20211009-2cf13355.pth'\n },\n 'SEG': {\n 'config': 'seg/seg_r31_1by16_fpnocr_academic.py',\n 'ckpt': 'seg/seg_r31_1by16_fpnocr_academic-72235b11.pth'\n },\n 'CRNN_TPS': {\n 'config': 'tps/crnn_tps_academic_dataset.py',\n 'ckpt': 'tps/crnn_tps_academic_dataset_20210510-d221a905.pth'\n }\n }\n\n kie_models = {\n 'SDMGR': {\n 'config': 'sdmgr/sdmgr_unet16_60e_wildreceipt.py',\n 'ckpt':\n 'sdmgr/sdmgr_unet16_60e_wildreceipt_20210520-7489e6de.pth'\n }\n }\n\n self.td = det\n self.tr = recog\n self.kie = kie\n self.device = device\n\n # Check if the det/recog model choice is valid\n if self.td and self.td not in textdet_models:\n raise ValueError(self.td,\n 'is not a supported text detection algorthm')\n elif self.tr and self.tr not in textrecog_models:\n raise ValueError(self.tr,\n 'is not a supported text recognition algorithm')\n elif self.kie:\n if self.kie not in kie_models:\n raise ValueError(\n self.kie, 'is not a supported key information extraction'\n ' algorithm')\n elif not (self.td and self.tr):\n raise NotImplementedError(\n self.kie, 'has to run together'\n ' with text detection and recognition algorithms.')\n\n self.detect_model = None\n if self.td:\n # Build detection model\n if not det_config:\n det_config = os.path.join(config_dir, 'textdet/',\n textdet_models[self.td]['config'])\n if not det_ckpt:\n det_ckpt = 'https://download.openmmlab.com/mmocr/textdet/' + \\\n textdet_models[self.td]['ckpt']\n\n self.detect_model = init_detector(\n det_config, det_ckpt, device=self.device)\n self.detect_model = revert_sync_batchnorm(self.detect_model)\n\n self.recog_model = None\n if self.tr:\n # Build recognition model\n if not recog_config:\n recog_config = os.path.join(\n 
config_dir, 'textrecog/',\n textrecog_models[self.tr]['config'])\n if not recog_ckpt:\n recog_ckpt = 'https://download.openmmlab.com/mmocr/' + \\\n 'textrecog/' + textrecog_models[self.tr]['ckpt']\n\n self.recog_model = init_detector(\n recog_config, recog_ckpt, device=self.device)\n self.recog_model = revert_sync_batchnorm(self.recog_model)\n\n self.kie_model = None\n if self.kie:\n # Build key information extraction model\n if not kie_config:\n kie_config = os.path.join(config_dir, 'kie/',\n kie_models[self.kie]['config'])\n if not kie_ckpt:\n kie_ckpt = 'https://download.openmmlab.com/mmocr/' + \\\n 'kie/' + kie_models[self.kie]['ckpt']\n\n kie_cfg = Config.fromfile(kie_config)\n self.kie_model = build_detector(\n kie_cfg.model, test_cfg=kie_cfg.get('test_cfg'))\n self.kie_model = revert_sync_batchnorm(self.kie_model)\n self.kie_model.cfg = kie_cfg\n load_checkpoint(self.kie_model, kie_ckpt, map_location=self.device)\n\n # Attribute check\n for model in list(filter(None, [self.recog_model, self.detect_model])):\n if hasattr(model, 'module'):\n model = model.module\n if model.cfg.data.test['type'] == 'ConcatDataset':\n model.cfg.data.test.pipeline = \\\n model.cfg.data.test['datasets'][0].pipeline\n\n def readtext(self,\n img,\n output=None,\n details=False,\n export=None,\n export_format='json',\n batch_mode=False,\n recog_batch_size=0,\n det_batch_size=0,\n single_batch_size=0,\n imshow=False,\n print_result=False,\n merge=False,\n merge_xdist=20,\n **kwargs):\n args = locals()\n [args.pop(x, None) for x in ['kwargs', 'self']]\n args = Namespace(**args)\n\n # Input and output arguments processing\n self._args_processing(args)\n self.args = args\n\n pp_result = None\n\n # Send args and models to the MMOCR model inference API\n # and call post-processing functions for the output\n if self.detect_model and self.recog_model:\n det_recog_result = self.det_recog_kie_inference(\n self.detect_model, self.recog_model, kie_model=self.kie_model)\n pp_result = self.det_recog_pp(det_recog_result)\n else:\n for model in list(\n filter(None, [self.recog_model, self.detect_model])):\n result = self.single_inference(model, args.arrays,\n args.batch_mode,\n args.single_batch_size)\n pp_result = self.single_pp(result, model)\n\n return pp_result\n\n # Post processing function for end2end ocr\n def det_recog_pp(self, result):\n final_results = []\n args = self.args\n for arr, output, export, det_recog_result in zip(\n args.arrays, args.output, args.export, result):\n if output or args.imshow:\n if self.kie_model:\n res_img = det_recog_show_result(arr, det_recog_result)\n else:\n res_img = det_recog_show_result(\n arr, det_recog_result, out_file=output)\n if args.imshow and not self.kie_model:\n mmcv.imshow(res_img, 'inference results')\n if not args.details:\n simple_res = {}\n simple_res['filename'] = det_recog_result['filename']\n simple_res['text'] = [\n x['text'] for x in det_recog_result['result']\n ]\n final_result = simple_res\n else:\n final_result = det_recog_result\n if export:\n mmcv.dump(final_result, export, indent=4)\n if args.print_result:\n print(final_result, end='\\n\\n')\n final_results.append(final_result)\n return final_results\n\n # Post processing function for separate det/recog inference\n def single_pp(self, result, model):\n for arr, output, export, res in zip(self.args.arrays, self.args.output,\n self.args.export, result):\n if export:\n mmcv.dump(res, export, indent=4)\n if output or self.args.imshow:\n res_img = model.show_result(arr, res, out_file=output)\n if 
self.args.imshow:\n mmcv.imshow(res_img, 'inference results')\n if self.args.print_result:\n print(res, end='\\n\\n')\n return result\n\n def generate_kie_labels(self, result, boxes, class_list):\n idx_to_cls = {}\n if class_list is not None:\n for line in list_from_file(class_list):\n class_idx, class_label = line.strip().split()\n idx_to_cls[class_idx] = class_label\n\n max_value, max_idx = torch.max(result['nodes'].detach().cpu(), -1)\n node_pred_label = max_idx.numpy().tolist()\n node_pred_score = max_value.numpy().tolist()\n labels = []\n for i in range(len(boxes)):\n pred_label = str(node_pred_label[i])\n if pred_label in idx_to_cls:\n pred_label = idx_to_cls[pred_label]\n pred_score = node_pred_score[i]\n labels.append((pred_label, pred_score))\n return labels\n\n def visualize_kie_output(self,\n model,\n data,\n result,\n out_file=None,\n show=False):\n \"\"\"Visualizes KIE output.\"\"\"\n img_tensor = data['img'].data\n img_meta = data['img_metas'].data\n gt_bboxes = data['gt_bboxes'].data.numpy().tolist()\n if img_tensor.dtype == torch.uint8:\n # The img tensor is the raw input not being normalized\n # (For SDMGR non-visual)\n img = img_tensor.cpu().numpy().transpose(1, 2, 0)\n else:\n img = tensor2imgs(\n img_tensor.unsqueeze(0), **img_meta.get('img_norm_cfg', {}))[0]\n h, w, _ = img_meta.get('img_shape', img.shape)\n img_show = img[:h, :w, :]\n model.show_result(\n img_show, result, gt_bboxes, show=show, out_file=out_file)\n\n # End2end ocr inference pipeline\n def det_recog_kie_inference(self, det_model, recog_model, kie_model=None):\n end2end_res = []\n # Find bounding boxes in the images (text detection)\n det_result = self.single_inference(det_model, self.args.arrays,\n self.args.batch_mode,\n self.args.det_batch_size)\n bboxes_list = [res['boundary_result'] for res in det_result]\n\n if kie_model:\n kie_dataset = KIEDataset(\n dict_file=kie_model.cfg.data.test.dict_file)\n\n # For each bounding box, the image is cropped and\n # sent to the recognition model either one by one\n # or all together depending on the batch_mode\n for filename, arr, bboxes, out_file in zip(self.args.filenames,\n self.args.arrays,\n bboxes_list,\n self.args.output):\n img_e2e_res = {}\n img_e2e_res['filename'] = filename\n img_e2e_res['result'] = []\n box_imgs = []\n for bbox in bboxes:\n box_res = {}\n box_res['box'] = [round(x) for x in bbox[:-1]]\n box_res['box_score'] = float(bbox[-1])\n box = bbox[:8]\n if len(bbox) > 9:\n min_x = min(bbox[0:-1:2])\n min_y = min(bbox[1:-1:2])\n max_x = max(bbox[0:-1:2])\n max_y = max(bbox[1:-1:2])\n box = [\n min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y\n ]\n box_img = crop_img(arr, box)\n if self.args.batch_mode:\n box_imgs.append(box_img)\n else:\n recog_result = model_inference(recog_model, box_img)\n text = recog_result['text']\n text_score = recog_result['score']\n if isinstance(text_score, list):\n text_score = sum(text_score) / max(1, len(text))\n box_res['text'] = text\n box_res['text_score'] = text_score\n img_e2e_res['result'].append(box_res)\n\n if self.args.batch_mode:\n recog_results = self.single_inference(\n recog_model, box_imgs, True, self.args.recog_batch_size)\n for i, recog_result in enumerate(recog_results):\n text = recog_result['text']\n text_score = recog_result['score']\n if isinstance(text_score, (list, tuple)):\n text_score = sum(text_score) / max(1, len(text))\n img_e2e_res['result'][i]['text'] = text\n img_e2e_res['result'][i]['text_score'] = text_score\n\n if self.args.merge:\n img_e2e_res['result'] = 
stitch_boxes_into_lines(\n img_e2e_res['result'], self.args.merge_xdist, 0.5)\n\n if kie_model:\n annotations = copy.deepcopy(img_e2e_res['result'])\n # Customized for kie_dataset, which\n # assumes that boxes are represented by only 4 points\n for i, ann in enumerate(annotations):\n min_x = min(ann['box'][::2])\n min_y = min(ann['box'][1::2])\n max_x = max(ann['box'][::2])\n max_y = max(ann['box'][1::2])\n annotations[i]['box'] = [\n min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y\n ]\n ann_info = kie_dataset._parse_anno_info(annotations)\n ann_info['ori_bboxes'] = ann_info.get('ori_bboxes',\n ann_info['bboxes'])\n ann_info['gt_bboxes'] = ann_info.get('gt_bboxes',\n ann_info['bboxes'])\n kie_result, data = model_inference(\n kie_model,\n arr,\n ann=ann_info,\n return_data=True,\n batch_mode=self.args.batch_mode)\n # visualize KIE results\n self.visualize_kie_output(\n kie_model,\n data,\n kie_result,\n out_file=out_file,\n show=self.args.imshow)\n gt_bboxes = data['gt_bboxes'].data.numpy().tolist()\n labels = self.generate_kie_labels(kie_result, gt_bboxes,\n kie_model.class_list)\n for i in range(len(gt_bboxes)):\n img_e2e_res['result'][i]['label'] = labels[i][0]\n img_e2e_res['result'][i]['label_score'] = labels[i][1]\n\n end2end_res.append(img_e2e_res)\n return end2end_res\n\n # Separate det/recog inference pipeline\n def single_inference(self, model, arrays, batch_mode, batch_size=0):\n result = []\n if batch_mode:\n if batch_size == 0:\n result = model_inference(model, arrays, batch_mode=True)\n else:\n n = batch_size\n arr_chunks = [\n arrays[i:i + n] for i in range(0, len(arrays), n)\n ]\n for chunk in arr_chunks:\n result.extend(\n model_inference(model, chunk, batch_mode=True))\n else:\n for arr in arrays:\n result.append(model_inference(model, arr, batch_mode=False))\n return result\n\n # Arguments pre-processing function\n def _args_processing(self, args):\n # Check if the input is a list/tuple that\n # contains only np arrays or strings\n if isinstance(args.img, (list, tuple)):\n img_list = args.img\n if not all([isinstance(x, (np.ndarray, str)) for x in args.img]):\n raise AssertionError('Images must be strings or numpy arrays')\n\n # Create a list of the images\n if isinstance(args.img, str):\n img_path = Path(args.img)\n if img_path.is_dir():\n img_list = [str(x) for x in img_path.glob('*')]\n else:\n img_list = [str(img_path)]\n elif isinstance(args.img, np.ndarray):\n img_list = [args.img]\n\n # Read all image(s) in advance to reduce wasted time\n # re-reading the images for visualization output\n args.arrays = [mmcv.imread(x) for x in img_list]\n\n # Create a list of filenames (used for output images and result files)\n if isinstance(img_list[0], str):\n args.filenames = [str(Path(x).stem) for x in img_list]\n else:\n args.filenames = [str(x) for x in range(len(img_list))]\n\n # If given an output argument, create a list of output image filenames\n num_res = len(img_list)\n if args.output:\n output_path = Path(args.output)\n if output_path.is_dir():\n args.output = [\n str(output_path / f'out_{x}.png') for x in args.filenames\n ]\n else:\n args.output = [str(args.output)]\n if args.batch_mode:\n raise AssertionError('Output of multiple images inference'\n ' must be a directory')\n else:\n args.output = [None] * num_res\n\n # If given an export argument, create a list of\n # result filenames for each image\n if args.export:\n export_path = Path(args.export)\n args.export = [\n str(export_path / f'out_{x}.{args.export_format}')\n for x in args.filenames\n ]\n else:\n 
args.export = [None] * num_res\n\n return args\n\n\n# Create an inference pipeline with parsed arguments\ndef main():\n args = parse_args()\n ocr = MMOCR(**vars(args))\n ocr.readtext(**vars(args))\n\n\nif __name__ == '__main__':\n main()\n", "path": "mmocr/utils/ocr.py" } ]
[ { "content": "#!/usr/bin/env python\n# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os\nimport warnings\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.image.misc import tensor2imgs\nfrom mmcv.runner import load_checkpoint\nfrom mmcv.utils.config import Config\n\nfrom mmocr.apis import init_detector\nfrom mmocr.apis.inference import model_inference\nfrom mmocr.core.visualize import det_recog_show_result\nfrom mmocr.datasets.kie_dataset import KIEDataset\nfrom mmocr.datasets.pipelines.crop import crop_img\nfrom mmocr.models import build_detector\nfrom mmocr.utils.box_util import stitch_boxes_into_lines\nfrom mmocr.utils.fileio import list_from_file\nfrom mmocr.utils.model import revert_sync_batchnorm\n\n\n# Parse CLI arguments\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument(\n 'img', type=str, help='Input image file or folder path.')\n parser.add_argument(\n '--output',\n type=str,\n default='',\n help='Output file/folder name for visualization')\n parser.add_argument(\n '--det',\n type=str,\n default='PANet_IC15',\n help='Pretrained text detection algorithm')\n parser.add_argument(\n '--det-config',\n type=str,\n default='',\n help='Path to the custom config file of the selected det model. It '\n 'overrides the settings in det')\n parser.add_argument(\n '--det-ckpt',\n type=str,\n default='',\n help='Path to the custom checkpoint file of the selected det model. '\n 'It overrides the settings in det')\n parser.add_argument(\n '--recog',\n type=str,\n default='SEG',\n help='Pretrained text recognition algorithm')\n parser.add_argument(\n '--recog-config',\n type=str,\n default='',\n help='Path to the custom config file of the selected recog model. It'\n 'overrides the settings in recog')\n parser.add_argument(\n '--recog-ckpt',\n type=str,\n default='',\n help='Path to the custom checkpoint file of the selected recog model. '\n 'It overrides the settings in recog')\n parser.add_argument(\n '--kie',\n type=str,\n default='',\n help='Pretrained key information extraction algorithm')\n parser.add_argument(\n '--kie-config',\n type=str,\n default='',\n help='Path to the custom config file of the selected kie model. It'\n 'overrides the settings in kie')\n parser.add_argument(\n '--kie-ckpt',\n type=str,\n default='',\n help='Path to the custom checkpoint file of the selected kie model. '\n 'It overrides the settings in kie')\n parser.add_argument(\n '--config-dir',\n type=str,\n default=os.path.join(str(Path.cwd()), 'configs/'),\n help='Path to the config directory where all the config files '\n 'are located. 
Defaults to \"configs/\"')\n parser.add_argument(\n '--batch-mode',\n action='store_true',\n help='Whether use batch mode for inference')\n parser.add_argument(\n '--recog-batch-size',\n type=int,\n default=0,\n help='Batch size for text recognition')\n parser.add_argument(\n '--det-batch-size',\n type=int,\n default=0,\n help='Batch size for text detection')\n parser.add_argument(\n '--single-batch-size',\n type=int,\n default=0,\n help='Batch size for separate det/recog inference')\n parser.add_argument(\n '--device', default='cuda:0', help='Device used for inference.')\n parser.add_argument(\n '--export',\n type=str,\n default='',\n help='Folder where the results of each image are exported')\n parser.add_argument(\n '--export-format',\n type=str,\n default='json',\n help='Format of the exported result file(s)')\n parser.add_argument(\n '--details',\n action='store_true',\n help='Whether include the text boxes coordinates and confidence values'\n )\n parser.add_argument(\n '--imshow',\n action='store_true',\n help='Whether show image with OpenCV.')\n parser.add_argument(\n '--print-result',\n action='store_true',\n help='Prints the recognised text')\n parser.add_argument(\n '--merge', action='store_true', help='Merge neighboring boxes')\n parser.add_argument(\n '--merge-xdist',\n type=float,\n default=20,\n help='The maximum x-axis distance to merge boxes')\n args = parser.parse_args()\n if args.det == 'None':\n args.det = None\n if args.recog == 'None':\n args.recog = None\n # Warnings\n if args.merge and not (args.det and args.recog):\n warnings.warn(\n 'Box merging will not work if the script is not'\n ' running in detection + recognition mode.', UserWarning)\n if not os.path.samefile(args.config_dir, os.path.join(str(\n Path.cwd()))) and (args.det_config != ''\n or args.recog_config != ''):\n warnings.warn(\n 'config_dir will be overridden by det-config or recog-config.',\n UserWarning)\n return args\n\n\nclass MMOCR:\n\n def __init__(self,\n det='PANet_IC15',\n det_config='',\n det_ckpt='',\n recog='SEG',\n recog_config='',\n recog_ckpt='',\n kie='',\n kie_config='',\n kie_ckpt='',\n config_dir=os.path.join(str(Path.cwd()), 'configs/'),\n device='cuda:0',\n **kwargs):\n\n textdet_models = {\n 'DB_r18': {\n 'config':\n 'dbnet/dbnet_r18_fpnc_1200e_icdar2015.py',\n 'ckpt':\n 'dbnet/'\n 'dbnet_r18_fpnc_sbn_1200e_icdar2015_20210329-ba3ab597.pth'\n },\n 'DB_r50': {\n 'config':\n 'dbnet/dbnet_r50dcnv2_fpnc_1200e_icdar2015.py',\n 'ckpt':\n 'dbnet/'\n 'dbnet_r50dcnv2_fpnc_sbn_1200e_icdar2015_20211025-9fe3b590.pth'\n },\n 'DRRG': {\n 'config':\n 'drrg/drrg_r50_fpn_unet_1200e_ctw1500.py',\n 'ckpt':\n 'drrg/drrg_r50_fpn_unet_1200e_ctw1500_20211022-fb30b001.pth'\n },\n 'FCE_IC15': {\n 'config':\n 'fcenet/fcenet_r50_fpn_1500e_icdar2015.py',\n 'ckpt':\n 'fcenet/fcenet_r50_fpn_1500e_icdar2015_20211022-daefb6ed.pth'\n },\n 'FCE_CTW_DCNv2': {\n 'config':\n 'fcenet/fcenet_r50dcnv2_fpn_1500e_ctw1500.py',\n 'ckpt':\n 'fcenet/' +\n 'fcenet_r50dcnv2_fpn_1500e_ctw1500_20211022-e326d7ec.pth'\n },\n 'MaskRCNN_CTW': {\n 'config':\n 'maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py',\n 'ckpt':\n 'maskrcnn/'\n 'mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.pth'\n },\n 'MaskRCNN_IC15': {\n 'config':\n 'maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py',\n 'ckpt':\n 'maskrcnn/'\n 'mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.pth'\n },\n 'MaskRCNN_IC17': {\n 'config':\n 'maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017.py',\n 'ckpt':\n 'maskrcnn/'\n 'mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.pth'\n },\n 
'PANet_CTW': {\n 'config':\n 'panet/panet_r18_fpem_ffm_600e_ctw1500.py',\n 'ckpt':\n 'panet/'\n 'panet_r18_fpem_ffm_sbn_600e_ctw1500_20210219-3b3a9aa3.pth'\n },\n 'PANet_IC15': {\n 'config':\n 'panet/panet_r18_fpem_ffm_600e_icdar2015.py',\n 'ckpt':\n 'panet/'\n 'panet_r18_fpem_ffm_sbn_600e_icdar2015_20210219-42dbe46a.pth'\n },\n 'PS_CTW': {\n 'config': 'psenet/psenet_r50_fpnf_600e_ctw1500.py',\n 'ckpt':\n 'psenet/psenet_r50_fpnf_600e_ctw1500_20210401-216fed50.pth'\n },\n 'PS_IC15': {\n 'config':\n 'psenet/psenet_r50_fpnf_600e_icdar2015.py',\n 'ckpt':\n 'psenet/psenet_r50_fpnf_600e_icdar2015_pretrain-eefd8fe6.pth'\n },\n 'TextSnake': {\n 'config':\n 'textsnake/textsnake_r50_fpn_unet_1200e_ctw1500.py',\n 'ckpt':\n 'textsnake/textsnake_r50_fpn_unet_1200e_ctw1500-27f65b64.pth'\n }\n }\n\n textrecog_models = {\n 'CRNN': {\n 'config': 'crnn/crnn_academic_dataset.py',\n 'ckpt': 'crnn/crnn_academic-a723a1c5.pth'\n },\n 'SAR': {\n 'config': 'sar/sar_r31_parallel_decoder_academic.py',\n 'ckpt': 'sar/sar_r31_parallel_decoder_academic-dba3a4a3.pth'\n },\n 'SAR_CN': {\n 'config':\n 'sar/sar_r31_parallel_decoder_chinese.py',\n 'ckpt':\n 'sar/sar_r31_parallel_decoder_chineseocr_20210507-b4be8214.pth'\n },\n 'NRTR_1/16-1/8': {\n 'config': 'nrtr/nrtr_r31_1by16_1by8_academic.py',\n 'ckpt':\n 'nrtr/nrtr_r31_1by16_1by8_academic_20211124-f60cebf4.pth'\n },\n 'NRTR_1/8-1/4': {\n 'config': 'nrtr/nrtr_r31_1by8_1by4_academic.py',\n 'ckpt':\n 'nrtr/nrtr_r31_1by8_1by4_academic_20211123-e1fdb322.pth'\n },\n 'RobustScanner': {\n 'config': 'robust_scanner/robustscanner_r31_academic.py',\n 'ckpt': 'robustscanner/robustscanner_r31_academic-5f05874f.pth'\n },\n 'SATRN': {\n 'config': 'satrn/satrn_academic.py',\n 'ckpt': 'satrn/satrn_academic_20211009-cb8b1580.pth'\n },\n 'SATRN_sm': {\n 'config': 'satrn/satrn_small.py',\n 'ckpt': 'satrn/satrn_small_20211009-2cf13355.pth'\n },\n 'SEG': {\n 'config': 'seg/seg_r31_1by16_fpnocr_academic.py',\n 'ckpt': 'seg/seg_r31_1by16_fpnocr_academic-72235b11.pth'\n },\n 'CRNN_TPS': {\n 'config': 'tps/crnn_tps_academic_dataset.py',\n 'ckpt': 'tps/crnn_tps_academic_dataset_20210510-d221a905.pth'\n }\n }\n\n kie_models = {\n 'SDMGR': {\n 'config': 'sdmgr/sdmgr_unet16_60e_wildreceipt.py',\n 'ckpt':\n 'sdmgr/sdmgr_unet16_60e_wildreceipt_20210520-7489e6de.pth'\n }\n }\n\n self.td = det\n self.tr = recog\n self.kie = kie\n self.device = device\n\n # Check if the det/recog model choice is valid\n if self.td and self.td not in textdet_models:\n raise ValueError(self.td,\n 'is not a supported text detection algorthm')\n elif self.tr and self.tr not in textrecog_models:\n raise ValueError(self.tr,\n 'is not a supported text recognition algorithm')\n elif self.kie:\n if self.kie not in kie_models:\n raise ValueError(\n self.kie, 'is not a supported key information extraction'\n ' algorithm')\n elif not (self.td and self.tr):\n raise NotImplementedError(\n self.kie, 'has to run together'\n ' with text detection and recognition algorithms.')\n\n self.detect_model = None\n if self.td:\n # Build detection model\n if not det_config:\n det_config = os.path.join(config_dir, 'textdet/',\n textdet_models[self.td]['config'])\n if not det_ckpt:\n det_ckpt = 'https://download.openmmlab.com/mmocr/textdet/' + \\\n textdet_models[self.td]['ckpt']\n\n self.detect_model = init_detector(\n det_config, det_ckpt, device=self.device)\n self.detect_model = revert_sync_batchnorm(self.detect_model)\n\n self.recog_model = None\n if self.tr:\n # Build recognition model\n if not recog_config:\n recog_config = os.path.join(\n 
config_dir, 'textrecog/',\n textrecog_models[self.tr]['config'])\n if not recog_ckpt:\n recog_ckpt = 'https://download.openmmlab.com/mmocr/' + \\\n 'textrecog/' + textrecog_models[self.tr]['ckpt']\n\n self.recog_model = init_detector(\n recog_config, recog_ckpt, device=self.device)\n self.recog_model = revert_sync_batchnorm(self.recog_model)\n\n self.kie_model = None\n if self.kie:\n # Build key information extraction model\n if not kie_config:\n kie_config = os.path.join(config_dir, 'kie/',\n kie_models[self.kie]['config'])\n if not kie_ckpt:\n kie_ckpt = 'https://download.openmmlab.com/mmocr/' + \\\n 'kie/' + kie_models[self.kie]['ckpt']\n\n kie_cfg = Config.fromfile(kie_config)\n self.kie_model = build_detector(\n kie_cfg.model, test_cfg=kie_cfg.get('test_cfg'))\n self.kie_model = revert_sync_batchnorm(self.kie_model)\n self.kie_model.cfg = kie_cfg\n load_checkpoint(self.kie_model, kie_ckpt, map_location=self.device)\n\n # Attribute check\n for model in list(filter(None, [self.recog_model, self.detect_model])):\n if hasattr(model, 'module'):\n model = model.module\n if model.cfg.data.test['type'] == 'ConcatDataset':\n model.cfg.data.test.pipeline = \\\n model.cfg.data.test['datasets'][0].pipeline\n\n def readtext(self,\n img,\n output=None,\n details=False,\n export=None,\n export_format='json',\n batch_mode=False,\n recog_batch_size=0,\n det_batch_size=0,\n single_batch_size=0,\n imshow=False,\n print_result=False,\n merge=False,\n merge_xdist=20,\n **kwargs):\n args = locals().copy()\n [args.pop(x, None) for x in ['kwargs', 'self']]\n args = Namespace(**args)\n\n # Input and output arguments processing\n self._args_processing(args)\n self.args = args\n\n pp_result = None\n\n # Send args and models to the MMOCR model inference API\n # and call post-processing functions for the output\n if self.detect_model and self.recog_model:\n det_recog_result = self.det_recog_kie_inference(\n self.detect_model, self.recog_model, kie_model=self.kie_model)\n pp_result = self.det_recog_pp(det_recog_result)\n else:\n for model in list(\n filter(None, [self.recog_model, self.detect_model])):\n result = self.single_inference(model, args.arrays,\n args.batch_mode,\n args.single_batch_size)\n pp_result = self.single_pp(result, model)\n\n return pp_result\n\n # Post processing function for end2end ocr\n def det_recog_pp(self, result):\n final_results = []\n args = self.args\n for arr, output, export, det_recog_result in zip(\n args.arrays, args.output, args.export, result):\n if output or args.imshow:\n if self.kie_model:\n res_img = det_recog_show_result(arr, det_recog_result)\n else:\n res_img = det_recog_show_result(\n arr, det_recog_result, out_file=output)\n if args.imshow and not self.kie_model:\n mmcv.imshow(res_img, 'inference results')\n if not args.details:\n simple_res = {}\n simple_res['filename'] = det_recog_result['filename']\n simple_res['text'] = [\n x['text'] for x in det_recog_result['result']\n ]\n final_result = simple_res\n else:\n final_result = det_recog_result\n if export:\n mmcv.dump(final_result, export, indent=4)\n if args.print_result:\n print(final_result, end='\\n\\n')\n final_results.append(final_result)\n return final_results\n\n # Post processing function for separate det/recog inference\n def single_pp(self, result, model):\n for arr, output, export, res in zip(self.args.arrays, self.args.output,\n self.args.export, result):\n if export:\n mmcv.dump(res, export, indent=4)\n if output or self.args.imshow:\n res_img = model.show_result(arr, res, out_file=output)\n if 
self.args.imshow:\n mmcv.imshow(res_img, 'inference results')\n if self.args.print_result:\n print(res, end='\\n\\n')\n return result\n\n def generate_kie_labels(self, result, boxes, class_list):\n idx_to_cls = {}\n if class_list is not None:\n for line in list_from_file(class_list):\n class_idx, class_label = line.strip().split()\n idx_to_cls[class_idx] = class_label\n\n max_value, max_idx = torch.max(result['nodes'].detach().cpu(), -1)\n node_pred_label = max_idx.numpy().tolist()\n node_pred_score = max_value.numpy().tolist()\n labels = []\n for i in range(len(boxes)):\n pred_label = str(node_pred_label[i])\n if pred_label in idx_to_cls:\n pred_label = idx_to_cls[pred_label]\n pred_score = node_pred_score[i]\n labels.append((pred_label, pred_score))\n return labels\n\n def visualize_kie_output(self,\n model,\n data,\n result,\n out_file=None,\n show=False):\n \"\"\"Visualizes KIE output.\"\"\"\n img_tensor = data['img'].data\n img_meta = data['img_metas'].data\n gt_bboxes = data['gt_bboxes'].data.numpy().tolist()\n if img_tensor.dtype == torch.uint8:\n # The img tensor is the raw input not being normalized\n # (For SDMGR non-visual)\n img = img_tensor.cpu().numpy().transpose(1, 2, 0)\n else:\n img = tensor2imgs(\n img_tensor.unsqueeze(0), **img_meta.get('img_norm_cfg', {}))[0]\n h, w, _ = img_meta.get('img_shape', img.shape)\n img_show = img[:h, :w, :]\n model.show_result(\n img_show, result, gt_bboxes, show=show, out_file=out_file)\n\n # End2end ocr inference pipeline\n def det_recog_kie_inference(self, det_model, recog_model, kie_model=None):\n end2end_res = []\n # Find bounding boxes in the images (text detection)\n det_result = self.single_inference(det_model, self.args.arrays,\n self.args.batch_mode,\n self.args.det_batch_size)\n bboxes_list = [res['boundary_result'] for res in det_result]\n\n if kie_model:\n kie_dataset = KIEDataset(\n dict_file=kie_model.cfg.data.test.dict_file)\n\n # For each bounding box, the image is cropped and\n # sent to the recognition model either one by one\n # or all together depending on the batch_mode\n for filename, arr, bboxes, out_file in zip(self.args.filenames,\n self.args.arrays,\n bboxes_list,\n self.args.output):\n img_e2e_res = {}\n img_e2e_res['filename'] = filename\n img_e2e_res['result'] = []\n box_imgs = []\n for bbox in bboxes:\n box_res = {}\n box_res['box'] = [round(x) for x in bbox[:-1]]\n box_res['box_score'] = float(bbox[-1])\n box = bbox[:8]\n if len(bbox) > 9:\n min_x = min(bbox[0:-1:2])\n min_y = min(bbox[1:-1:2])\n max_x = max(bbox[0:-1:2])\n max_y = max(bbox[1:-1:2])\n box = [\n min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y\n ]\n box_img = crop_img(arr, box)\n if self.args.batch_mode:\n box_imgs.append(box_img)\n else:\n recog_result = model_inference(recog_model, box_img)\n text = recog_result['text']\n text_score = recog_result['score']\n if isinstance(text_score, list):\n text_score = sum(text_score) / max(1, len(text))\n box_res['text'] = text\n box_res['text_score'] = text_score\n img_e2e_res['result'].append(box_res)\n\n if self.args.batch_mode:\n recog_results = self.single_inference(\n recog_model, box_imgs, True, self.args.recog_batch_size)\n for i, recog_result in enumerate(recog_results):\n text = recog_result['text']\n text_score = recog_result['score']\n if isinstance(text_score, (list, tuple)):\n text_score = sum(text_score) / max(1, len(text))\n img_e2e_res['result'][i]['text'] = text\n img_e2e_res['result'][i]['text_score'] = text_score\n\n if self.args.merge:\n img_e2e_res['result'] = 
stitch_boxes_into_lines(\n img_e2e_res['result'], self.args.merge_xdist, 0.5)\n\n if kie_model:\n annotations = copy.deepcopy(img_e2e_res['result'])\n # Customized for kie_dataset, which\n # assumes that boxes are represented by only 4 points\n for i, ann in enumerate(annotations):\n min_x = min(ann['box'][::2])\n min_y = min(ann['box'][1::2])\n max_x = max(ann['box'][::2])\n max_y = max(ann['box'][1::2])\n annotations[i]['box'] = [\n min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y\n ]\n ann_info = kie_dataset._parse_anno_info(annotations)\n ann_info['ori_bboxes'] = ann_info.get('ori_bboxes',\n ann_info['bboxes'])\n ann_info['gt_bboxes'] = ann_info.get('gt_bboxes',\n ann_info['bboxes'])\n kie_result, data = model_inference(\n kie_model,\n arr,\n ann=ann_info,\n return_data=True,\n batch_mode=self.args.batch_mode)\n # visualize KIE results\n self.visualize_kie_output(\n kie_model,\n data,\n kie_result,\n out_file=out_file,\n show=self.args.imshow)\n gt_bboxes = data['gt_bboxes'].data.numpy().tolist()\n labels = self.generate_kie_labels(kie_result, gt_bboxes,\n kie_model.class_list)\n for i in range(len(gt_bboxes)):\n img_e2e_res['result'][i]['label'] = labels[i][0]\n img_e2e_res['result'][i]['label_score'] = labels[i][1]\n\n end2end_res.append(img_e2e_res)\n return end2end_res\n\n # Separate det/recog inference pipeline\n def single_inference(self, model, arrays, batch_mode, batch_size=0):\n result = []\n if batch_mode:\n if batch_size == 0:\n result = model_inference(model, arrays, batch_mode=True)\n else:\n n = batch_size\n arr_chunks = [\n arrays[i:i + n] for i in range(0, len(arrays), n)\n ]\n for chunk in arr_chunks:\n result.extend(\n model_inference(model, chunk, batch_mode=True))\n else:\n for arr in arrays:\n result.append(model_inference(model, arr, batch_mode=False))\n return result\n\n # Arguments pre-processing function\n def _args_processing(self, args):\n # Check if the input is a list/tuple that\n # contains only np arrays or strings\n if isinstance(args.img, (list, tuple)):\n img_list = args.img\n if not all([isinstance(x, (np.ndarray, str)) for x in args.img]):\n raise AssertionError('Images must be strings or numpy arrays')\n\n # Create a list of the images\n if isinstance(args.img, str):\n img_path = Path(args.img)\n if img_path.is_dir():\n img_list = [str(x) for x in img_path.glob('*')]\n else:\n img_list = [str(img_path)]\n elif isinstance(args.img, np.ndarray):\n img_list = [args.img]\n\n # Read all image(s) in advance to reduce wasted time\n # re-reading the images for visualization output\n args.arrays = [mmcv.imread(x) for x in img_list]\n\n # Create a list of filenames (used for output images and result files)\n if isinstance(img_list[0], str):\n args.filenames = [str(Path(x).stem) for x in img_list]\n else:\n args.filenames = [str(x) for x in range(len(img_list))]\n\n # If given an output argument, create a list of output image filenames\n num_res = len(img_list)\n if args.output:\n output_path = Path(args.output)\n if output_path.is_dir():\n args.output = [\n str(output_path / f'out_{x}.png') for x in args.filenames\n ]\n else:\n args.output = [str(args.output)]\n if args.batch_mode:\n raise AssertionError('Output of multiple images inference'\n ' must be a directory')\n else:\n args.output = [None] * num_res\n\n # If given an export argument, create a list of\n # result filenames for each image\n if args.export:\n export_path = Path(args.export)\n args.export = [\n str(export_path / f'out_{x}.{args.export_format}')\n for x in args.filenames\n ]\n else:\n 
args.export = [None] * num_res\n\n return args\n\n\n# Create an inference pipeline with parsed arguments\ndef main():\n args = parse_args()\n ocr = MMOCR(**vars(args))\n ocr.readtext(**vars(args))\n\n\nif __name__ == '__main__':\n main()\n", "path": "mmocr/utils/ocr.py" } ]
diff --git a/mmocr/utils/ocr.py b/mmocr/utils/ocr.py
index 1c5f342e1..5631b1ada 100755
--- a/mmocr/utils/ocr.py
+++ b/mmocr/utils/ocr.py
@@ -411,7 +411,7 @@ def readtext(self,
                  merge=False,
                  merge_xdist=20,
                  **kwargs):
-        args = locals()
+        args = locals().copy()
         [args.pop(x, None) for x in ['kwargs', 'self']]
         args = Namespace(**args)
 
ibis-project__ibis-4271
bug(impala) A delimited table should be explicitly stored as textfile

https://github.com/ibis-project/ibis/blob/88ffe3367cb6a34936e578f6a9b68dc30d559507/ibis/backends/impala/ddl.py#L67

When the cluster's default file format is set to Parquet, the generated DDL causes an exception. The table should be explicitly stored as TEXTFILE, for example:

```python
        if self.lineterminator is not None:
            yield f"LINES TERMINATED BY '{self.lineterminator}'"

        yield 'STORED AS TEXTFILE'
        yield f"LOCATION '{self.path}'"
```
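To make the intended DDL concrete, here is a minimal, self-contained sketch of the generator pattern used by `DelimitedFormat.to_ddl` in the files that follow; the function name and sample arguments are illustrative, not part of the ibis API:

```python
# Illustrative sketch: a delimited-format DDL generator that always emits an
# explicit STORED AS TEXTFILE clause, so the cluster default (e.g. Parquet)
# never applies to delimited text data.
def delimited_ddl(path, delimiter=None, escapechar=None, lineterminator=None):
    yield 'ROW FORMAT DELIMITED'
    if delimiter is not None:
        yield f"FIELDS TERMINATED BY '{delimiter}'"
    if escapechar is not None:
        yield f"ESCAPED BY '{escapechar}'"
    if lineterminator is not None:
        yield f"LINES TERMINATED BY '{lineterminator}'"
    yield 'STORED AS TEXTFILE'
    yield f"LOCATION '{path}'"


print('\n'.join(delimited_ddl('/path/to/files/', delimiter='|')))
# ROW FORMAT DELIMITED
# FIELDS TERMINATED BY '|'
# STORED AS TEXTFILE
# LOCATION '/path/to/files/'
```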
[ { "content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nfrom ibis.backends.base.sql.ddl import (\n AlterTable,\n BaseDDL,\n CreateTable,\n CreateTableWithSchema,\n DropFunction,\n format_partition,\n format_schema,\n format_tblproperties,\n)\nfrom ibis.backends.base.sql.registry import type_to_sql_string\n\n\nclass CreateTableParquet(CreateTable):\n def __init__(\n self,\n table_name,\n path,\n example_file=None,\n example_table=None,\n schema=None,\n external=True,\n **kwargs,\n ):\n super().__init__(\n table_name,\n external=external,\n format='parquet',\n path=path,\n **kwargs,\n )\n self.example_file = example_file\n self.example_table = example_table\n self.schema = schema\n\n @property\n def _pieces(self):\n if self.example_file is not None:\n yield f\"LIKE PARQUET '{self.example_file}'\"\n elif self.example_table is not None:\n yield f\"LIKE {self.example_table}\"\n elif self.schema is not None:\n yield format_schema(self.schema)\n else:\n raise NotImplementedError\n\n yield self._storage()\n yield self._location()\n\n\nclass DelimitedFormat:\n def __init__(\n self,\n path,\n delimiter=None,\n escapechar=None,\n na_rep=None,\n lineterminator=None,\n ):\n self.path = path\n self.delimiter = delimiter\n self.escapechar = escapechar\n self.lineterminator = lineterminator\n self.na_rep = na_rep\n\n def to_ddl(self):\n yield 'ROW FORMAT DELIMITED'\n\n if self.delimiter is not None:\n yield f\"FIELDS TERMINATED BY '{self.delimiter}'\"\n\n if self.escapechar is not None:\n yield f\"ESCAPED BY '{self.escapechar}'\"\n\n if self.lineterminator is not None:\n yield f\"LINES TERMINATED BY '{self.lineterminator}'\"\n\n yield f\"LOCATION '{self.path}'\"\n\n if self.na_rep is not None:\n props = {'serialization.null.format': self.na_rep}\n yield format_tblproperties(props)\n\n\nclass AvroFormat:\n def __init__(self, path, avro_schema):\n self.path = path\n self.avro_schema = avro_schema\n\n def to_ddl(self):\n yield 'STORED AS AVRO'\n yield f\"LOCATION '{self.path}'\"\n\n schema = json.dumps(self.avro_schema, indent=2, sort_keys=True)\n schema = '\\n'.join(x.rstrip() for x in schema.splitlines())\n\n props = {'avro.schema.literal': schema}\n yield format_tblproperties(props)\n\n\nclass ParquetFormat:\n def __init__(self, path):\n self.path = path\n\n def to_ddl(self):\n yield 'STORED AS PARQUET'\n yield f\"LOCATION '{self.path}'\"\n\n\nclass CreateTableDelimited(CreateTableWithSchema):\n def __init__(\n self,\n table_name,\n path,\n schema,\n delimiter=None,\n escapechar=None,\n lineterminator=None,\n na_rep=None,\n external=True,\n **kwargs,\n ):\n table_format = DelimitedFormat(\n path,\n delimiter=delimiter,\n escapechar=escapechar,\n lineterminator=lineterminator,\n na_rep=na_rep,\n )\n super().__init__(\n table_name, schema, table_format, external=external, **kwargs\n )\n\n\nclass CreateTableAvro(CreateTable):\n def __init__(self, table_name, path, avro_schema, external=True, **kwargs):\n super().__init__(table_name, external=external, 
**kwargs)\n self.table_format = AvroFormat(path, avro_schema)\n\n @property\n def _pieces(self):\n yield '\\n'.join(self.table_format.to_ddl())\n\n\nclass LoadData(BaseDDL):\n\n \"\"\"\n Generate DDL for LOAD DATA command. Cannot be cancelled\n \"\"\"\n\n def __init__(\n self,\n table_name,\n path,\n database=None,\n partition=None,\n partition_schema=None,\n overwrite=False,\n ):\n self.table_name = table_name\n self.database = database\n self.path = path\n\n self.partition = partition\n self.partition_schema = partition_schema\n\n self.overwrite = overwrite\n\n def compile(self):\n overwrite = 'OVERWRITE ' if self.overwrite else ''\n\n if self.partition is not None:\n partition = '\\n' + format_partition(\n self.partition, self.partition_schema\n )\n else:\n partition = ''\n\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"LOAD DATA INPATH '{}' {}INTO TABLE {}{}\".format(\n self.path, overwrite, scoped_name, partition\n )\n\n\nclass PartitionProperties(AlterTable):\n def __init__(\n self,\n table,\n partition,\n partition_schema,\n location=None,\n format=None,\n tbl_properties=None,\n serde_properties=None,\n ):\n super().__init__(\n table,\n location=location,\n format=format,\n tbl_properties=tbl_properties,\n serde_properties=serde_properties,\n )\n self.partition = partition\n self.partition_schema = partition_schema\n\n def _compile(self, cmd, property_prefix=''):\n part = format_partition(self.partition, self.partition_schema)\n if cmd:\n part = f'{cmd} {part}'\n\n props = self._format_properties(property_prefix)\n action = f'{self.table} {part}{props}'\n return self._wrap_command(action)\n\n\nclass AddPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema, location=None):\n super().__init__(table, partition, partition_schema, location=location)\n\n def compile(self):\n return self._compile('ADD')\n\n\nclass AlterPartition(PartitionProperties):\n def compile(self):\n return self._compile('', 'SET ')\n\n\nclass DropPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema):\n super().__init__(table, partition, partition_schema)\n\n def compile(self):\n return self._compile('DROP')\n\n\nclass CacheTable(BaseDDL):\n def __init__(self, table_name, database=None, pool='default'):\n self.table_name = table_name\n self.database = database\n self.pool = pool\n\n def compile(self):\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"ALTER TABLE {} SET CACHED IN '{}'\".format(\n scoped_name, self.pool\n )\n\n\nclass CreateFunction(BaseDDL):\n\n _object_type = 'FUNCTION'\n\n def __init__(self, func, name=None, database=None):\n self.func = func\n self.name = name or func.name\n self.database = database\n\n def _impala_signature(self):\n scoped_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.func.inputs)\n output_sig = type_to_sql_string(self.func.output)\n\n return f'{scoped_name}({input_sig}) returns {output_sig}'\n\n\nclass CreateUDF(CreateFunction):\n def compile(self):\n create_decl = 'CREATE FUNCTION'\n impala_sig = self._impala_signature()\n param_line = \"location '{}' symbol='{}'\".format(\n self.func.lib_path, self.func.so_symbol\n )\n return ' '.join([create_decl, impala_sig, param_line])\n\n\nclass CreateUDA(CreateFunction):\n def compile(self):\n create_decl = 'CREATE AGGREGATE FUNCTION'\n impala_sig = self._impala_signature()\n tokens = [f\"location '{self.func.lib_path}'\"]\n\n fn_names = (\n 'init_fn',\n 
'update_fn',\n 'merge_fn',\n 'serialize_fn',\n 'finalize_fn',\n )\n\n for fn in fn_names:\n value = getattr(self.func, fn)\n if value is not None:\n tokens.append(f\"{fn}='{value}'\")\n\n return ' '.join([create_decl, impala_sig]) + ' ' + '\\n'.join(tokens)\n\n\nclass DropFunction(DropFunction):\n def _impala_signature(self):\n full_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.inputs)\n return f'{full_name}({input_sig})'\n\n\nclass ListFunction(BaseDDL):\n def __init__(self, database, like=None, aggregate=False):\n self.database = database\n self.like = like\n self.aggregate = aggregate\n\n def compile(self):\n statement = 'SHOW '\n if self.aggregate:\n statement += 'AGGREGATE '\n statement += f'FUNCTIONS IN {self.database}'\n if self.like:\n statement += f\" LIKE '{self.like}'\"\n return statement\n\n\ndef _impala_input_signature(inputs):\n # TODO: varargs '{}...'.format(val)\n return ', '.join(map(type_to_sql_string, inputs))\n", "path": "ibis/backends/impala/ddl.py" } ]
[ { "content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nfrom ibis.backends.base.sql.ddl import (\n AlterTable,\n BaseDDL,\n CreateTable,\n CreateTableWithSchema,\n DropFunction,\n format_partition,\n format_schema,\n format_tblproperties,\n)\nfrom ibis.backends.base.sql.registry import type_to_sql_string\n\n\nclass CreateTableParquet(CreateTable):\n def __init__(\n self,\n table_name,\n path,\n example_file=None,\n example_table=None,\n schema=None,\n external=True,\n **kwargs,\n ):\n super().__init__(\n table_name,\n external=external,\n format='parquet',\n path=path,\n **kwargs,\n )\n self.example_file = example_file\n self.example_table = example_table\n self.schema = schema\n\n @property\n def _pieces(self):\n if self.example_file is not None:\n yield f\"LIKE PARQUET '{self.example_file}'\"\n elif self.example_table is not None:\n yield f\"LIKE {self.example_table}\"\n elif self.schema is not None:\n yield format_schema(self.schema)\n else:\n raise NotImplementedError\n\n yield self._storage()\n yield self._location()\n\n\nclass DelimitedFormat:\n def __init__(\n self,\n path,\n delimiter=None,\n escapechar=None,\n na_rep=None,\n lineterminator=None,\n ):\n self.path = path\n self.delimiter = delimiter\n self.escapechar = escapechar\n self.lineterminator = lineterminator\n self.na_rep = na_rep\n\n def to_ddl(self):\n yield 'ROW FORMAT DELIMITED'\n\n if self.delimiter is not None:\n yield f\"FIELDS TERMINATED BY '{self.delimiter}'\"\n\n if self.escapechar is not None:\n yield f\"ESCAPED BY '{self.escapechar}'\"\n\n if self.lineterminator is not None:\n yield f\"LINES TERMINATED BY '{self.lineterminator}'\"\n\n yield 'STORED AS TEXTFILE'\n yield f\"LOCATION '{self.path}'\"\n\n if self.na_rep is not None:\n props = {'serialization.null.format': self.na_rep}\n yield format_tblproperties(props)\n\n\nclass AvroFormat:\n def __init__(self, path, avro_schema):\n self.path = path\n self.avro_schema = avro_schema\n\n def to_ddl(self):\n yield 'STORED AS AVRO'\n yield f\"LOCATION '{self.path}'\"\n\n schema = json.dumps(self.avro_schema, indent=2, sort_keys=True)\n schema = '\\n'.join(x.rstrip() for x in schema.splitlines())\n\n props = {'avro.schema.literal': schema}\n yield format_tblproperties(props)\n\n\nclass ParquetFormat:\n def __init__(self, path):\n self.path = path\n\n def to_ddl(self):\n yield 'STORED AS PARQUET'\n yield f\"LOCATION '{self.path}'\"\n\n\nclass CreateTableDelimited(CreateTableWithSchema):\n def __init__(\n self,\n table_name,\n path,\n schema,\n delimiter=None,\n escapechar=None,\n lineterminator=None,\n na_rep=None,\n external=True,\n **kwargs,\n ):\n table_format = DelimitedFormat(\n path,\n delimiter=delimiter,\n escapechar=escapechar,\n lineterminator=lineterminator,\n na_rep=na_rep,\n )\n super().__init__(\n table_name, schema, table_format, external=external, **kwargs\n )\n\n\nclass CreateTableAvro(CreateTable):\n def __init__(self, table_name, path, avro_schema, external=True, **kwargs):\n 
super().__init__(table_name, external=external, **kwargs)\n self.table_format = AvroFormat(path, avro_schema)\n\n @property\n def _pieces(self):\n yield '\\n'.join(self.table_format.to_ddl())\n\n\nclass LoadData(BaseDDL):\n\n \"\"\"\n Generate DDL for LOAD DATA command. Cannot be cancelled\n \"\"\"\n\n def __init__(\n self,\n table_name,\n path,\n database=None,\n partition=None,\n partition_schema=None,\n overwrite=False,\n ):\n self.table_name = table_name\n self.database = database\n self.path = path\n\n self.partition = partition\n self.partition_schema = partition_schema\n\n self.overwrite = overwrite\n\n def compile(self):\n overwrite = 'OVERWRITE ' if self.overwrite else ''\n\n if self.partition is not None:\n partition = '\\n' + format_partition(\n self.partition, self.partition_schema\n )\n else:\n partition = ''\n\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"LOAD DATA INPATH '{}' {}INTO TABLE {}{}\".format(\n self.path, overwrite, scoped_name, partition\n )\n\n\nclass PartitionProperties(AlterTable):\n def __init__(\n self,\n table,\n partition,\n partition_schema,\n location=None,\n format=None,\n tbl_properties=None,\n serde_properties=None,\n ):\n super().__init__(\n table,\n location=location,\n format=format,\n tbl_properties=tbl_properties,\n serde_properties=serde_properties,\n )\n self.partition = partition\n self.partition_schema = partition_schema\n\n def _compile(self, cmd, property_prefix=''):\n part = format_partition(self.partition, self.partition_schema)\n if cmd:\n part = f'{cmd} {part}'\n\n props = self._format_properties(property_prefix)\n action = f'{self.table} {part}{props}'\n return self._wrap_command(action)\n\n\nclass AddPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema, location=None):\n super().__init__(table, partition, partition_schema, location=location)\n\n def compile(self):\n return self._compile('ADD')\n\n\nclass AlterPartition(PartitionProperties):\n def compile(self):\n return self._compile('', 'SET ')\n\n\nclass DropPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema):\n super().__init__(table, partition, partition_schema)\n\n def compile(self):\n return self._compile('DROP')\n\n\nclass CacheTable(BaseDDL):\n def __init__(self, table_name, database=None, pool='default'):\n self.table_name = table_name\n self.database = database\n self.pool = pool\n\n def compile(self):\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"ALTER TABLE {} SET CACHED IN '{}'\".format(\n scoped_name, self.pool\n )\n\n\nclass CreateFunction(BaseDDL):\n\n _object_type = 'FUNCTION'\n\n def __init__(self, func, name=None, database=None):\n self.func = func\n self.name = name or func.name\n self.database = database\n\n def _impala_signature(self):\n scoped_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.func.inputs)\n output_sig = type_to_sql_string(self.func.output)\n\n return f'{scoped_name}({input_sig}) returns {output_sig}'\n\n\nclass CreateUDF(CreateFunction):\n def compile(self):\n create_decl = 'CREATE FUNCTION'\n impala_sig = self._impala_signature()\n param_line = \"location '{}' symbol='{}'\".format(\n self.func.lib_path, self.func.so_symbol\n )\n return ' '.join([create_decl, impala_sig, param_line])\n\n\nclass CreateUDA(CreateFunction):\n def compile(self):\n create_decl = 'CREATE AGGREGATE FUNCTION'\n impala_sig = self._impala_signature()\n tokens = [f\"location 
'{self.func.lib_path}'\"]\n\n fn_names = (\n 'init_fn',\n 'update_fn',\n 'merge_fn',\n 'serialize_fn',\n 'finalize_fn',\n )\n\n for fn in fn_names:\n value = getattr(self.func, fn)\n if value is not None:\n tokens.append(f\"{fn}='{value}'\")\n\n return ' '.join([create_decl, impala_sig]) + ' ' + '\\n'.join(tokens)\n\n\nclass DropFunction(DropFunction):\n def _impala_signature(self):\n full_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.inputs)\n return f'{full_name}({input_sig})'\n\n\nclass ListFunction(BaseDDL):\n def __init__(self, database, like=None, aggregate=False):\n self.database = database\n self.like = like\n self.aggregate = aggregate\n\n def compile(self):\n statement = 'SHOW '\n if self.aggregate:\n statement += 'AGGREGATE '\n statement += f'FUNCTIONS IN {self.database}'\n if self.like:\n statement += f\" LIKE '{self.like}'\"\n return statement\n\n\ndef _impala_input_signature(inputs):\n # TODO: varargs '{}...'.format(val)\n return ', '.join(map(type_to_sql_string, inputs))\n", "path": "ibis/backends/impala/ddl.py" } ]
diff --git a/ibis/backends/impala/ddl.py b/ibis/backends/impala/ddl.py
index 2df5daf4bf6a..8762c0c99d81 100644
--- a/ibis/backends/impala/ddl.py
+++ b/ibis/backends/impala/ddl.py
@@ -91,6 +91,7 @@ def to_ddl(self):
         if self.lineterminator is not None:
             yield f"LINES TERMINATED BY '{self.lineterminator}'"
 
+        yield 'STORED AS TEXTFILE'
         yield f"LOCATION '{self.path}'"
 
         if self.na_rep is not None:
diff --git a/ibis/backends/impala/tests/test_ddl_compilation.py b/ibis/backends/impala/tests/test_ddl_compilation.py
index 3c57116e90d5..6a6c75f27d44 100644
--- a/ibis/backends/impala/tests/test_ddl_compilation.py
+++ b/ibis/backends/impala/tests/test_ddl_compilation.py
@@ -407,6 +407,7 @@ def test_create_table_delimited():
 FIELDS TERMINATED BY '|'
 ESCAPED BY '\\'
 LINES TERMINATED BY '\0'
+STORED AS TEXTFILE
 LOCATION '{}'""".format(
         path
     )
pyca__cryptography-1530
Release Automation Fixes for Seventh Release

The release script is not properly waiting for the wheel job it starts to finish before downloading. This causes it to download previous releases and attempt to upload them.
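A sketch of the corrected waiting logic, mirroring the patched `wait_for_build_completed` in `tasks.py` below: the short initial sleep gives the freshly triggered Jenkins job time to register as building, so the first poll of `lastBuild` does not observe the previous, already-successful build.

```python
import time

import requests

JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder"


def wait_for_build_completed(session):
    # Without this delay, lastBuild may still point at the previous run,
    # which already finished, so the caller would download stale artifacts.
    time.sleep(3)
    while True:
        response = session.get(
            "{0}/lastBuild/api/json/".format(JENKINS_URL),
            headers={"Accept": "application/json"},
        )
        response.raise_for_status()
        if not response.json()["building"]:
            assert response.json()["result"] == "SUCCESS"
            break
        time.sleep(0.1)


# Usage sketch: wait_for_build_completed(requests.Session())
```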
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport os\nimport time\n\nimport invoke\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef wait_for_build_completed(session):\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for run in response.json()[\"runs\"]:\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"])\n )\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(response.content)\n paths.append(out_path)\n return paths\n\n\[email protected]\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n invoke.run(\"git tag -s {0} -m '{0} release'\".format(version))\n invoke.run(\"git push --tags\")\n\n invoke.run(\"python setup.py sdist\")\n invoke.run(\"cd vectors/ && python setup.py sdist bdist_wheel\")\n\n invoke.run(\n \"twine upload -s dist/cryptography-{0}* \"\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n invoke.run(\"twine upload {0}\".format(\" \".join(paths)))\n", "path": "tasks.py" } ]
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport os\nimport time\n\nimport invoke\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef wait_for_build_completed(session):\n # Wait 3 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(3)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for run in response.json()[\"runs\"]:\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"])\n )\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(response.content)\n paths.append(out_path)\n return paths\n\n\[email protected]\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n invoke.run(\"git tag -s {0} -m '{0} release'\".format(version))\n invoke.run(\"git push --tags\")\n\n invoke.run(\"python setup.py sdist\")\n invoke.run(\"cd vectors/ && python setup.py sdist bdist_wheel\")\n\n invoke.run(\n \"twine upload -s dist/cryptography-{0}* \"\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n invoke.run(\"twine upload {0}\".format(\" \".join(paths)))\n", "path": "tasks.py" } ]
diff --git a/tasks.py b/tasks.py
index 2dd005ba2594..c109f14974b8 100644
--- a/tasks.py
+++ b/tasks.py
@@ -17,6 +17,9 @@
 
 
 def wait_for_build_completed(session):
+    # Wait 3 seconds before actually checking if the build is complete, to
+    # ensure that it had time to really start.
+    time.sleep(3)
     while True:
         response = session.get(
             "{0}/lastBuild/api/json/".format(JENKINS_URL),
ckan__ckan-6110
Plugin order for translations is reversed

**CKAN version**
2.8, 2.9, master

**Describe the bug**
If the developer has multiple plugins implementing the ITranslation interface and the same translation keys appear in several of them, the last plugin wins.

**Steps to reproduce**
Create two plugins with the ITranslation interface and the same translation key. Translations from the last plugin will be used.

**Expected behavior**
Translations from the first plugin should be used, as the common convention is that the first plugin wins.

**Additional details**
We made https://github.com/vrk-kpa/ckanext-forcetranslation a couple of years ago to work around this: a simple plugin that allows choosing which plugin's translations to use.
Related bug in https://github.com/ckan/ckanext-harvest/issues/266, which in essence is caused by the same thing.
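A small, self-contained sketch of why the iteration order matters (the dictionaries below stand in for plugin translation catalogs and are purely illustrative, not CKAN's actual Babel machinery): when catalogs are merged in plugin order and later entries overwrite earlier ones, the last plugin wins, so iterating the plugins in reverse, as the fix in the files below does, restores first-plugin priority.

```python
# Each dict stands in for one ITranslation plugin's catalog, listed in
# plugin-load order (first plugin first). Purely illustrative data.
plugin_catalogs = [
    {"greeting": "Hello from plugin one"},  # first plugin
    {"greeting": "Hello from plugin two"},  # last plugin
]


def merge(catalogs):
    merged = {}
    for catalog in catalogs:  # later catalogs overwrite earlier keys
        merged.update(catalog)
    return merged


# Merging in load order: the last plugin's translation wins (the bug).
assert merge(plugin_catalogs)["greeting"] == "Hello from plugin two"

# Merging in reversed order: the first plugin's translation wins (the fix).
assert merge(reversed(plugin_catalogs))["greeting"] == "Hello from plugin one"
```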
[ { "content": "# encoding: utf-8\n\nimport os\nimport sys\nimport re\nimport time\nimport inspect\nimport itertools\nimport pkgutil\n\nfrom flask import Blueprint, send_from_directory\nfrom flask.ctx import _AppCtxGlobals\nfrom flask.sessions import SessionInterface\nfrom flask_multistatic import MultiStaticFlask\n\nimport six\nimport webob\n\nfrom werkzeug.exceptions import default_exceptions, HTTPException\nfrom werkzeug.routing import Rule\n\nfrom flask_babel import Babel\n\nfrom beaker.middleware import SessionMiddleware\nfrom ckan.common import asbool\nfrom repoze.who.config import WhoConfig\nfrom repoze.who.middleware import PluggableAuthenticationMiddleware\n\nimport ckan.model as model\nfrom ckan.lib import base\nfrom ckan.lib import helpers\nfrom ckan.lib import jinja_extensions\nfrom ckan.lib import uploader\nfrom ckan.lib import i18n\nfrom ckan.common import config, g, request, ungettext\nfrom ckan.config.middleware.common_middleware import (TrackingMiddleware,\n HostHeaderMiddleware)\nimport ckan.lib.app_globals as app_globals\nimport ckan.lib.plugins as lib_plugins\nimport ckan.plugins.toolkit as toolkit\nfrom ckan.lib.webassets_tools import get_webassets_path\n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import IBlueprint, IMiddleware, ITranslation\nfrom ckan.views import (identify_user,\n set_cors_headers_for_response,\n check_session_cookie,\n set_controller_and_action,\n set_cache_control_headers_for_response,\n handle_i18n,\n set_ckan_current_url,\n )\n\nimport logging\nfrom logging.handlers import SMTPHandler\nlog = logging.getLogger(__name__)\n\n\nclass I18nMiddleware(object):\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n\n handle_i18n(environ)\n return self.app(environ, start_response)\n\n\nclass RepozeAdapterMiddleware(object):\n \"\"\"When repoze.who interrupts requrests for anonymous user because of\n insufficient permission, it closes requrest stream and make an\n attempt to return response to user as quick as possible. But when\n werkzeug sees POST request with some payload it tries to parse\n request data and it leads to BadRequests(400), because there is no\n way to parse closed request stream. 
This middlewary just\n reproduces part of internal Fanstatic bevavior: don't drop request\n stream while response is written to the client.\n\n The middleware only requred because of repoze.who and it should be\n removed as soon as PluggableAuthenticationMiddlewary is replaced\n with some alternative solution.\n\n \"\"\"\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n request = webob.Request(environ)\n response = request.get_response(self.app)\n return response(environ, start_response)\n\n\nclass CKANBabel(Babel):\n def __init__(self, *pargs, **kwargs):\n super(CKANBabel, self).__init__(*pargs, **kwargs)\n self._i18n_path_idx = 0\n\n @property\n def domain(self):\n default = super(CKANBabel, self).domain\n multiple = self.app.config.get('BABEL_MULTIPLE_DOMAINS')\n if not multiple:\n return default\n domains = multiple.split(';')\n try:\n return domains[self._i18n_path_idx]\n except IndexError:\n return default\n\n @property\n def translation_directories(self):\n self._i18n_path_idx = 0\n for path in super(CKANBabel, self).translation_directories:\n yield path\n self._i18n_path_idx += 1\n\n\ndef make_flask_stack(conf):\n \"\"\" This has to pass the flask app through all the same middleware that\n Pylons used \"\"\"\n\n root = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n debug = asbool(conf.get('debug', conf.get('DEBUG', False)))\n testing = asbool(conf.get('testing', conf.get('TESTING', False)))\n app = flask_app = CKANFlask(__name__, static_url_path='')\n\n # Register storage for accessing group images, site logo, etc.\n storage_folder = []\n storage = uploader.get_storage_path()\n if storage:\n storage_folder = [os.path.join(storage, 'storage')]\n\n # Static files folders (core and extensions)\n public_folder = config.get(u'ckan.base_public_folder')\n app.static_folder = config.get(\n 'extra_public_paths', ''\n ).split(',') + [os.path.join(root, public_folder)] + storage_folder\n\n app.jinja_options = jinja_extensions.get_jinja_env_options()\n app.jinja_env.policies['ext.i18n.trimmed'] = True\n\n app.debug = debug\n app.testing = testing\n app.template_folder = os.path.join(root, 'templates')\n app.app_ctx_globals_class = CKAN_AppCtxGlobals\n app.url_rule_class = CKAN_Rule\n\n # Update Flask config with the CKAN values. We use the common config\n # object as values might have been modified on `load_environment`\n if config:\n app.config.update(config)\n else:\n app.config.update(conf)\n\n # Do all the Flask-specific stuff before adding other middlewares\n\n # Secret key needed for flask-debug-toolbar and sessions\n if not app.config.get('SECRET_KEY'):\n app.config['SECRET_KEY'] = config.get('beaker.session.secret')\n if not app.config.get('SECRET_KEY'):\n raise RuntimeError(u'You must provide a value for the secret key'\n ' with the SECRET_KEY config option')\n\n root_path = config.get('ckan.root_path', None)\n if debug:\n from flask_debugtoolbar import DebugToolbarExtension\n app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n debug_ext = DebugToolbarExtension()\n\n # register path that includes `ckan.site_root` before\n # initializing debug app. In such a way, our route receives\n # higher precedence.\n\n # TODO: After removal of Pylons code, switch to\n # `APPLICATION_ROOT` config value for flask application. 
Right\n # now it's a bad option because we are handling both pylons\n # and flask urls inside helpers and splitting this logic will\n # bring us tons of headache.\n if root_path:\n app.add_url_rule(\n root_path.replace('{{LANG}}', '').rstrip('/') +\n '/_debug_toolbar/static/<path:filename>',\n '_debug_toolbar.static', debug_ext.send_static_file\n )\n debug_ext.init_app(app)\n\n from werkzeug.debug import DebuggedApplication\n app.wsgi_app = DebuggedApplication(app.wsgi_app, True)\n\n # Use Beaker as the Flask session interface\n class BeakerSessionInterface(SessionInterface):\n def open_session(self, app, request):\n if 'beaker.session' in request.environ:\n return request.environ['beaker.session']\n\n def save_session(self, app, session, response):\n session.save()\n\n namespace = 'beaker.session.'\n session_opts = {k.replace('beaker.', ''): v\n for k, v in six.iteritems(config)\n if k.startswith(namespace)}\n if (not session_opts.get('session.data_dir') and\n session_opts.get('session.type', 'file') == 'file'):\n cache_dir = conf.get('cache_dir') or conf.get('cache.dir')\n session_opts['session.data_dir'] = '{data_dir}/sessions'.format(\n data_dir=cache_dir)\n\n app.wsgi_app = SessionMiddleware(app.wsgi_app, session_opts)\n app.session_interface = BeakerSessionInterface()\n\n # Add Jinja2 extensions and filters\n app.jinja_env.filters['empty_and_escape'] = \\\n jinja_extensions.empty_and_escape\n\n # Common handlers for all requests\n app.before_request(ckan_before_request)\n app.after_request(ckan_after_request)\n\n # Template context processors\n app.context_processor(helper_functions)\n app.context_processor(c_object)\n\n @app.context_processor\n def ungettext_alias():\n u'''\n Provide `ungettext` as an alias of `ngettext` for backwards\n compatibility\n '''\n return dict(ungettext=ungettext)\n\n # Babel\n _ckan_i18n_dir = i18n.get_ckan_i18n_dir()\n\n pairs = [\n (_ckan_i18n_dir, u'ckan')\n ] + [\n (p.i18n_directory(), p.i18n_domain())\n for p in PluginImplementations(ITranslation)\n ]\n\n i18n_dirs, i18n_domains = zip(*pairs)\n\n app.config[u'BABEL_TRANSLATION_DIRECTORIES'] = ';'.join(i18n_dirs)\n app.config[u'BABEL_DOMAIN'] = 'ckan'\n app.config[u'BABEL_MULTIPLE_DOMAINS'] = ';'.join(i18n_domains)\n\n babel = CKANBabel(app)\n\n babel.localeselector(get_locale)\n\n # WebAssets\n _setup_webassets(app)\n\n # Auto-register all blueprints defined in the `views` folder\n _register_core_blueprints(app)\n _register_error_handler(app)\n\n # Set up each IBlueprint extension as a Flask Blueprint\n for plugin in PluginImplementations(IBlueprint):\n if hasattr(plugin, 'get_blueprint'):\n plugin_blueprints = plugin.get_blueprint()\n if not isinstance(plugin_blueprints, list):\n plugin_blueprints = [plugin_blueprints]\n for blueprint in plugin_blueprints:\n app.register_extension_blueprint(blueprint)\n\n lib_plugins.register_package_blueprints(app)\n lib_plugins.register_group_blueprints(app)\n\n # Set flask routes in named_routes\n # TODO: refactor whatever helper is using this to not do it\n if 'routes.named_routes' not in config:\n config['routes.named_routes'] = {}\n for rule in app.url_map.iter_rules():\n if '.' 
not in rule.endpoint:\n continue\n controller, action = rule.endpoint.split('.')\n needed = list(rule.arguments - set(rule.defaults or {}))\n route = {\n rule.endpoint: {\n 'action': action,\n 'controller': controller,\n 'highlight_actions': action,\n 'needed': needed\n }\n }\n config['routes.named_routes'].update(route)\n\n # Start other middleware\n for plugin in PluginImplementations(IMiddleware):\n app = plugin.make_middleware(app, config)\n\n for plugin in PluginImplementations(IMiddleware):\n try:\n app = plugin.make_error_log_middleware(app, config)\n except AttributeError:\n log.critical('Middleware class {0} is missing the method'\n 'make_error_log_middleware.'\n .format(plugin.__class__.__name__))\n\n # Initialize repoze.who\n who_parser = WhoConfig(conf['here'])\n who_parser.parse(open(conf['who.config_file']))\n\n app = PluggableAuthenticationMiddleware(\n RepozeAdapterMiddleware(app),\n who_parser.identifiers,\n who_parser.authenticators,\n who_parser.challengers,\n who_parser.mdproviders,\n who_parser.request_classifier,\n who_parser.challenge_decider,\n logging.getLogger('repoze.who'),\n logging.WARN, # ignored\n who_parser.remote_user_key\n )\n\n # Update the main CKAN config object with the Flask specific keys\n # that were set here or autogenerated\n flask_config_keys = set(flask_app.config.keys()) - set(config.keys())\n for key in flask_config_keys:\n config[key] = flask_app.config[key]\n\n # Prevent the host from request to be added to the new header location.\n app = HostHeaderMiddleware(app)\n if six.PY3:\n app = I18nMiddleware(app)\n\n if asbool(config.get('ckan.tracking_enabled', 'false')):\n app = TrackingMiddleware(app, config)\n\n # Add a reference to the actual Flask app so it's easier to access\n app._wsgi_app = flask_app\n\n return app\n\n\ndef get_locale():\n u'''\n Return the value of the `CKAN_LANG` key of the WSGI environ,\n set by the I18nMiddleware based on the URL.\n If no value is defined, it defaults to `ckan.locale_default` or `en`.\n '''\n return request.environ.get(\n u'CKAN_LANG',\n config.get(u'ckan.locale_default', u'en'))\n\n\ndef ckan_before_request():\n u'''\n Common handler executed before all Flask requests\n\n If a response is returned by any of the functions called (\n currently ``identify_user()` only) any further processing of the\n request will be stopped and that response will be returned.\n\n '''\n response = None\n\n # Update app_globals\n app_globals.app_globals._check_uptodate()\n\n # Identify the user from the repoze cookie or the API header\n # Sets g.user and g.userobj\n response = identify_user()\n\n # Provide g.controller and g.action for backward compatibility\n # with extensions\n set_controller_and_action()\n\n set_ckan_current_url(request.environ)\n g.__timer = time.time()\n\n return response\n\n\ndef ckan_after_request(response):\n u'''Common handler executed after all Flask requests'''\n\n # Dispose of the SQLALchemy session\n model.Session.remove()\n\n # Check session cookie\n response = check_session_cookie(response)\n\n # Set CORS headers if necessary\n response = set_cors_headers_for_response(response)\n\n # Set Cache Control headers\n response = set_cache_control_headers_for_response(response)\n\n r_time = time.time() - g.__timer\n url = request.environ['PATH_INFO']\n\n log.info(' %s render time %.3f seconds' % (url, r_time))\n\n return response\n\n\ndef helper_functions():\n u'''Make helper functions (`h`) available to Flask templates'''\n if not helpers.helper_functions:\n helpers.load_plugin_helpers()\n return 
dict(h=helpers.helper_functions)\n\n\ndef c_object():\n u'''\n Expose `c` as an alias of `g` in templates for backwards compatibility\n '''\n return dict(c=g)\n\n\nclass CKAN_Rule(Rule):\n\n u'''Custom Flask url_rule_class.\n\n We use it to be able to flag routes defined in extensions as such\n '''\n\n def __init__(self, *args, **kwargs):\n self.ckan_core = True\n super(CKAN_Rule, self).__init__(*args, **kwargs)\n\n\nclass CKAN_AppCtxGlobals(_AppCtxGlobals):\n\n '''Custom Flask AppCtxGlobal class (flask.g).'''\n\n def __getattr__(self, name):\n '''\n If flask.g doesn't have attribute `name`, fall back to CKAN's\n app_globals object.\n If the key is also not found in there, an AttributeError will be raised\n '''\n return getattr(app_globals.app_globals, name)\n\n\nclass CKANFlask(MultiStaticFlask):\n\n '''Extend the Flask class with a special method called on incoming\n requests by AskAppDispatcherMiddleware.\n '''\n\n app_name = 'flask_app'\n\n def can_handle_request(self, environ):\n '''\n Decides whether it can handle a request with the Flask app by\n matching the request environ against the route mapper\n\n Returns (True, 'flask_app', origin) if this is the case.\n\n `origin` can be either 'core' or 'extension' depending on where\n the route was defined.\n '''\n urls = self.url_map.bind_to_environ(environ)\n\n try:\n rule, args = urls.match(return_rule=True)\n origin = 'core'\n if hasattr(rule, 'ckan_core') and not rule.ckan_core:\n origin = 'extension'\n log.debug('Flask route match, endpoint: {0}, args: {1}, '\n 'origin: {2}'.format(rule.endpoint, args, origin))\n\n # Disable built-in flask's ability to prepend site root to\n # generated url, as we are going to use locale and existing\n # logic is not flexible enough for this purpose\n environ['SCRIPT_NAME'] = ''\n\n return (True, self.app_name, origin)\n except HTTPException:\n return (False, self.app_name)\n\n def register_extension_blueprint(self, blueprint, **kwargs):\n '''\n This method should be used to register blueprints that come from\n extensions, so there's an opportunity to add extension-specific\n options.\n\n Sets the rule property `ckan_core` to False, to indicate that the rule\n applies to an extension route.\n '''\n self.register_blueprint(blueprint, **kwargs)\n\n # Get the new blueprint rules\n bp_rules = itertools.chain.from_iterable(\n v for k, v in six.iteritems(self.url_map._rules_by_endpoint)\n if k.startswith(u'{0}.'.format(blueprint.name))\n )\n\n # This compare key will ensure the rule will be near the top.\n top_compare_key = False, -100, [(-2, 0)]\n for r in bp_rules:\n r.ckan_core = False\n r.match_compare_key = lambda: top_compare_key\n\n\ndef _register_core_blueprints(app):\n u'''Register all blueprints defined in the `views` folder\n '''\n def is_blueprint(mm):\n return isinstance(mm, Blueprint)\n\n path = os.path.join(os.path.dirname(__file__), '..', '..', 'views')\n\n for loader, name, _ in pkgutil.iter_modules([path], 'ckan.views.'):\n module = loader.find_module(name).load_module(name)\n for blueprint in inspect.getmembers(module, is_blueprint):\n app.register_blueprint(blueprint[1])\n log.debug(u'Registered core blueprint: {0!r}'.format(blueprint[0]))\n\n\ndef _register_error_handler(app):\n u'''Register error handler'''\n\n def error_handler(e):\n log.error(e, exc_info=sys.exc_info)\n if isinstance(e, HTTPException):\n extra_vars = {\n u'code': e.code,\n u'content': e.description,\n u'name': e.name\n }\n\n return base.render(\n u'error_document_template.html', extra_vars), e.code\n extra_vars = 
{u'code': [500], u'content': u'Internal server error'}\n return base.render(u'error_document_template.html', extra_vars), 500\n\n for code in default_exceptions:\n app.register_error_handler(code, error_handler)\n if not app.debug and not app.testing:\n app.register_error_handler(Exception, error_handler)\n if config.get('email_to'):\n _setup_error_mail_handler(app)\n\n\ndef _setup_error_mail_handler(app):\n\n class ContextualFilter(logging.Filter):\n def filter(self, log_record):\n log_record.url = request.path\n log_record.method = request.method\n log_record.ip = request.environ.get(\"REMOTE_ADDR\")\n log_record.headers = request.headers\n return True\n\n smtp_server = config.get('smtp.server', 'localhost')\n mailhost = tuple(smtp_server.split(':')) \\\n if ':' in smtp_server else smtp_server\n credentials = None\n if config.get('smtp.user'):\n credentials = (config.get('smtp.user'), config.get('smtp.password'))\n secure = () if asbool(config.get('smtp.starttls')) else None\n mail_handler = SMTPHandler(\n mailhost=mailhost,\n fromaddr=config.get('error_email_from'),\n toaddrs=[config.get('email_to')],\n subject='Application Error',\n credentials=credentials,\n secure=secure\n )\n\n mail_handler.setFormatter(logging.Formatter('''\nTime: %(asctime)s\nURL: %(url)s\nMethod: %(method)s\nIP: %(ip)s\nHeaders: %(headers)s\n\n'''))\n\n context_provider = ContextualFilter()\n app.logger.addFilter(context_provider)\n app.logger.addHandler(mail_handler)\n\n\ndef _setup_webassets(app):\n app.use_x_sendfile = toolkit.asbool(\n config.get('ckan.webassets.use_x_sendfile')\n )\n\n webassets_folder = get_webassets_path()\n\n @app.route('/webassets/<path:path>', endpoint='webassets.index')\n def webassets(path):\n return send_from_directory(webassets_folder, path)\n", "path": "ckan/config/middleware/flask_app.py" } ]
[ { "content": "# encoding: utf-8\n\nimport os\nimport sys\nimport re\nimport time\nimport inspect\nimport itertools\nimport pkgutil\n\nfrom flask import Blueprint, send_from_directory\nfrom flask.ctx import _AppCtxGlobals\nfrom flask.sessions import SessionInterface\nfrom flask_multistatic import MultiStaticFlask\n\nimport six\nimport webob\n\nfrom werkzeug.exceptions import default_exceptions, HTTPException\nfrom werkzeug.routing import Rule\n\nfrom flask_babel import Babel\n\nfrom beaker.middleware import SessionMiddleware\nfrom ckan.common import asbool\nfrom repoze.who.config import WhoConfig\nfrom repoze.who.middleware import PluggableAuthenticationMiddleware\n\nimport ckan.model as model\nfrom ckan.lib import base\nfrom ckan.lib import helpers\nfrom ckan.lib import jinja_extensions\nfrom ckan.lib import uploader\nfrom ckan.lib import i18n\nfrom ckan.common import config, g, request, ungettext\nfrom ckan.config.middleware.common_middleware import (TrackingMiddleware,\n HostHeaderMiddleware)\nimport ckan.lib.app_globals as app_globals\nimport ckan.lib.plugins as lib_plugins\nimport ckan.plugins.toolkit as toolkit\nfrom ckan.lib.webassets_tools import get_webassets_path\n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import IBlueprint, IMiddleware, ITranslation\nfrom ckan.views import (identify_user,\n set_cors_headers_for_response,\n check_session_cookie,\n set_controller_and_action,\n set_cache_control_headers_for_response,\n handle_i18n,\n set_ckan_current_url,\n )\n\nimport logging\nfrom logging.handlers import SMTPHandler\nlog = logging.getLogger(__name__)\n\n\nclass I18nMiddleware(object):\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n\n handle_i18n(environ)\n return self.app(environ, start_response)\n\n\nclass RepozeAdapterMiddleware(object):\n \"\"\"When repoze.who interrupts requrests for anonymous user because of\n insufficient permission, it closes requrest stream and make an\n attempt to return response to user as quick as possible. But when\n werkzeug sees POST request with some payload it tries to parse\n request data and it leads to BadRequests(400), because there is no\n way to parse closed request stream. 
This middlewary just\n reproduces part of internal Fanstatic bevavior: don't drop request\n stream while response is written to the client.\n\n The middleware only requred because of repoze.who and it should be\n removed as soon as PluggableAuthenticationMiddlewary is replaced\n with some alternative solution.\n\n \"\"\"\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n request = webob.Request(environ)\n response = request.get_response(self.app)\n return response(environ, start_response)\n\n\nclass CKANBabel(Babel):\n def __init__(self, *pargs, **kwargs):\n super(CKANBabel, self).__init__(*pargs, **kwargs)\n self._i18n_path_idx = 0\n\n @property\n def domain(self):\n default = super(CKANBabel, self).domain\n multiple = self.app.config.get('BABEL_MULTIPLE_DOMAINS')\n if not multiple:\n return default\n domains = multiple.split(';')\n try:\n return domains[self._i18n_path_idx]\n except IndexError:\n return default\n\n @property\n def translation_directories(self):\n self._i18n_path_idx = 0\n for path in super(CKANBabel, self).translation_directories:\n yield path\n self._i18n_path_idx += 1\n\n\ndef make_flask_stack(conf):\n \"\"\" This has to pass the flask app through all the same middleware that\n Pylons used \"\"\"\n\n root = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n debug = asbool(conf.get('debug', conf.get('DEBUG', False)))\n testing = asbool(conf.get('testing', conf.get('TESTING', False)))\n app = flask_app = CKANFlask(__name__, static_url_path='')\n\n # Register storage for accessing group images, site logo, etc.\n storage_folder = []\n storage = uploader.get_storage_path()\n if storage:\n storage_folder = [os.path.join(storage, 'storage')]\n\n # Static files folders (core and extensions)\n public_folder = config.get(u'ckan.base_public_folder')\n app.static_folder = config.get(\n 'extra_public_paths', ''\n ).split(',') + [os.path.join(root, public_folder)] + storage_folder\n\n app.jinja_options = jinja_extensions.get_jinja_env_options()\n app.jinja_env.policies['ext.i18n.trimmed'] = True\n\n app.debug = debug\n app.testing = testing\n app.template_folder = os.path.join(root, 'templates')\n app.app_ctx_globals_class = CKAN_AppCtxGlobals\n app.url_rule_class = CKAN_Rule\n\n # Update Flask config with the CKAN values. We use the common config\n # object as values might have been modified on `load_environment`\n if config:\n app.config.update(config)\n else:\n app.config.update(conf)\n\n # Do all the Flask-specific stuff before adding other middlewares\n\n # Secret key needed for flask-debug-toolbar and sessions\n if not app.config.get('SECRET_KEY'):\n app.config['SECRET_KEY'] = config.get('beaker.session.secret')\n if not app.config.get('SECRET_KEY'):\n raise RuntimeError(u'You must provide a value for the secret key'\n ' with the SECRET_KEY config option')\n\n root_path = config.get('ckan.root_path', None)\n if debug:\n from flask_debugtoolbar import DebugToolbarExtension\n app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n debug_ext = DebugToolbarExtension()\n\n # register path that includes `ckan.site_root` before\n # initializing debug app. In such a way, our route receives\n # higher precedence.\n\n # TODO: After removal of Pylons code, switch to\n # `APPLICATION_ROOT` config value for flask application. 
Right\n # now it's a bad option because we are handling both pylons\n # and flask urls inside helpers and splitting this logic will\n # bring us tons of headache.\n if root_path:\n app.add_url_rule(\n root_path.replace('{{LANG}}', '').rstrip('/') +\n '/_debug_toolbar/static/<path:filename>',\n '_debug_toolbar.static', debug_ext.send_static_file\n )\n debug_ext.init_app(app)\n\n from werkzeug.debug import DebuggedApplication\n app.wsgi_app = DebuggedApplication(app.wsgi_app, True)\n\n # Use Beaker as the Flask session interface\n class BeakerSessionInterface(SessionInterface):\n def open_session(self, app, request):\n if 'beaker.session' in request.environ:\n return request.environ['beaker.session']\n\n def save_session(self, app, session, response):\n session.save()\n\n namespace = 'beaker.session.'\n session_opts = {k.replace('beaker.', ''): v\n for k, v in six.iteritems(config)\n if k.startswith(namespace)}\n if (not session_opts.get('session.data_dir') and\n session_opts.get('session.type', 'file') == 'file'):\n cache_dir = conf.get('cache_dir') or conf.get('cache.dir')\n session_opts['session.data_dir'] = '{data_dir}/sessions'.format(\n data_dir=cache_dir)\n\n app.wsgi_app = SessionMiddleware(app.wsgi_app, session_opts)\n app.session_interface = BeakerSessionInterface()\n\n # Add Jinja2 extensions and filters\n app.jinja_env.filters['empty_and_escape'] = \\\n jinja_extensions.empty_and_escape\n\n # Common handlers for all requests\n app.before_request(ckan_before_request)\n app.after_request(ckan_after_request)\n\n # Template context processors\n app.context_processor(helper_functions)\n app.context_processor(c_object)\n\n @app.context_processor\n def ungettext_alias():\n u'''\n Provide `ungettext` as an alias of `ngettext` for backwards\n compatibility\n '''\n return dict(ungettext=ungettext)\n\n # Babel\n _ckan_i18n_dir = i18n.get_ckan_i18n_dir()\n\n pairs = [\n (_ckan_i18n_dir, u'ckan')\n ] + [\n (p.i18n_directory(), p.i18n_domain())\n for p in reversed(list(PluginImplementations(ITranslation)))\n ]\n\n i18n_dirs, i18n_domains = zip(*pairs)\n\n app.config[u'BABEL_TRANSLATION_DIRECTORIES'] = ';'.join(i18n_dirs)\n app.config[u'BABEL_DOMAIN'] = 'ckan'\n app.config[u'BABEL_MULTIPLE_DOMAINS'] = ';'.join(i18n_domains)\n\n babel = CKANBabel(app)\n\n babel.localeselector(get_locale)\n\n # WebAssets\n _setup_webassets(app)\n\n # Auto-register all blueprints defined in the `views` folder\n _register_core_blueprints(app)\n _register_error_handler(app)\n\n # Set up each IBlueprint extension as a Flask Blueprint\n for plugin in PluginImplementations(IBlueprint):\n if hasattr(plugin, 'get_blueprint'):\n plugin_blueprints = plugin.get_blueprint()\n if not isinstance(plugin_blueprints, list):\n plugin_blueprints = [plugin_blueprints]\n for blueprint in plugin_blueprints:\n app.register_extension_blueprint(blueprint)\n\n lib_plugins.register_package_blueprints(app)\n lib_plugins.register_group_blueprints(app)\n\n # Set flask routes in named_routes\n # TODO: refactor whatever helper is using this to not do it\n if 'routes.named_routes' not in config:\n config['routes.named_routes'] = {}\n for rule in app.url_map.iter_rules():\n if '.' 
not in rule.endpoint:\n continue\n controller, action = rule.endpoint.split('.')\n needed = list(rule.arguments - set(rule.defaults or {}))\n route = {\n rule.endpoint: {\n 'action': action,\n 'controller': controller,\n 'highlight_actions': action,\n 'needed': needed\n }\n }\n config['routes.named_routes'].update(route)\n\n # Start other middleware\n for plugin in PluginImplementations(IMiddleware):\n app = plugin.make_middleware(app, config)\n\n for plugin in PluginImplementations(IMiddleware):\n try:\n app = plugin.make_error_log_middleware(app, config)\n except AttributeError:\n log.critical('Middleware class {0} is missing the method'\n 'make_error_log_middleware.'\n .format(plugin.__class__.__name__))\n\n # Initialize repoze.who\n who_parser = WhoConfig(conf['here'])\n who_parser.parse(open(conf['who.config_file']))\n\n app = PluggableAuthenticationMiddleware(\n RepozeAdapterMiddleware(app),\n who_parser.identifiers,\n who_parser.authenticators,\n who_parser.challengers,\n who_parser.mdproviders,\n who_parser.request_classifier,\n who_parser.challenge_decider,\n logging.getLogger('repoze.who'),\n logging.WARN, # ignored\n who_parser.remote_user_key\n )\n\n # Update the main CKAN config object with the Flask specific keys\n # that were set here or autogenerated\n flask_config_keys = set(flask_app.config.keys()) - set(config.keys())\n for key in flask_config_keys:\n config[key] = flask_app.config[key]\n\n # Prevent the host from request to be added to the new header location.\n app = HostHeaderMiddleware(app)\n if six.PY3:\n app = I18nMiddleware(app)\n\n if asbool(config.get('ckan.tracking_enabled', 'false')):\n app = TrackingMiddleware(app, config)\n\n # Add a reference to the actual Flask app so it's easier to access\n app._wsgi_app = flask_app\n\n return app\n\n\ndef get_locale():\n u'''\n Return the value of the `CKAN_LANG` key of the WSGI environ,\n set by the I18nMiddleware based on the URL.\n If no value is defined, it defaults to `ckan.locale_default` or `en`.\n '''\n return request.environ.get(\n u'CKAN_LANG',\n config.get(u'ckan.locale_default', u'en'))\n\n\ndef ckan_before_request():\n u'''\n Common handler executed before all Flask requests\n\n If a response is returned by any of the functions called (\n currently ``identify_user()` only) any further processing of the\n request will be stopped and that response will be returned.\n\n '''\n response = None\n\n # Update app_globals\n app_globals.app_globals._check_uptodate()\n\n # Identify the user from the repoze cookie or the API header\n # Sets g.user and g.userobj\n response = identify_user()\n\n # Provide g.controller and g.action for backward compatibility\n # with extensions\n set_controller_and_action()\n\n set_ckan_current_url(request.environ)\n g.__timer = time.time()\n\n return response\n\n\ndef ckan_after_request(response):\n u'''Common handler executed after all Flask requests'''\n\n # Dispose of the SQLALchemy session\n model.Session.remove()\n\n # Check session cookie\n response = check_session_cookie(response)\n\n # Set CORS headers if necessary\n response = set_cors_headers_for_response(response)\n\n # Set Cache Control headers\n response = set_cache_control_headers_for_response(response)\n\n r_time = time.time() - g.__timer\n url = request.environ['PATH_INFO']\n\n log.info(' %s render time %.3f seconds' % (url, r_time))\n\n return response\n\n\ndef helper_functions():\n u'''Make helper functions (`h`) available to Flask templates'''\n if not helpers.helper_functions:\n helpers.load_plugin_helpers()\n return 
dict(h=helpers.helper_functions)\n\n\ndef c_object():\n u'''\n Expose `c` as an alias of `g` in templates for backwards compatibility\n '''\n return dict(c=g)\n\n\nclass CKAN_Rule(Rule):\n\n u'''Custom Flask url_rule_class.\n\n We use it to be able to flag routes defined in extensions as such\n '''\n\n def __init__(self, *args, **kwargs):\n self.ckan_core = True\n super(CKAN_Rule, self).__init__(*args, **kwargs)\n\n\nclass CKAN_AppCtxGlobals(_AppCtxGlobals):\n\n '''Custom Flask AppCtxGlobal class (flask.g).'''\n\n def __getattr__(self, name):\n '''\n If flask.g doesn't have attribute `name`, fall back to CKAN's\n app_globals object.\n If the key is also not found in there, an AttributeError will be raised\n '''\n return getattr(app_globals.app_globals, name)\n\n\nclass CKANFlask(MultiStaticFlask):\n\n '''Extend the Flask class with a special method called on incoming\n requests by AskAppDispatcherMiddleware.\n '''\n\n app_name = 'flask_app'\n\n def can_handle_request(self, environ):\n '''\n Decides whether it can handle a request with the Flask app by\n matching the request environ against the route mapper\n\n Returns (True, 'flask_app', origin) if this is the case.\n\n `origin` can be either 'core' or 'extension' depending on where\n the route was defined.\n '''\n urls = self.url_map.bind_to_environ(environ)\n\n try:\n rule, args = urls.match(return_rule=True)\n origin = 'core'\n if hasattr(rule, 'ckan_core') and not rule.ckan_core:\n origin = 'extension'\n log.debug('Flask route match, endpoint: {0}, args: {1}, '\n 'origin: {2}'.format(rule.endpoint, args, origin))\n\n # Disable built-in flask's ability to prepend site root to\n # generated url, as we are going to use locale and existing\n # logic is not flexible enough for this purpose\n environ['SCRIPT_NAME'] = ''\n\n return (True, self.app_name, origin)\n except HTTPException:\n return (False, self.app_name)\n\n def register_extension_blueprint(self, blueprint, **kwargs):\n '''\n This method should be used to register blueprints that come from\n extensions, so there's an opportunity to add extension-specific\n options.\n\n Sets the rule property `ckan_core` to False, to indicate that the rule\n applies to an extension route.\n '''\n self.register_blueprint(blueprint, **kwargs)\n\n # Get the new blueprint rules\n bp_rules = itertools.chain.from_iterable(\n v for k, v in six.iteritems(self.url_map._rules_by_endpoint)\n if k.startswith(u'{0}.'.format(blueprint.name))\n )\n\n # This compare key will ensure the rule will be near the top.\n top_compare_key = False, -100, [(-2, 0)]\n for r in bp_rules:\n r.ckan_core = False\n r.match_compare_key = lambda: top_compare_key\n\n\ndef _register_core_blueprints(app):\n u'''Register all blueprints defined in the `views` folder\n '''\n def is_blueprint(mm):\n return isinstance(mm, Blueprint)\n\n path = os.path.join(os.path.dirname(__file__), '..', '..', 'views')\n\n for loader, name, _ in pkgutil.iter_modules([path], 'ckan.views.'):\n module = loader.find_module(name).load_module(name)\n for blueprint in inspect.getmembers(module, is_blueprint):\n app.register_blueprint(blueprint[1])\n log.debug(u'Registered core blueprint: {0!r}'.format(blueprint[0]))\n\n\ndef _register_error_handler(app):\n u'''Register error handler'''\n\n def error_handler(e):\n log.error(e, exc_info=sys.exc_info)\n if isinstance(e, HTTPException):\n extra_vars = {\n u'code': e.code,\n u'content': e.description,\n u'name': e.name\n }\n\n return base.render(\n u'error_document_template.html', extra_vars), e.code\n extra_vars = 
{u'code': [500], u'content': u'Internal server error'}\n return base.render(u'error_document_template.html', extra_vars), 500\n\n for code in default_exceptions:\n app.register_error_handler(code, error_handler)\n if not app.debug and not app.testing:\n app.register_error_handler(Exception, error_handler)\n if config.get('email_to'):\n _setup_error_mail_handler(app)\n\n\ndef _setup_error_mail_handler(app):\n\n class ContextualFilter(logging.Filter):\n def filter(self, log_record):\n log_record.url = request.path\n log_record.method = request.method\n log_record.ip = request.environ.get(\"REMOTE_ADDR\")\n log_record.headers = request.headers\n return True\n\n smtp_server = config.get('smtp.server', 'localhost')\n mailhost = tuple(smtp_server.split(':')) \\\n if ':' in smtp_server else smtp_server\n credentials = None\n if config.get('smtp.user'):\n credentials = (config.get('smtp.user'), config.get('smtp.password'))\n secure = () if asbool(config.get('smtp.starttls')) else None\n mail_handler = SMTPHandler(\n mailhost=mailhost,\n fromaddr=config.get('error_email_from'),\n toaddrs=[config.get('email_to')],\n subject='Application Error',\n credentials=credentials,\n secure=secure\n )\n\n mail_handler.setFormatter(logging.Formatter('''\nTime: %(asctime)s\nURL: %(url)s\nMethod: %(method)s\nIP: %(ip)s\nHeaders: %(headers)s\n\n'''))\n\n context_provider = ContextualFilter()\n app.logger.addFilter(context_provider)\n app.logger.addHandler(mail_handler)\n\n\ndef _setup_webassets(app):\n app.use_x_sendfile = toolkit.asbool(\n config.get('ckan.webassets.use_x_sendfile')\n )\n\n webassets_folder = get_webassets_path()\n\n @app.route('/webassets/<path:path>', endpoint='webassets.index')\n def webassets(path):\n return send_from_directory(webassets_folder, path)\n", "path": "ckan/config/middleware/flask_app.py" } ]
diff --git a/ckan/config/middleware/flask_app.py b/ckan/config/middleware/flask_app.py index 2f15bcfe271..abc82b7cf9d 100644 --- a/ckan/config/middleware/flask_app.py +++ b/ckan/config/middleware/flask_app.py @@ -238,7 +238,7 @@ def ungettext_alias(): (_ckan_i18n_dir, u'ckan') ] + [ (p.i18n_directory(), p.i18n_domain()) - for p in PluginImplementations(ITranslation) + for p in reversed(list(PluginImplementations(ITranslation))) ] i18n_dirs, i18n_domains = zip(*pairs)
dask__dask-3076
Slicing list/array issue

Someone reported this slicing issue:

    da.zeros(5,chunks=5)[:,None][:,[0]*2].compute()

leads to:

```
Traceback (most recent call last):
  File "<ipython-input-154-e9a840dad4ae>", line 1, in <module>
    da.zeros(5,chunks=5)[:,None][:,[0]*2].compute()
  File "/dask/dask/base.py", line 135, in compute
    (result,) = compute(self, traverse=False, **kwargs)
  File "/dask/dask/base.py", line 329, in compute
    dsk = collections_to_dsk(variables, optimize_graph, **kwargs)
  File "/dask/dask/base.py", line 240, in collections_to_dsk
    for opt, (dsk, keys) in groups.items()))
  File "/dask/dask/base.py", line 240, in <genexpr>
    for opt, (dsk, keys) in groups.items()))
  File "/dask/dask/array/optimization.py", line 46, in optimize
    dsk5 = optimize_slices(dsk4)
  File "/dask/dask/array/optimization.py", line 134, in optimize_slices
    c_index = fuse_slice(b_index, a_index)
  File "/dask/dask/array/optimization.py", line 285, in fuse_slice
    result.append(fuse_slice(a[i], b[j]))  # Common case
  File "/dask/dask/array/optimization.py", line 223, in fuse_slice
    if a is None and b == slice(None, None):
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
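The traceback shows why this fails: `fuse_slice` compares the index against `slice(None, None)` before checking its type, and when the index is a NumPy fancy index such as `np.array([0, 0])` the `==` comparison yields an array whose truth value is ambiguous inside the `if`. A minimal standalone sketch of the type-guarded check (illustration only, not the dask code itself):

```python
import numpy as np


def is_full_slice(index) -> bool:
    """Return True only for a bare full slice, i.e. the index used by `x[:]`.

    Checking the type first means the `==` below is only ever evaluated
    between two slice objects, so an ndarray index can never trigger the
    ambiguous-truth-value ValueError shown in the traceback above.
    """
    return isinstance(index, slice) and index == slice(None, None)


assert is_full_slice(slice(None, None))
assert not is_full_slice(np.array([0, 0]))  # short-circuits on the type check
assert not is_full_slice([0, 0])
```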
[ { "content": "from __future__ import absolute_import, division, print_function\n\nfrom operator import getitem\n\nimport numpy as np\n\nfrom .core import getter, getter_nofancy, getter_inline\nfrom ..compatibility import zip_longest\nfrom ..core import flatten, reverse_dict\nfrom ..optimize import cull, fuse, inline_functions\nfrom ..utils import ensure_dict\n\n\n# All get* functions the optimizations know about\nGETTERS = (getter, getter_nofancy, getter_inline, getitem)\n# These get* functions aren't ever completely removed from the graph,\n# even if the index should be a no-op by numpy semantics. Some array-like's\n# don't completely follow semantics, making indexing always necessary.\nGETNOREMOVE = (getter, getter_nofancy)\n\n\ndef optimize(dsk, keys, fuse_keys=None, fast_functions=None,\n inline_functions_fast_functions=(getter_inline,), rename_fused_keys=True,\n **kwargs):\n \"\"\" Optimize dask for array computation\n\n 1. Cull tasks not necessary to evaluate keys\n 2. Remove full slicing, e.g. x[:]\n 3. Inline fast functions like getitem and np.transpose\n \"\"\"\n dsk = ensure_dict(dsk)\n keys = list(flatten(keys))\n if fast_functions is not None:\n inline_functions_fast_functions = fast_functions\n\n dsk2, dependencies = cull(dsk, keys)\n hold = hold_keys(dsk2, dependencies)\n\n dsk3, dependencies = fuse(dsk2, hold + keys + (fuse_keys or []),\n dependencies, rename_keys=rename_fused_keys)\n if inline_functions_fast_functions:\n dsk4 = inline_functions(dsk3, keys, dependencies=dependencies,\n fast_functions=inline_functions_fast_functions)\n else:\n dsk4 = dsk3\n dsk5 = optimize_slices(dsk4)\n\n return dsk5\n\n\ndef hold_keys(dsk, dependencies):\n \"\"\" Find keys to avoid fusion\n\n We don't want to fuse data present in the graph because it is easier to\n serialize as a raw value.\n\n We don't want to fuse chains after getitem/GETTERS because we want to\n move around only small pieces of data, rather than the underlying arrays.\n \"\"\"\n dependents = reverse_dict(dependencies)\n data = {k for k, v in dsk.items() if type(v) not in (tuple, str)}\n\n hold_keys = list(data)\n for dat in data:\n deps = dependents[dat]\n for dep in deps:\n task = dsk[dep]\n # If the task is a get* function, we walk up the chain, and stop\n # when there's either more than one dependent, or the dependent is\n # no longer a get* function or an alias. We then add the final\n # key to the list of keys not to fuse.\n if type(task) is tuple and task and task[0] in GETTERS:\n try:\n while len(dependents[dep]) == 1:\n new_dep = next(iter(dependents[dep]))\n new_task = dsk[new_dep]\n # If the task is a get* or an alias, continue up the\n # linear chain\n if new_task[0] in GETTERS or new_task in dsk:\n dep = new_dep\n else:\n break\n except (IndexError, TypeError):\n pass\n hold_keys.append(dep)\n return hold_keys\n\n\ndef optimize_slices(dsk):\n \"\"\" Optimize slices\n\n 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]\n 2. 
Remove full slices, like x[:] -> x\n\n See also:\n fuse_slice_dict\n \"\"\"\n fancy_ind_types = (list, np.ndarray)\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple and v[0] in GETTERS and len(v) in (3, 5):\n if len(v) == 3:\n get, a, a_index = v\n # getter defaults to asarray=True, getitem is semantically False\n a_asarray = get is not getitem\n a_lock = None\n else:\n get, a, a_index, a_asarray, a_lock = v\n while type(a) is tuple and a[0] in GETTERS and len(a) in (3, 5):\n if len(a) == 3:\n f2, b, b_index = a\n b_asarray = f2 is not getitem\n b_lock = None\n else:\n f2, b, b_index, b_asarray, b_lock = a\n\n if a_lock and a_lock is not b_lock:\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if type(a_index) is tuple:\n indices = b_index + a_index\n if (len(a_index) != len(b_index) and\n any(i is None for i in indices)):\n break\n if (f2 is getter_nofancy and\n any(isinstance(i, fancy_ind_types) for i in indices)):\n break\n elif (f2 is getter_nofancy and\n (type(a_index) in fancy_ind_types or\n type(b_index) in fancy_ind_types)):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n # rely on fact that nested gets never decrease in\n # strictness e.g. `(getter_nofancy, (getter, ...))` never\n # happens\n get = getter if f2 is getter_inline else f2\n except NotImplementedError:\n break\n a, a_index, a_lock = b, c_index, b_lock\n a_asarray |= b_asarray\n\n # Skip the get call if not from from_array and nothing to do\n if (get not in GETNOREMOVE and\n ((type(a_index) is slice and not a_index.start and\n a_index.stop is None and a_index.step is None) or\n (type(a_index) is tuple and\n all(type(s) is slice and not s.start and s.stop is None and\n s.step is None for s in a_index)))):\n dsk[k] = a\n elif get is getitem or (a_asarray and not a_lock):\n # default settings are fine, drop the extra parameters Since we\n # always fallback to inner `get` functions, `get is getitem`\n # can only occur if all gets are getitem, meaning all\n # parameters must be getitem defaults.\n dsk[k] = (get, a, a_index)\n else:\n dsk[k] = (get, a, a_index, a_asarray, a_lock)\n\n return dsk\n\n\ndef normalize_slice(s):\n \"\"\" Replace Nones in slices with integers\n\n >>> normalize_slice(slice(None, None, None))\n slice(0, None, 1)\n \"\"\"\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)\n\n\ndef check_for_nonfusible_fancy_indexing(fancy, normal):\n # Check for fancy indexing and normal indexing, where the fancy\n # indexed dimensions != normal indexed dimensions with integers. E.g.:\n # disallow things like:\n # x[:, [1, 2], :][0, :, :] -> x[0, [1, 2], :] or\n # x[0, :, :][:, [1, 2], :] -> x[0, [1, 2], :]\n for f, n in zip_longest(fancy, normal, fillvalue=slice(None)):\n if type(f) is not list and isinstance(n, int):\n raise NotImplementedError(\"Can't handle normal indexing with \"\n \"integers and fancy indexing if the \"\n \"integers and fancy indices don't \"\n \"align with the same dimensions.\")\n\n\ndef fuse_slice(a, b):\n \"\"\" Fuse stacked slices together\n\n Fuse a pair of repeated slices into a single slice:\n\n >>> fuse_slice(slice(1000, 2000), slice(10, 15))\n slice(1010, 1015, None)\n\n This also works for tuples of slices\n\n >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),\n ... 
(slice(10, 15), [5, 2]))\n (slice(110, 115, None), [150, 120])\n\n And a variety of other interesting cases\n\n >>> fuse_slice(slice(1000, 2000), 10) # integers\n 1010\n\n >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))\n slice(1050, 1100, 10)\n\n >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists\n [1005, 1010, 1015]\n\n >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP\n None\n \"\"\"\n # None only works if the second side is a full slice\n if a is None and b == slice(None, None):\n return None\n\n # Replace None with 0 and one in start and step\n if isinstance(a, slice):\n a = normalize_slice(a)\n if isinstance(b, slice):\n b = normalize_slice(b)\n\n if isinstance(a, slice) and isinstance(b, int):\n if b < 0:\n raise NotImplementedError()\n return a.start + b * a.step\n\n if isinstance(a, slice) and isinstance(b, slice):\n start = a.start + a.step * b.start\n if b.stop is not None:\n stop = a.start + a.step * b.stop\n else:\n stop = None\n if a.stop is not None:\n if stop is not None:\n stop = min(a.stop, stop)\n else:\n stop = a.stop\n step = a.step * b.step\n if step == 1:\n step = None\n return slice(start, stop, step)\n\n if isinstance(b, list):\n return [fuse_slice(a, bb) for bb in b]\n if isinstance(a, list) and isinstance(b, (int, slice)):\n return a[b]\n\n if isinstance(a, tuple) and not isinstance(b, tuple):\n b = (b,)\n\n # If given two tuples walk through both, being mindful of uneven sizes\n # and newaxes\n if isinstance(a, tuple) and isinstance(b, tuple):\n\n # Check for non-fusible cases with fancy-indexing\n a_has_lists = any(isinstance(item, list) for item in a)\n b_has_lists = any(isinstance(item, list) for item in b)\n if a_has_lists and b_has_lists:\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n elif a_has_lists:\n check_for_nonfusible_fancy_indexing(a, b)\n elif b_has_lists:\n check_for_nonfusible_fancy_indexing(b, a)\n\n j = 0\n result = list()\n for i in range(len(a)):\n # axis ceased to exist or we're out of b\n if isinstance(a[i], int) or j == len(b):\n result.append(a[i])\n continue\n while b[j] is None: # insert any Nones on the rhs\n result.append(None)\n j += 1\n result.append(fuse_slice(a[i], b[j])) # Common case\n j += 1\n while j < len(b): # anything leftover on the right?\n result.append(b[j])\n j += 1\n return tuple(result)\n raise NotImplementedError()\n", "path": "dask/array/optimization.py" } ]
[ { "content": "from __future__ import absolute_import, division, print_function\n\nfrom operator import getitem\n\nimport numpy as np\n\nfrom .core import getter, getter_nofancy, getter_inline\nfrom ..compatibility import zip_longest\nfrom ..core import flatten, reverse_dict\nfrom ..optimize import cull, fuse, inline_functions\nfrom ..utils import ensure_dict\n\n\n# All get* functions the optimizations know about\nGETTERS = (getter, getter_nofancy, getter_inline, getitem)\n# These get* functions aren't ever completely removed from the graph,\n# even if the index should be a no-op by numpy semantics. Some array-like's\n# don't completely follow semantics, making indexing always necessary.\nGETNOREMOVE = (getter, getter_nofancy)\n\n\ndef optimize(dsk, keys, fuse_keys=None, fast_functions=None,\n inline_functions_fast_functions=(getter_inline,), rename_fused_keys=True,\n **kwargs):\n \"\"\" Optimize dask for array computation\n\n 1. Cull tasks not necessary to evaluate keys\n 2. Remove full slicing, e.g. x[:]\n 3. Inline fast functions like getitem and np.transpose\n \"\"\"\n dsk = ensure_dict(dsk)\n keys = list(flatten(keys))\n if fast_functions is not None:\n inline_functions_fast_functions = fast_functions\n\n dsk2, dependencies = cull(dsk, keys)\n hold = hold_keys(dsk2, dependencies)\n\n dsk3, dependencies = fuse(dsk2, hold + keys + (fuse_keys or []),\n dependencies, rename_keys=rename_fused_keys)\n if inline_functions_fast_functions:\n dsk4 = inline_functions(dsk3, keys, dependencies=dependencies,\n fast_functions=inline_functions_fast_functions)\n else:\n dsk4 = dsk3\n dsk5 = optimize_slices(dsk4)\n\n return dsk5\n\n\ndef hold_keys(dsk, dependencies):\n \"\"\" Find keys to avoid fusion\n\n We don't want to fuse data present in the graph because it is easier to\n serialize as a raw value.\n\n We don't want to fuse chains after getitem/GETTERS because we want to\n move around only small pieces of data, rather than the underlying arrays.\n \"\"\"\n dependents = reverse_dict(dependencies)\n data = {k for k, v in dsk.items() if type(v) not in (tuple, str)}\n\n hold_keys = list(data)\n for dat in data:\n deps = dependents[dat]\n for dep in deps:\n task = dsk[dep]\n # If the task is a get* function, we walk up the chain, and stop\n # when there's either more than one dependent, or the dependent is\n # no longer a get* function or an alias. We then add the final\n # key to the list of keys not to fuse.\n if type(task) is tuple and task and task[0] in GETTERS:\n try:\n while len(dependents[dep]) == 1:\n new_dep = next(iter(dependents[dep]))\n new_task = dsk[new_dep]\n # If the task is a get* or an alias, continue up the\n # linear chain\n if new_task[0] in GETTERS or new_task in dsk:\n dep = new_dep\n else:\n break\n except (IndexError, TypeError):\n pass\n hold_keys.append(dep)\n return hold_keys\n\n\ndef optimize_slices(dsk):\n \"\"\" Optimize slices\n\n 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]\n 2. 
Remove full slices, like x[:] -> x\n\n See also:\n fuse_slice_dict\n \"\"\"\n fancy_ind_types = (list, np.ndarray)\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple and v[0] in GETTERS and len(v) in (3, 5):\n if len(v) == 3:\n get, a, a_index = v\n # getter defaults to asarray=True, getitem is semantically False\n a_asarray = get is not getitem\n a_lock = None\n else:\n get, a, a_index, a_asarray, a_lock = v\n while type(a) is tuple and a[0] in GETTERS and len(a) in (3, 5):\n if len(a) == 3:\n f2, b, b_index = a\n b_asarray = f2 is not getitem\n b_lock = None\n else:\n f2, b, b_index, b_asarray, b_lock = a\n\n if a_lock and a_lock is not b_lock:\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if type(a_index) is tuple:\n indices = b_index + a_index\n if (len(a_index) != len(b_index) and\n any(i is None for i in indices)):\n break\n if (f2 is getter_nofancy and\n any(isinstance(i, fancy_ind_types) for i in indices)):\n break\n elif (f2 is getter_nofancy and\n (type(a_index) in fancy_ind_types or\n type(b_index) in fancy_ind_types)):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n # rely on fact that nested gets never decrease in\n # strictness e.g. `(getter_nofancy, (getter, ...))` never\n # happens\n get = getter if f2 is getter_inline else f2\n except NotImplementedError:\n break\n a, a_index, a_lock = b, c_index, b_lock\n a_asarray |= b_asarray\n\n # Skip the get call if not from from_array and nothing to do\n if (get not in GETNOREMOVE and\n ((type(a_index) is slice and not a_index.start and\n a_index.stop is None and a_index.step is None) or\n (type(a_index) is tuple and\n all(type(s) is slice and not s.start and s.stop is None and\n s.step is None for s in a_index)))):\n dsk[k] = a\n elif get is getitem or (a_asarray and not a_lock):\n # default settings are fine, drop the extra parameters Since we\n # always fallback to inner `get` functions, `get is getitem`\n # can only occur if all gets are getitem, meaning all\n # parameters must be getitem defaults.\n dsk[k] = (get, a, a_index)\n else:\n dsk[k] = (get, a, a_index, a_asarray, a_lock)\n\n return dsk\n\n\ndef normalize_slice(s):\n \"\"\" Replace Nones in slices with integers\n\n >>> normalize_slice(slice(None, None, None))\n slice(0, None, 1)\n \"\"\"\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)\n\n\ndef check_for_nonfusible_fancy_indexing(fancy, normal):\n # Check for fancy indexing and normal indexing, where the fancy\n # indexed dimensions != normal indexed dimensions with integers. E.g.:\n # disallow things like:\n # x[:, [1, 2], :][0, :, :] -> x[0, [1, 2], :] or\n # x[0, :, :][:, [1, 2], :] -> x[0, [1, 2], :]\n for f, n in zip_longest(fancy, normal, fillvalue=slice(None)):\n if type(f) is not list and isinstance(n, int):\n raise NotImplementedError(\"Can't handle normal indexing with \"\n \"integers and fancy indexing if the \"\n \"integers and fancy indices don't \"\n \"align with the same dimensions.\")\n\n\ndef fuse_slice(a, b):\n \"\"\" Fuse stacked slices together\n\n Fuse a pair of repeated slices into a single slice:\n\n >>> fuse_slice(slice(1000, 2000), slice(10, 15))\n slice(1010, 1015, None)\n\n This also works for tuples of slices\n\n >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),\n ... 
(slice(10, 15), [5, 2]))\n (slice(110, 115, None), [150, 120])\n\n And a variety of other interesting cases\n\n >>> fuse_slice(slice(1000, 2000), 10) # integers\n 1010\n\n >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))\n slice(1050, 1100, 10)\n\n >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists\n [1005, 1010, 1015]\n\n >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP\n None\n \"\"\"\n # None only works if the second side is a full slice\n if a is None and isinstance(b, slice) and b == slice(None, None):\n return None\n\n # Replace None with 0 and one in start and step\n if isinstance(a, slice):\n a = normalize_slice(a)\n if isinstance(b, slice):\n b = normalize_slice(b)\n\n if isinstance(a, slice) and isinstance(b, int):\n if b < 0:\n raise NotImplementedError()\n return a.start + b * a.step\n\n if isinstance(a, slice) and isinstance(b, slice):\n start = a.start + a.step * b.start\n if b.stop is not None:\n stop = a.start + a.step * b.stop\n else:\n stop = None\n if a.stop is not None:\n if stop is not None:\n stop = min(a.stop, stop)\n else:\n stop = a.stop\n step = a.step * b.step\n if step == 1:\n step = None\n return slice(start, stop, step)\n\n if isinstance(b, list):\n return [fuse_slice(a, bb) for bb in b]\n if isinstance(a, list) and isinstance(b, (int, slice)):\n return a[b]\n\n if isinstance(a, tuple) and not isinstance(b, tuple):\n b = (b,)\n\n # If given two tuples walk through both, being mindful of uneven sizes\n # and newaxes\n if isinstance(a, tuple) and isinstance(b, tuple):\n\n # Check for non-fusible cases with fancy-indexing\n a_has_lists = any(isinstance(item, list) for item in a)\n b_has_lists = any(isinstance(item, list) for item in b)\n if a_has_lists and b_has_lists:\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n elif a_has_lists:\n check_for_nonfusible_fancy_indexing(a, b)\n elif b_has_lists:\n check_for_nonfusible_fancy_indexing(b, a)\n\n j = 0\n result = list()\n for i in range(len(a)):\n # axis ceased to exist or we're out of b\n if isinstance(a[i], int) or j == len(b):\n result.append(a[i])\n continue\n while b[j] is None: # insert any Nones on the rhs\n result.append(None)\n j += 1\n result.append(fuse_slice(a[i], b[j])) # Common case\n j += 1\n while j < len(b): # anything leftover on the right?\n result.append(b[j])\n j += 1\n return tuple(result)\n raise NotImplementedError()\n", "path": "dask/array/optimization.py" } ]
diff --git a/dask/array/optimization.py b/dask/array/optimization.py index bf49a32c94b..06d11ed1fa8 100644 --- a/dask/array/optimization.py +++ b/dask/array/optimization.py @@ -220,7 +220,7 @@ def fuse_slice(a, b): None """ # None only works if the second side is a full slice - if a is None and b == slice(None, None): + if a is None and isinstance(b, slice) and b == slice(None, None): return None # Replace None with 0 and one in start and step diff --git a/dask/array/tests/test_optimization.py b/dask/array/tests/test_optimization.py index 3e43c5ba176..6c203ca0873 100644 --- a/dask/array/tests/test_optimization.py +++ b/dask/array/tests/test_optimization.py @@ -139,6 +139,9 @@ def test_fuse_slice(): with pytest.raises(NotImplementedError): fuse_slice(slice(10, 15, 2), -1) + # Regression test for #3076 + with pytest.raises(NotImplementedError): + fuse_slice(None, np.array([0, 0])) def test_fuse_slice_with_lists(): diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst index f2014106dfb..0f9c0a3b2e6 100644 --- a/docs/source/changelog.rst +++ b/docs/source/changelog.rst @@ -10,6 +10,7 @@ Array - Update error handling when len is called with empty chunks (:issue:`3058`) `Xander Johnson`_ - Fixes a metadata bug with ``store``'s ``return_stored`` option (:pr:`3064`) `John A Kirkham`_ +- Fix a bug in ``optimization.fuse_slice`` to properly handle when first input is ``None`` (:pr:`3076`) `James Bourbeau`_ DataFrame +++++++++
airctic__icevision-733
Better Record __repr__ to show ClassMap when it is stored internally

## 🚀 Feature

**Is your feature request related to a problem? Please describe.**
It would be more informative to show the `class_map` when a `Record` object is storing it.

Take for example a record loaded from the main repo:

```python
from icevision.all import *

data_dir = Path("~/icevision/samples/")
class_map = icedata.coco.class_map()
parser = parsers.COCOMaskParser(annotations_filepath=data_dir/'annotations.json', img_dir=data_dir/'images')
records = parser.parse(data_splitter=SingleSplitSplitter())[0]
record = records[0]
print(record)

## Output:
BaseRecord

common:
- Filepath: /Users/rahulsomani/git/icevision-orig/samples/images/000000343934.jpg
- Image: None
- Image size ImgSize(width=640, height=480)
- Image ID: 0
detection:
- Masks: <EncodedRLEs with 1 objects>
- Labels: [4]
- Areas: [43522.80595]
- BBoxes: [<BBox (xmin:175.14, ymin:175.68, xmax:496.21999999999997, ymax:415.68)>]
- Is Crowds: [0]
```

This record internally has access to the `class_map` via `record.detection.class_map`, which is great, but it is not shown when you print the record.

Additionally, if you print `record.components`, you get:

```python
{<icevision.core.record_components.AreasRecordComponent at 0x7fbb5b54a4d0>,
 <icevision.core.record_components.BBoxesRecordComponent at 0x7fbb5b54acd0>,
 <icevision.core.record_components.FilepathRecordComponent at 0x7fbb5b54a690>,
 <icevision.core.record_components.InstancesLabelsRecordComponent at 0x7fbb5b54a7d0>,
 <icevision.core.record_components.IsCrowdsRecordComponent at 0x7fbb5b54ad90>,
 <icevision.core.record_components.MasksRecordComponent at 0x7fbb5b54a150>,
 <icevision.core.record_components.RecordIDRecordComponent at 0x7fbb5b54a9d0>,
 <icevision.core.record_components.SizeRecordComponent at 0x7fbb5b54a810>}
```

I'd have expected `ClassMapRecordComponent` to be in there as well?
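Each record component already contributes its own lines to the printed record through a `_repr()` hook, so the class-map component just needs to contribute one as well. A minimal, self-contained sketch of that pattern (the class names below are simplified stand-ins, not the actual icevision classes):

```python
from typing import Dict, List


class ComponentSketch:
    """Stand-in for a record component that contributes lines to the record's repr."""

    def _repr(self) -> List[str]:
        return []


class ClassMapSketch(ComponentSketch):
    def __init__(self, class_map: Dict[int, str]):
        self.class_map = class_map

    def _repr(self) -> List[str]:
        # Surfacing the stored class map makes it visible when the record is printed.
        return [f"Class Map: {self.class_map}"]


class RecordSketch:
    def __init__(self, components: List[ComponentSketch]):
        self.components = components

    def __repr__(self) -> str:
        lines = [line for comp in self.components for line in comp._repr()]
        return "Record\n" + "\n".join(f"  - {line}" for line in lines)


print(RecordSketch([ClassMapSketch({0: "background", 1: "person"})]))
# Record
#   - Class Map: {0: 'background', 1: 'person'}
```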
[ { "content": "__all__ = [\n \"RecordComponent\",\n \"ClassMapRecordComponent\",\n \"RecordIDRecordComponent\",\n \"ImageRecordComponent\",\n \"FilepathRecordComponent\",\n \"SizeRecordComponent\",\n \"BaseLabelsRecordComponent\",\n \"InstancesLabelsRecordComponent\",\n \"ClassificationLabelsRecordComponent\",\n \"BBoxesRecordComponent\",\n \"MasksRecordComponent\",\n \"AreasRecordComponent\",\n \"IsCrowdsRecordComponent\",\n \"KeyPointsRecordComponent\",\n \"ScoresRecordComponent\",\n \"LossesRecordComponent\",\n]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.core.components import *\nfrom icevision.core.bbox import *\nfrom icevision.core.mask import *\nfrom icevision.core.exceptions import *\nfrom icevision.core.keypoints import *\nfrom icevision.core.class_map import *\nfrom icevision.core import tasks\n\n\nclass RecordComponent(TaskComponent):\n # TODO: as_dict is only necessary because of backwards compatibility\n @property\n def record(self):\n return self.composite\n\n def as_dict(self) -> dict:\n return {}\n\n def _load(self) -> None:\n return\n\n def _unload(self) -> None:\n return\n\n def _num_annotations(self) -> Dict[str, int]:\n return {}\n\n def _autofix(self) -> Dict[str, bool]:\n return {}\n\n def _remove_annotation(self, i) -> None:\n return\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n return {}\n\n def _repr(self) -> List[str]:\n return []\n\n def builder_template(self) -> List[str]:\n return self._format_builder_template(self._builder_template())\n\n def _builder_template(self) -> List[str]:\n return []\n\n def _format_builder_template(self, lines):\n task = f\".{self.task.name}.\" if self.task != tasks.common else \".\"\n return [line.format(task=task) for line in lines]\n\n def setup_transform(self, tfm) -> None:\n pass\n\n\nclass ClassMapRecordComponent(RecordComponent):\n def __init__(self, task):\n super().__init__(task=task)\n self.class_map = None\n\n def set_class_map(self, class_map: ClassMap):\n self.class_map = class_map\n\n def as_dict(self) -> dict:\n return {\"class_map\": self.class_map}\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}set_class_map(<ClassMap>)\"]\n\n\nclass RecordIDRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.record_id = None\n\n def set_record_id(self, record_id: int):\n self.record_id = record_id\n\n def _repr(self) -> List[str]:\n return [f\"Image ID: {self.record_id}\"]\n\n def as_dict(self) -> dict:\n return {\"record_id\": self.record_id}\n\n\n# TODO: we need a way to combine filepath and image mixin\n# TODO: rename to ImageArrayRecordComponent\nclass ImageRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.img = None\n\n def set_img(self, img: np.ndarray):\n self.img = img\n height, width, _ = self.img.shape\n # this should set on SizeRecordComponent\n self.composite.set_img_size(ImgSize(width=width, height=height), original=True)\n\n def _repr(self) -> List[str]:\n if self.img is not None:\n ndims = len(self.img.shape)\n if ndims == 3: # RGB, RGBA\n height, width, channels = self.img.shape\n elif ndims == 2: # Grayscale\n height, width, channels = [*self.img.shape, 1]\n else:\n raise ValueError(\n f\"Expected image to have 2 or 3 dimensions, got {ndims} instead\"\n )\n return [f\"Image: {width}x{height}x{channels} <np.ndarray> Image\"]\n else:\n return [f\"Image: {self.img}\"]\n\n def as_dict(self) -> dict:\n return {\"img\": 
self.img}\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_img(self)\n\n\nclass FilepathRecordComponent(ImageRecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.filepath = None\n\n def set_filepath(self, filepath: Union[str, Path]):\n self.filepath = Path(filepath)\n\n def _load(self):\n img = open_img(self.filepath)\n self.set_img(img)\n\n def _unload(self):\n self.img = None\n\n def _autofix(self) -> Dict[str, bool]:\n exists = self.filepath.exists()\n if not exists:\n raise AutofixAbort(f\"File '{self.filepath}' does not exist\")\n\n return super()._autofix()\n\n def _repr(self) -> List[str]:\n return [f\"Filepath: {self.filepath}\", *super()._repr()]\n\n def as_dict(self) -> dict:\n return {\"filepath\": self.filepath, **super().as_dict()}\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}set_filepath(<Union[str, Path]>)\"]\n\n\nclass SizeRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.img_size = None\n\n def set_image_size(self, width: int, height: int):\n # TODO: use ImgSize\n self.img_size = ImgSize(width=width, height=height)\n self.width, self.height = width, height\n\n def set_img_size(self, size: ImgSize, original: bool = False):\n self.img_size = size\n self.width, self.height = size\n\n if original:\n self.original_img_size = size\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_size(self)\n\n def _repr(self) -> List[str]:\n return [\n f\"Image size {self.img_size}\",\n ]\n\n def as_dict(self) -> dict:\n return {\"width\": self.width, \"height\": self.height}\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n info = [{\"img_width\": self.width, \"img_height\": self.height}]\n return {\"img_size\": info}\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}set_img_size(<ImgSize>)\"]\n\n\n### Annotation parsers ###\nclass BaseLabelsRecordComponent(ClassMapRecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.label_ids: List[int] = []\n self.labels: List[Hashable] = []\n\n # TODO: rename to labels_ids\n def set_labels_by_id(self, labels: Sequence[int]):\n self.label_ids = list(labels)\n # TODO, HACK: necessary because `Dataset.from_images` has no class_map\n if self.class_map is not None:\n self.labels = self._labels_ids_to_names(labels)\n\n def add_labels_by_id(self, labels: Sequence[int]):\n self.label_ids.extend(labels)\n if self.class_map is not None:\n self.labels.extend(self._labels_ids_to_names(labels))\n\n def set_labels(self, labels_names: Sequence[Hashable]):\n self.labels = list(labels_names)\n self.label_ids = self._labels_names_to_ids(labels_names)\n\n def add_labels(self, labels_names: Sequence[Hashable]):\n self.labels.extend(labels_names)\n self.label_ids.extend(self._labels_names_to_ids(labels_names))\n\n def is_valid(self) -> List[bool]:\n return [True for _ in self.label_ids]\n\n def _labels_ids_to_names(self, labels_ids):\n return [self.class_map.get_by_id(id) for id in labels_ids]\n\n def _labels_names_to_ids(self, labels_names):\n return [self.class_map.get_by_name(name) for name in labels_names]\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"labels\": len(self.label_ids)}\n\n def _autofix(self) -> Dict[str, bool]:\n return {\"labels\": [True] * len(self.label_ids)}\n\n def _remove_annotation(self, i):\n self.label_ids.pop(i)\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n return {**super()._aggregate_objects(), \"labels\": 
self.label_ids}\n\n def _repr(self) -> List[str]:\n return [*super()._repr(), f\"Labels: {self.label_ids}\"]\n\n def as_dict(self) -> dict:\n return {\"labels\": self.label_ids}\n\n def _builder_template(self) -> List[str]:\n return [\n *super()._builder_template(),\n \"record{task}add_labels(<Sequence[Hashable]>)\",\n ]\n\n\nclass InstancesLabelsRecordComponent(BaseLabelsRecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_instances_labels(self)\n\n\nclass ClassificationLabelsRecordComponent(BaseLabelsRecordComponent):\n def __init__(self, task=tasks.classification):\n super().__init__(task=task)\n\n\nclass BBoxesRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.bboxes: List[BBox] = []\n\n def set_bboxes(self, bboxes: Sequence[BBox]):\n self.bboxes = list(bboxes)\n\n def add_bboxes(self, bboxes: Sequence[BBox]):\n self.bboxes.extend(bboxes)\n\n def _autofix(self) -> Dict[str, bool]:\n success = []\n for bbox in self.bboxes:\n try:\n autofixed = bbox.autofix(\n img_w=self.composite.width, img_h=self.composite.height\n )\n success.append(True)\n except InvalidDataError as e:\n logger.log(\"AUTOFIX-FAIL\", \"{}\", str(e))\n success.append(False)\n\n return {\"bboxes\": success}\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"bboxes\": len(self.bboxes)}\n\n def _remove_annotation(self, i):\n self.bboxes.pop(i)\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n objects = []\n for bbox in self.bboxes:\n x, y, w, h = bbox.xywh\n objects.append(\n {\n \"bbox_x\": x,\n \"bbox_y\": y,\n \"bbox_width\": w,\n \"bbox_height\": h,\n \"bbox_sqrt_area\": bbox.area ** 0.5,\n \"bbox_aspect_ratio\": w / h,\n }\n )\n\n return {\"bboxes\": objects}\n\n def _repr(self) -> List[str]:\n return [f\"BBoxes: {self.bboxes}\"]\n\n def as_dict(self) -> dict:\n return {\"bboxes\": self.bboxes}\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_bboxes(self)\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}add_bboxes(<Sequence[BBox]>)\"]\n\n\nclass MasksRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.masks = EncodedRLEs()\n\n def set_masks(self, masks: Sequence[Mask]):\n self.masks = masks\n\n def add_masks(self, masks: Sequence[Mask]):\n self.masks.extend(self._masks_to_erle(masks))\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_masks(self)\n\n def _masks_to_erle(self, masks: Sequence[Mask]) -> List[Mask]:\n width, height = self.composite.img_size\n return [mask.to_erles(h=height, w=width) for mask in masks]\n\n def _load(self):\n self._encoded_masks = self.masks\n self.masks = MaskArray.from_masks(\n self.masks, self.composite.height, self.composite.width\n )\n\n def _unload(self):\n self.masks = self._encoded_masks\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"masks\": len(self.masks)}\n\n def _remove_annotation(self, i):\n self.masks.pop(i)\n\n def _repr(self) -> List[str]:\n return [f\"Masks: {self.masks}\"]\n\n def as_dict(self) -> dict:\n return {\"masks\": self.masks}\n\n\nclass AreasRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.areas: List[float] = []\n\n def set_areas(self, areas: Sequence[float]):\n self.areas = list(areas)\n\n def add_areas(self, areas: Sequence[float]):\n self.areas.extend(areas)\n\n def setup_transform(self, tfm) -> None:\n 
tfm.setup_areas(self)\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"areas\": len(self.areas)}\n\n def _remove_annotation(self, i):\n self.areas.pop(i)\n\n def _repr(self) -> List[str]:\n return [f\"Areas: {self.areas}\"]\n\n def as_dict(self) -> dict:\n return {\"areas\": self.areas}\n\n\nclass IsCrowdsRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.iscrowds: List[bool] = []\n\n def set_iscrowds(self, iscrowds: Sequence[bool]):\n self.iscrowds = list(iscrowds)\n\n def add_iscrowds(self, iscrowds: Sequence[bool]):\n self.iscrowds.extend(iscrowds)\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_iscrowds(self)\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"iscrowds\": len(self.iscrowds)}\n\n def _remove_annotation(self, i):\n self.iscrowds.pop(i)\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n return {\"iscrowds\": self.iscrowds}\n\n def _repr(self) -> List[str]:\n return [f\"Is Crowds: {self.iscrowds}\"]\n\n def as_dict(self) -> dict:\n return {\"iscrowds\": self.iscrowds}\n\n\nclass KeyPointsRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.keypoints: List[KeyPoints] = []\n\n def set_keypoints(self, keypoints: Sequence[KeyPoints]):\n self.keypoints = list(keypoints)\n\n def add_keypoints(self, keypoints: Sequence[KeyPoints]):\n self.keypoints.extend(keypoints)\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_keypoints(self)\n\n def as_dict(self) -> dict:\n return {\"keypoints\": self.keypoints}\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n objects = [\n {\"keypoint_x\": kpt.x, \"keypoint_y\": kpt.y, \"keypoint_visible\": kpt.v}\n for kpt in self.keypoints\n ]\n return {\"keypoints\": objects}\n\n def _repr(self) -> List[str]:\n return {f\"KeyPoints: {self.keypoints}\"}\n\n\nclass ScoresRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.scores = None\n\n def set_scores(self, scores: Sequence[float]):\n self.scores = scores\n\n def _repr(self) -> List[str]:\n return [f\"Scores: {self.scores}\"]\n\n def as_dict(self) -> dict:\n return {\"scores\": self.scores}\n\n\nclass LossesRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.losses = None\n\n def set_losses(self, losses: Dict):\n self.losses = losses\n", "path": "icevision/core/record_components.py" } ]
[ { "content": "__all__ = [\n \"RecordComponent\",\n \"ClassMapRecordComponent\",\n \"RecordIDRecordComponent\",\n \"ImageRecordComponent\",\n \"FilepathRecordComponent\",\n \"SizeRecordComponent\",\n \"BaseLabelsRecordComponent\",\n \"InstancesLabelsRecordComponent\",\n \"ClassificationLabelsRecordComponent\",\n \"BBoxesRecordComponent\",\n \"MasksRecordComponent\",\n \"AreasRecordComponent\",\n \"IsCrowdsRecordComponent\",\n \"KeyPointsRecordComponent\",\n \"ScoresRecordComponent\",\n \"LossesRecordComponent\",\n]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.core.components import *\nfrom icevision.core.bbox import *\nfrom icevision.core.mask import *\nfrom icevision.core.exceptions import *\nfrom icevision.core.keypoints import *\nfrom icevision.core.class_map import *\nfrom icevision.core import tasks\n\n\nclass RecordComponent(TaskComponent):\n # TODO: as_dict is only necessary because of backwards compatibility\n @property\n def record(self):\n return self.composite\n\n def as_dict(self) -> dict:\n return {}\n\n def _load(self) -> None:\n return\n\n def _unload(self) -> None:\n return\n\n def _num_annotations(self) -> Dict[str, int]:\n return {}\n\n def _autofix(self) -> Dict[str, bool]:\n return {}\n\n def _remove_annotation(self, i) -> None:\n return\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n return {}\n\n def _repr(self) -> List[str]:\n return []\n\n def builder_template(self) -> List[str]:\n return self._format_builder_template(self._builder_template())\n\n def _builder_template(self) -> List[str]:\n return []\n\n def _format_builder_template(self, lines):\n task = f\".{self.task.name}.\" if self.task != tasks.common else \".\"\n return [line.format(task=task) for line in lines]\n\n def setup_transform(self, tfm) -> None:\n pass\n\n\nclass ClassMapRecordComponent(RecordComponent):\n def __init__(self, task):\n super().__init__(task=task)\n self.class_map = None\n\n def set_class_map(self, class_map: ClassMap):\n self.class_map = class_map\n\n def _repr(self) -> List[str]:\n return [f\"Class Map: {self.class_map}\"]\n\n def as_dict(self) -> dict:\n return {\"class_map\": self.class_map}\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}set_class_map(<ClassMap>)\"]\n\n\nclass RecordIDRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.record_id = None\n\n def set_record_id(self, record_id: int):\n self.record_id = record_id\n\n def _repr(self) -> List[str]:\n return [f\"Image ID: {self.record_id}\"]\n\n def as_dict(self) -> dict:\n return {\"record_id\": self.record_id}\n\n\n# TODO: we need a way to combine filepath and image mixin\n# TODO: rename to ImageArrayRecordComponent\nclass ImageRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.img = None\n\n def set_img(self, img: np.ndarray):\n self.img = img\n height, width, _ = self.img.shape\n # this should set on SizeRecordComponent\n self.composite.set_img_size(ImgSize(width=width, height=height), original=True)\n\n def _repr(self) -> List[str]:\n if self.img is not None:\n ndims = len(self.img.shape)\n if ndims == 3: # RGB, RGBA\n height, width, channels = self.img.shape\n elif ndims == 2: # Grayscale\n height, width, channels = [*self.img.shape, 1]\n else:\n raise ValueError(\n f\"Expected image to have 2 or 3 dimensions, got {ndims} instead\"\n )\n return [f\"Image: {width}x{height}x{channels} <np.ndarray> Image\"]\n else:\n return 
[f\"Image: {self.img}\"]\n\n def as_dict(self) -> dict:\n return {\"img\": self.img}\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_img(self)\n\n\nclass FilepathRecordComponent(ImageRecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.filepath = None\n\n def set_filepath(self, filepath: Union[str, Path]):\n self.filepath = Path(filepath)\n\n def _load(self):\n img = open_img(self.filepath)\n self.set_img(img)\n\n def _unload(self):\n self.img = None\n\n def _autofix(self) -> Dict[str, bool]:\n exists = self.filepath.exists()\n if not exists:\n raise AutofixAbort(f\"File '{self.filepath}' does not exist\")\n\n return super()._autofix()\n\n def _repr(self) -> List[str]:\n return [f\"Filepath: {self.filepath}\", *super()._repr()]\n\n def as_dict(self) -> dict:\n return {\"filepath\": self.filepath, **super().as_dict()}\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}set_filepath(<Union[str, Path]>)\"]\n\n\nclass SizeRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.img_size = None\n\n def set_image_size(self, width: int, height: int):\n # TODO: use ImgSize\n self.img_size = ImgSize(width=width, height=height)\n self.width, self.height = width, height\n\n def set_img_size(self, size: ImgSize, original: bool = False):\n self.img_size = size\n self.width, self.height = size\n\n if original:\n self.original_img_size = size\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_size(self)\n\n def _repr(self) -> List[str]:\n return [\n f\"Image size {self.img_size}\",\n ]\n\n def as_dict(self) -> dict:\n return {\"width\": self.width, \"height\": self.height}\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n info = [{\"img_width\": self.width, \"img_height\": self.height}]\n return {\"img_size\": info}\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}set_img_size(<ImgSize>)\"]\n\n\n### Annotation parsers ###\nclass BaseLabelsRecordComponent(ClassMapRecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.label_ids: List[int] = []\n self.labels: List[Hashable] = []\n\n # TODO: rename to labels_ids\n def set_labels_by_id(self, labels: Sequence[int]):\n self.label_ids = list(labels)\n # TODO, HACK: necessary because `Dataset.from_images` has no class_map\n if self.class_map is not None:\n self.labels = self._labels_ids_to_names(labels)\n\n def add_labels_by_id(self, labels: Sequence[int]):\n self.label_ids.extend(labels)\n if self.class_map is not None:\n self.labels.extend(self._labels_ids_to_names(labels))\n\n def set_labels(self, labels_names: Sequence[Hashable]):\n self.labels = list(labels_names)\n self.label_ids = self._labels_names_to_ids(labels_names)\n\n def add_labels(self, labels_names: Sequence[Hashable]):\n self.labels.extend(labels_names)\n self.label_ids.extend(self._labels_names_to_ids(labels_names))\n\n def is_valid(self) -> List[bool]:\n return [True for _ in self.label_ids]\n\n def _labels_ids_to_names(self, labels_ids):\n return [self.class_map.get_by_id(id) for id in labels_ids]\n\n def _labels_names_to_ids(self, labels_names):\n return [self.class_map.get_by_name(name) for name in labels_names]\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"labels\": len(self.label_ids)}\n\n def _autofix(self) -> Dict[str, bool]:\n return {\"labels\": [True] * len(self.label_ids)}\n\n def _remove_annotation(self, i):\n self.label_ids.pop(i)\n\n def _aggregate_objects(self) -> 
Dict[str, List[dict]]:\n return {**super()._aggregate_objects(), \"labels\": self.label_ids}\n\n def _repr(self) -> List[str]:\n return [*super()._repr(), f\"Labels: {self.label_ids}\"]\n\n def as_dict(self) -> dict:\n return {\"labels\": self.label_ids}\n\n def _builder_template(self) -> List[str]:\n return [\n *super()._builder_template(),\n \"record{task}add_labels(<Sequence[Hashable]>)\",\n ]\n\n\nclass InstancesLabelsRecordComponent(BaseLabelsRecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_instances_labels(self)\n\n\nclass ClassificationLabelsRecordComponent(BaseLabelsRecordComponent):\n def __init__(self, task=tasks.classification):\n super().__init__(task=task)\n\n\nclass BBoxesRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.bboxes: List[BBox] = []\n\n def set_bboxes(self, bboxes: Sequence[BBox]):\n self.bboxes = list(bboxes)\n\n def add_bboxes(self, bboxes: Sequence[BBox]):\n self.bboxes.extend(bboxes)\n\n def _autofix(self) -> Dict[str, bool]:\n success = []\n for bbox in self.bboxes:\n try:\n autofixed = bbox.autofix(\n img_w=self.composite.width, img_h=self.composite.height\n )\n success.append(True)\n except InvalidDataError as e:\n logger.log(\"AUTOFIX-FAIL\", \"{}\", str(e))\n success.append(False)\n\n return {\"bboxes\": success}\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"bboxes\": len(self.bboxes)}\n\n def _remove_annotation(self, i):\n self.bboxes.pop(i)\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n objects = []\n for bbox in self.bboxes:\n x, y, w, h = bbox.xywh\n objects.append(\n {\n \"bbox_x\": x,\n \"bbox_y\": y,\n \"bbox_width\": w,\n \"bbox_height\": h,\n \"bbox_sqrt_area\": bbox.area ** 0.5,\n \"bbox_aspect_ratio\": w / h,\n }\n )\n\n return {\"bboxes\": objects}\n\n def _repr(self) -> List[str]:\n return [f\"BBoxes: {self.bboxes}\"]\n\n def as_dict(self) -> dict:\n return {\"bboxes\": self.bboxes}\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_bboxes(self)\n\n def _builder_template(self) -> List[str]:\n return [\"record{task}add_bboxes(<Sequence[BBox]>)\"]\n\n\nclass MasksRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.masks = EncodedRLEs()\n\n def set_masks(self, masks: Sequence[Mask]):\n self.masks = masks\n\n def add_masks(self, masks: Sequence[Mask]):\n self.masks.extend(self._masks_to_erle(masks))\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_masks(self)\n\n def _masks_to_erle(self, masks: Sequence[Mask]) -> List[Mask]:\n width, height = self.composite.img_size\n return [mask.to_erles(h=height, w=width) for mask in masks]\n\n def _load(self):\n self._encoded_masks = self.masks\n self.masks = MaskArray.from_masks(\n self.masks, self.composite.height, self.composite.width\n )\n\n def _unload(self):\n self.masks = self._encoded_masks\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"masks\": len(self.masks)}\n\n def _remove_annotation(self, i):\n self.masks.pop(i)\n\n def _repr(self) -> List[str]:\n return [f\"Masks: {self.masks}\"]\n\n def as_dict(self) -> dict:\n return {\"masks\": self.masks}\n\n\nclass AreasRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.areas: List[float] = []\n\n def set_areas(self, areas: Sequence[float]):\n self.areas = list(areas)\n\n def add_areas(self, areas: 
Sequence[float]):\n self.areas.extend(areas)\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_areas(self)\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"areas\": len(self.areas)}\n\n def _remove_annotation(self, i):\n self.areas.pop(i)\n\n def _repr(self) -> List[str]:\n return [f\"Areas: {self.areas}\"]\n\n def as_dict(self) -> dict:\n return {\"areas\": self.areas}\n\n\nclass IsCrowdsRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.iscrowds: List[bool] = []\n\n def set_iscrowds(self, iscrowds: Sequence[bool]):\n self.iscrowds = list(iscrowds)\n\n def add_iscrowds(self, iscrowds: Sequence[bool]):\n self.iscrowds.extend(iscrowds)\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_iscrowds(self)\n\n def _num_annotations(self) -> Dict[str, int]:\n return {\"iscrowds\": len(self.iscrowds)}\n\n def _remove_annotation(self, i):\n self.iscrowds.pop(i)\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n return {\"iscrowds\": self.iscrowds}\n\n def _repr(self) -> List[str]:\n return [f\"Is Crowds: {self.iscrowds}\"]\n\n def as_dict(self) -> dict:\n return {\"iscrowds\": self.iscrowds}\n\n\nclass KeyPointsRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.keypoints: List[KeyPoints] = []\n\n def set_keypoints(self, keypoints: Sequence[KeyPoints]):\n self.keypoints = list(keypoints)\n\n def add_keypoints(self, keypoints: Sequence[KeyPoints]):\n self.keypoints.extend(keypoints)\n\n def setup_transform(self, tfm) -> None:\n tfm.setup_keypoints(self)\n\n def as_dict(self) -> dict:\n return {\"keypoints\": self.keypoints}\n\n def _aggregate_objects(self) -> Dict[str, List[dict]]:\n objects = [\n {\"keypoint_x\": kpt.x, \"keypoint_y\": kpt.y, \"keypoint_visible\": kpt.v}\n for kpt in self.keypoints\n ]\n return {\"keypoints\": objects}\n\n def _repr(self) -> List[str]:\n return {f\"KeyPoints: {self.keypoints}\"}\n\n\nclass ScoresRecordComponent(RecordComponent):\n def __init__(self, task=tasks.detection):\n super().__init__(task=task)\n self.scores = None\n\n def set_scores(self, scores: Sequence[float]):\n self.scores = scores\n\n def _repr(self) -> List[str]:\n return [f\"Scores: {self.scores}\"]\n\n def as_dict(self) -> dict:\n return {\"scores\": self.scores}\n\n\nclass LossesRecordComponent(RecordComponent):\n def __init__(self, task=tasks.common):\n super().__init__(task=task)\n self.losses = None\n\n def set_losses(self, losses: Dict):\n self.losses = losses\n", "path": "icevision/core/record_components.py" } ]
diff --git a/icevision/core/record_components.py b/icevision/core/record_components.py index 65238d214..db2c0e6a5 100644 --- a/icevision/core/record_components.py +++ b/icevision/core/record_components.py @@ -80,6 +80,9 @@ def __init__(self, task): def set_class_map(self, class_map: ClassMap): self.class_map = class_map + def _repr(self) -> List[str]: + return [f"Class Map: {self.class_map}"] + def as_dict(self) -> dict: return {"class_map": self.class_map}
microsoft__MLOS-477
SMAC optimizer messes up mlos_bench logging

The SMAC optimizer completely overrides our logging setup and installs its own formatter, output handler, and so on. As a result, as soon as the SMAC optimizer is initialized, mlos_bench stops writing to its log file, and all logging goes to stdout, in a different format and at a different log level (always INFO). We need to find a way to make SMAC use our logger instead of setting up its own from scratch.
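One library-agnostic way to defend an application's logging configuration against a third-party constructor that reinstalls root handlers is to snapshot the root logger before the call and restore it afterwards. This is only an illustrative workaround sketch, not necessarily how mlos_bench resolves the issue, and the commented-out `SmacOptimizer(...)` line is a hypothetical usage:

```python
import logging
from contextlib import contextmanager


@contextmanager
def preserve_root_logging():
    """Snapshot the root logger's handlers and level, and restore them on exit."""
    root = logging.getLogger()
    saved_handlers = list(root.handlers)
    saved_level = root.level
    try:
        yield
    finally:
        # Drop any handlers the wrapped call installed and re-attach our own.
        for handler in list(root.handlers):
            if handler not in saved_handlers:
                root.removeHandler(handler)
        for handler in saved_handlers:
            if handler not in root.handlers:
                root.addHandler(handler)
        root.setLevel(saved_level)


# Hypothetical usage around the optimizer construction:
# with preserve_root_logging():
#     optimizer = SmacOptimizer(parameter_space=parameter_space)
```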
[ { "content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nContains the wrapper class for SMAC Bayesian optimizers.\nSee Also: <https://automl.github.io/SMAC3/main/index.html>\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, TYPE_CHECKING\nfrom tempfile import TemporaryDirectory\n\nimport ConfigSpace\nimport numpy.typing as npt\nimport pandas as pd\n\nfrom mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer\nfrom mlos_core.spaces.adapters.adapter import BaseSpaceAdapter\n\n\nclass SmacOptimizer(BaseBayesianOptimizer):\n \"\"\"Wrapper class for SMAC based Bayesian optimization.\n\n Parameters\n ----------\n parameter_space : ConfigSpace.ConfigurationSpace\n The parameter space to optimize.\n\n space_adapter : BaseSpaceAdapter\n The space adapter class to employ for parameter space transformations.\n\n seed : Optional[int]\n By default SMAC uses a known seed (0) to keep results reproducible.\n However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n\n run_name : Optional[str]\n Name of this run. This is used to easily distinguish across different runs.\n If set to `None` (default), SMAC will generate a hash from metadata.\n\n output_directory : Optional[str]\n The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.\n\n max_trials : int\n Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.\n Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.\n\n n_random_init : Optional[int]\n Number of points evaluated at start to bootstrap the optimizer. Defaults to 10.\n\n n_random_probability: Optional[float]\n Probability of choosing to evaluate a random configuration during optimization.\n Defaults to `0.1`. 
Setting this to a higher value favors exploration over exploitation.\n \"\"\"\n\n def __init__(self, *, # pylint: disable=too-many-locals\n parameter_space: ConfigSpace.ConfigurationSpace,\n space_adapter: Optional[BaseSpaceAdapter] = None,\n seed: Optional[int] = 0,\n run_name: Optional[str] = None,\n output_directory: Optional[str] = None,\n max_trials: int = 100,\n n_random_init: Optional[int] = 10,\n n_random_probability: Optional[float] = 0.1):\n\n super().__init__(\n parameter_space=parameter_space,\n space_adapter=space_adapter,\n )\n\n # Declare at the top because we need it in __del__/cleanup()\n self._temp_output_directory: Optional[TemporaryDirectory] = None\n\n # pylint: disable=import-outside-toplevel\n from smac import HyperparameterOptimizationFacade as Optimizer_Smac\n from smac import Scenario\n from smac.intensifier.abstract_intensifier import AbstractIntensifier\n from smac.initial_design import LatinHypercubeInitialDesign\n from smac.main.config_selector import ConfigSelector\n from smac.random_design.probability_design import ProbabilityRandomDesign\n from smac.runhistory import TrialInfo\n\n # Store for TrialInfo instances returned by .ask()\n self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}\n\n # The default when not specified is to use a known seed (0) to keep results reproducible.\n # However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n # https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario\n seed = -1 if seed is None else seed\n\n # Create temporary directory for SMAC output (if none provided)\n if output_directory is None:\n # pylint: disable=consider-using-with\n try:\n self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10\n except TypeError:\n self._temp_output_directory = TemporaryDirectory()\n output_directory = self._temp_output_directory.name\n\n scenario: Scenario = Scenario(\n self.optimizer_parameter_space,\n name=run_name,\n output_directory=Path(output_directory),\n deterministic=True,\n n_trials=max_trials,\n seed=seed or -1, # if -1, SMAC will generate a random seed internally\n n_workers=1, # Use a single thread for evaluating trials\n )\n intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)\n config_selector: ConfigSelector = ConfigSelector(scenario, retrain_after=1)\n\n initial_design: Optional[LatinHypercubeInitialDesign] = None\n if n_random_init is not None:\n initial_design = LatinHypercubeInitialDesign(scenario=scenario, n_configs=n_random_init)\n random_design: Optional[ProbabilityRandomDesign] = None\n if n_random_probability is not None:\n random_design = ProbabilityRandomDesign(probability=n_random_probability)\n\n self.base_optimizer = Optimizer_Smac(\n scenario,\n SmacOptimizer._dummy_target_func,\n initial_design=initial_design,\n intensifier=intensifier,\n random_design=random_design,\n config_selector=config_selector,\n overwrite=True,\n )\n\n def __del__(self) -> None:\n # Best-effort attempt to clean up, in case the user forgets to call .cleanup()\n self.cleanup()\n\n @staticmethod\n def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:\n \"\"\"Dummy target function for SMAC optimizer.\n\n Since we only use the ask-and-tell interface, this is never called.\n\n Parameters\n ----------\n config : ConfigSpace.Configuration\n Configuration to evaluate.\n\n seed : int\n Random seed to use for the target function. 
Not actually used.\n \"\"\"\n # NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API\n # -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946\n raise RuntimeError('This function should never be called.')\n\n def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:\n \"\"\"Registers the given configurations and scores.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n scores : pd.Series\n Scores from running the configurations. The index is the same as the index of the configurations.\n\n context : pd.DataFrame\n Not Yet Implemented.\n \"\"\"\n from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n # Register each trial (one-by-one)\n for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):\n # Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance\n info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))\n value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)\n self.base_optimizer.tell(info, value, save=False)\n\n # Save optimizer once we register all configs\n self.base_optimizer.optimizer.save()\n\n def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n \"\"\"Suggests a new configuration.\n\n Parameters\n ----------\n context : pd.DataFrame\n Not Yet Implemented.\n\n Returns\n -------\n configuration : pd.DataFrame\n Pandas dataframe with a single row. 
Column names are the parameter names.\n \"\"\"\n if TYPE_CHECKING:\n from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n trial: TrialInfo = self.base_optimizer.ask()\n self.trial_info_map[trial.config] = trial\n return pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))\n\n def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:\n raise NotImplementedError()\n\n def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n from smac.utils.configspace import convert_configurations_to_array # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if len(self._observations) < self.base_optimizer._initial_design._n_configs:\n raise RuntimeError('Surrogate model can make predictions *only* after all initial points have been evaluated')\n if self.base_optimizer._config_selector._model is None:\n raise RuntimeError('Surrogate model is not yet trained')\n\n configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))\n mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)\n return mean_predictions.reshape(-1,)\n\n def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if self.base_optimizer._config_selector._acquisition_function is None:\n raise RuntimeError('Acquisition function is not yet initialized')\n\n configs: list = self._to_configspace_configs(configurations)\n return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)\n\n def cleanup(self) -> None:\n if self._temp_output_directory is not None:\n self._temp_output_directory.cleanup()\n self._temp_output_directory = None\n\n def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:\n \"\"\"Convert a dataframe of configurations to a list of ConfigSpace configurations.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n Returns\n -------\n configurations : list\n List of ConfigSpace configurations.\n \"\"\"\n return [\n ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())\n for (_, config) in configurations.iterrows()\n ]\n", "path": "mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py" } ]
[ { "content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nContains the wrapper class for SMAC Bayesian optimizers.\nSee Also: <https://automl.github.io/SMAC3/main/index.html>\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, TYPE_CHECKING\nfrom tempfile import TemporaryDirectory\n\nimport ConfigSpace\nimport numpy.typing as npt\nimport pandas as pd\n\nfrom mlos_core.optimizers.bayesian_optimizers.bayesian_optimizer import BaseBayesianOptimizer\nfrom mlos_core.spaces.adapters.adapter import BaseSpaceAdapter\n\n\nclass SmacOptimizer(BaseBayesianOptimizer):\n \"\"\"Wrapper class for SMAC based Bayesian optimization.\n\n Parameters\n ----------\n parameter_space : ConfigSpace.ConfigurationSpace\n The parameter space to optimize.\n\n space_adapter : BaseSpaceAdapter\n The space adapter class to employ for parameter space transformations.\n\n seed : Optional[int]\n By default SMAC uses a known seed (0) to keep results reproducible.\n However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n\n run_name : Optional[str]\n Name of this run. This is used to easily distinguish across different runs.\n If set to `None` (default), SMAC will generate a hash from metadata.\n\n output_directory : Optional[str]\n The directory where SMAC output will saved. If set to `None` (default), a temporary dir will be used.\n\n max_trials : int\n Maximum number of trials (i.e., function evaluations) to be run. Defaults to 100.\n Note that modifying this value directly affects the value of `n_random_init`, if latter is set to `None`.\n\n n_random_init : Optional[int]\n Number of points evaluated at start to bootstrap the optimizer. Defaults to 10.\n\n n_random_probability: Optional[float]\n Probability of choosing to evaluate a random configuration during optimization.\n Defaults to `0.1`. 
Setting this to a higher value favors exploration over exploitation.\n \"\"\"\n\n def __init__(self, *, # pylint: disable=too-many-locals\n parameter_space: ConfigSpace.ConfigurationSpace,\n space_adapter: Optional[BaseSpaceAdapter] = None,\n seed: Optional[int] = 0,\n run_name: Optional[str] = None,\n output_directory: Optional[str] = None,\n max_trials: int = 100,\n n_random_init: Optional[int] = 10,\n n_random_probability: Optional[float] = 0.1):\n\n super().__init__(\n parameter_space=parameter_space,\n space_adapter=space_adapter,\n )\n\n # Declare at the top because we need it in __del__/cleanup()\n self._temp_output_directory: Optional[TemporaryDirectory] = None\n\n # pylint: disable=import-outside-toplevel\n from smac import HyperparameterOptimizationFacade as Optimizer_Smac\n from smac import Scenario\n from smac.intensifier.abstract_intensifier import AbstractIntensifier\n from smac.initial_design import LatinHypercubeInitialDesign\n from smac.main.config_selector import ConfigSelector\n from smac.random_design.probability_design import ProbabilityRandomDesign\n from smac.runhistory import TrialInfo\n\n # Store for TrialInfo instances returned by .ask()\n self.trial_info_map: Dict[ConfigSpace.Configuration, TrialInfo] = {}\n\n # The default when not specified is to use a known seed (0) to keep results reproducible.\n # However, if a `None` seed is explicitly provided, we let a random seed be produced by SMAC.\n # https://automl.github.io/SMAC3/main/api/smac.scenario.html#smac.scenario.Scenario\n seed = -1 if seed is None else seed\n\n # Create temporary directory for SMAC output (if none provided)\n if output_directory is None:\n # pylint: disable=consider-using-with\n try:\n self._temp_output_directory = TemporaryDirectory(ignore_cleanup_errors=True) # Argument added in Python 3.10\n except TypeError:\n self._temp_output_directory = TemporaryDirectory()\n output_directory = self._temp_output_directory.name\n\n scenario: Scenario = Scenario(\n self.optimizer_parameter_space,\n name=run_name,\n output_directory=Path(output_directory),\n deterministic=True,\n n_trials=max_trials,\n seed=seed or -1, # if -1, SMAC will generate a random seed internally\n n_workers=1, # Use a single thread for evaluating trials\n )\n intensifier: AbstractIntensifier = Optimizer_Smac.get_intensifier(scenario, max_config_calls=1)\n config_selector: ConfigSelector = ConfigSelector(scenario, retrain_after=1)\n\n initial_design: Optional[LatinHypercubeInitialDesign] = None\n if n_random_init is not None:\n initial_design = LatinHypercubeInitialDesign(scenario=scenario, n_configs=n_random_init)\n random_design: Optional[ProbabilityRandomDesign] = None\n if n_random_probability is not None:\n random_design = ProbabilityRandomDesign(probability=n_random_probability)\n\n self.base_optimizer = Optimizer_Smac(\n scenario,\n SmacOptimizer._dummy_target_func,\n initial_design=initial_design,\n intensifier=intensifier,\n random_design=random_design,\n config_selector=config_selector,\n overwrite=True,\n logging_level=False, # Use the existing logger\n )\n\n def __del__(self) -> None:\n # Best-effort attempt to clean up, in case the user forgets to call .cleanup()\n self.cleanup()\n\n @staticmethod\n def _dummy_target_func(config: ConfigSpace.Configuration, seed: int = 0) -> None:\n \"\"\"Dummy target function for SMAC optimizer.\n\n Since we only use the ask-and-tell interface, this is never called.\n\n Parameters\n ----------\n config : ConfigSpace.Configuration\n Configuration to evaluate.\n\n seed : int\n Random 
seed to use for the target function. Not actually used.\n \"\"\"\n # NOTE: Providing a target function when using the ask-and-tell interface is an imperfection of the API\n # -- this planned to be fixed in some future release: https://github.com/automl/SMAC3/issues/946\n raise RuntimeError('This function should never be called.')\n\n def _register(self, configurations: pd.DataFrame, scores: pd.Series, context: Optional[pd.DataFrame] = None) -> None:\n \"\"\"Registers the given configurations and scores.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n scores : pd.Series\n Scores from running the configurations. The index is the same as the index of the configurations.\n\n context : pd.DataFrame\n Not Yet Implemented.\n \"\"\"\n from smac.runhistory import StatusType, TrialInfo, TrialValue # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n # Register each trial (one-by-one)\n for config, score in zip(self._to_configspace_configs(configurations), scores.tolist()):\n # Retrieve previously generated TrialInfo (returned by .ask()) or create new TrialInfo instance\n info: TrialInfo = self.trial_info_map.get(config, TrialInfo(config=config, seed=self.base_optimizer.scenario.seed))\n value: TrialValue = TrialValue(cost=score, time=0.0, status=StatusType.SUCCESS)\n self.base_optimizer.tell(info, value, save=False)\n\n # Save optimizer once we register all configs\n self.base_optimizer.optimizer.save()\n\n def _suggest(self, context: Optional[pd.DataFrame] = None) -> pd.DataFrame:\n \"\"\"Suggests a new configuration.\n\n Parameters\n ----------\n context : pd.DataFrame\n Not Yet Implemented.\n\n Returns\n -------\n configuration : pd.DataFrame\n Pandas dataframe with a single row. 
Column names are the parameter names.\n \"\"\"\n if TYPE_CHECKING:\n from smac.runhistory import TrialInfo # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n\n trial: TrialInfo = self.base_optimizer.ask()\n self.trial_info_map[trial.config] = trial\n return pd.DataFrame([trial.config], columns=list(self.optimizer_parameter_space.keys()))\n\n def register_pending(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> None:\n raise NotImplementedError()\n\n def surrogate_predict(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n from smac.utils.configspace import convert_configurations_to_array # pylint: disable=import-outside-toplevel\n\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if len(self._observations) < self.base_optimizer._initial_design._n_configs:\n raise RuntimeError('Surrogate model can make predictions *only* after all initial points have been evaluated')\n if self.base_optimizer._config_selector._model is None:\n raise RuntimeError('Surrogate model is not yet trained')\n\n configs: npt.NDArray = convert_configurations_to_array(self._to_configspace_configs(configurations))\n mean_predictions, _ = self.base_optimizer._config_selector._model.predict(configs)\n return mean_predictions.reshape(-1,)\n\n def acquisition_function(self, configurations: pd.DataFrame, context: Optional[pd.DataFrame] = None) -> npt.NDArray:\n if context is not None:\n raise NotImplementedError()\n if self._space_adapter:\n raise NotImplementedError()\n\n # pylint: disable=protected-access\n if self.base_optimizer._config_selector._acquisition_function is None:\n raise RuntimeError('Acquisition function is not yet initialized')\n\n configs: list = self._to_configspace_configs(configurations)\n return self.base_optimizer._config_selector._acquisition_function(configs).reshape(-1,)\n\n def cleanup(self) -> None:\n if self._temp_output_directory is not None:\n self._temp_output_directory.cleanup()\n self._temp_output_directory = None\n\n def _to_configspace_configs(self, configurations: pd.DataFrame) -> List[ConfigSpace.Configuration]:\n \"\"\"Convert a dataframe of configurations to a list of ConfigSpace configurations.\n\n Parameters\n ----------\n configurations : pd.DataFrame\n Dataframe of configurations / parameters. The columns are parameter names and the rows are the configurations.\n\n Returns\n -------\n configurations : list\n List of ConfigSpace configurations.\n \"\"\"\n return [\n ConfigSpace.Configuration(self.optimizer_parameter_space, values=config.to_dict())\n for (_, config) in configurations.iterrows()\n ]\n", "path": "mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py" } ]
diff --git a/mlos_bench/mlos_bench/tests/config/cli/mock-bench.jsonc b/mlos_bench/mlos_bench/tests/config/cli/mock-bench.jsonc index cff09de12ba..d4179baa8e2 100644 --- a/mlos_bench/mlos_bench/tests/config/cli/mock-bench.jsonc +++ b/mlos_bench/mlos_bench/tests/config/cli/mock-bench.jsonc @@ -13,7 +13,6 @@ ], "environment": "environments/mock/mock_env.jsonc", - "storage": "storage/in-memory.jsonc", "tunable_values": [ "tunable-values/tunable-values-example.jsonc" diff --git a/mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc b/mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc new file mode 100644 index 00000000000..4d30d58d5e0 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc @@ -0,0 +1,28 @@ +// +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +// + +// *** DO *NOT* CHANGE! This config is used for tests! *** +{ + "$schema": "https://raw.githubusercontent.com/microsoft/MLOS/main/mlos_bench/mlos_bench/config/schemas/cli/cli-schema.json", + + "config_path": [ + // relative to the root of the repo (for now), where this is expected to be executed from + "mlos_bench/mlos_bench/tests/config", + "mlos_bench/mlos_bench/config" + ], + + "environment": "environments/mock/mock_env.jsonc", + "optimizer": "optimizers/mlos_core_default_opt.jsonc", + + // "globals": ["global_config.json"], + + "experiment_id": "MockExperiment", + "trial_id": 1, + + "teardown": true, + + // "log_file": "mock-opt.log", + "log_level": "DEBUG" +} diff --git a/mlos_bench/mlos_bench/tests/launcher_test.py b/mlos_bench/mlos_bench/tests/launcher_test.py index 32a917d782e..8850477b8ca 100644 --- a/mlos_bench/mlos_bench/tests/launcher_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_test.py @@ -6,6 +6,8 @@ Unit tests to check the main CLI launcher. """ import os +import re +from typing import List import pytest @@ -37,27 +39,73 @@ def local_exec_service() -> LocalExecService: })) -def test_launch_main_app(root_path: str, - local_exec_service: LocalExecService) -> None: +def _launch_main_app(root_path: str, local_exec_service: LocalExecService, + cli_config: str, re_expected: List[str]) -> None: """ - Run mlos_bench command-line application with mock config and check the results in the log. + Run mlos_bench command-line application with given config + and check the results in the log. 
""" with local_exec_service.temp_dir_context() as temp_dir: - log_path = path_join(temp_dir, "mock-bench.log") - cmd = "./mlos_bench/mlos_bench/run.py" + \ - " --config mlos_bench/mlos_bench/tests/config/cli/mock-bench.jsonc" + \ - f" --log_file '{log_path}'" - (return_code, _stdout, _stderr) = local_exec_service.local_exec([cmd], cwd=root_path) - + # Test developers note: for local debugging, + # uncomment the following line to use a known file path that can be examined: + # temp_dir = '/tmp' + log_path = path_join(temp_dir, "mock-test.log") + (return_code, _stdout, _stderr) = local_exec_service.local_exec( + [f"./mlos_bench/mlos_bench/run.py {cli_config} --log_file '{log_path}'"], + cwd=root_path) assert return_code == 0 - with open(log_path, "rt", encoding="utf-8") as fh_out: - best_score_lines = [ - ln.strip() for ln in fh_out.readlines() - if " INFO Env: Mock environment best score: " in ln - ] - assert len([ - ln for ln in best_score_lines - if " best score: 65.67" in ln - ]) == 1 + try: + iter_expected = iter(re_expected) + re_log = re.compile(next(iter_expected)) + with open(log_path, "rt", encoding="utf-8") as fh_out: + for ln in fh_out: + if re_log.match(ln): + re_log = re.compile(next(iter_expected)) + assert False, f"Pattern not found: '{re_log.pattern}'" + except StopIteration: + pass # Success: all patterns found + + +_RE_DATE = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}" + + +def test_launch_main_app_bench(root_path: str, local_exec_service: LocalExecService) -> None: + """ + Run mlos_bench command-line application with mock benchmark config + and check the results in the log. + """ + _launch_main_app( + root_path, local_exec_service, + "--config mlos_bench/mlos_bench/tests/config/cli/mock-bench.jsonc", + [ + f"^{_RE_DATE} run\\.py:\\d+ " + + r"_optimize INFO Env: Mock environment best score: 65\.67\d+\s*$", + ] + ) + + +def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecService) -> None: + """ + Run mlos_bench command-line application with mock optimization config + and check the results in the log. + """ + _launch_main_app( + root_path, local_exec_service, + "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --max_iterations 3", + [ + # Iteration 1: Expect first value to be the baseline + f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " + + r"register DEBUG Score: 65\.67\d+ Dataframe:\s*$", + # Iteration 2: The result may not always be deterministic + f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " + + r"register DEBUG Score: \d+\.\d+ Dataframe:\s*$", + # Iteration 3: non-deterministic (depends on the optimizer) + f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " + + r"register DEBUG Score: \d+\.\d+ Dataframe:\s*$", + # Final result: baseline is the optimum for the mock environment + f"^{_RE_DATE} run\\.py:\\d+ " + + r"_optimize INFO Env: Mock environment best score: 65\.67\d+\s*$", + ] + ) diff --git a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py index 867abd73c66..bb222b78e18 100644 --- a/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py +++ b/mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py @@ -124,6 +124,7 @@ def __init__(self, *, # pylint: disable=too-many-locals random_design=random_design, config_selector=config_selector, overwrite=True, + logging_level=False, # Use the existing logger ) def __del__(self) -> None:
WeblateOrg__weblate-1655
File download is outdated ### Steps to reproduce 1. Edit string. 2. Download original translation file (without conversion). ### Actual behaviour The downloaded file does not include the recent changes. ### Expected behaviour All changes should be reflected. ### Server configuration Current master
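The patch recorded below fixes this by flushing Weblate's pending in-memory units before the helper opens the file on disk (`author = translation.get_last_author(True)` followed by `translation.update_units(author)`, as shown in the `after_files` and `pr_diff`). The sketch below is not Weblate code; it is a toy model of the failure mode only, showing why the download lagged and where the flush has to happen:

```python
# Toy model of the bug -- none of these names are Weblate's API.
# Recent edits sit in memory ("pending units") while the download helper streams
# the on-disk file, so the downloaded copy lags behind the UI until a flush runs.
class ToyTranslation:
    def __init__(self, path):
        self.path = path
        self.pending = []                 # edits not yet committed to the file

    def edit(self, line):                 # step 1 of the reproduction
        self.pending.append(line)

    def update_units(self):               # flush pending edits to disk
        with open(self.path, "a", encoding="utf-8") as fh:
            fh.writelines(line + "\n" for line in self.pending)
        self.pending.clear()


def download(translation):
    translation.update_units()            # the missing step before this fix
    with open(translation.path, encoding="utf-8") as fh:
        return fh.read()                  # now reflects the latest edits


if __name__ == "__main__":
    open("demo.po", "w", encoding="utf-8").close()
    t = ToyTranslation("demo.po")
    t.edit('msgid "Hello"')
    print(download(t))                    # contains the edit, not a stale file
```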
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © 2012 - 2017 Michal Čihař <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Helper methods for views.\"\"\"\n\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404\nimport django.utils.translation\nfrom django.utils.translation import trans_real, ugettext as _\n\nfrom weblate.utils import messages\nfrom weblate.permissions.helpers import check_access\nfrom weblate.trans.exporters import get_exporter\nfrom weblate.trans.models import Project, SubProject, Translation\n\n\ndef get_translation(request, project, subproject, lang, skip_acl=False):\n \"\"\"Return translation matching parameters.\"\"\"\n translation = get_object_or_404(\n Translation.objects.prefetch(),\n language__code=lang,\n subproject__slug=subproject,\n subproject__project__slug=project,\n enabled=True\n )\n if not skip_acl:\n check_access(request, translation.subproject.project)\n return translation\n\n\ndef get_subproject(request, project, subproject, skip_acl=False):\n \"\"\"Return subproject matching parameters.\"\"\"\n subproject = get_object_or_404(\n SubProject.objects.prefetch(),\n project__slug=project,\n slug=subproject\n )\n if not skip_acl:\n check_access(request, subproject.project)\n return subproject\n\n\ndef get_project(request, project, skip_acl=False):\n \"\"\"Return project matching parameters.\"\"\"\n project = get_object_or_404(\n Project,\n slug=project,\n )\n if not skip_acl:\n check_access(request, project)\n return project\n\n\ndef get_project_translation(request, project=None, subproject=None, lang=None):\n \"\"\"Return project, subproject, translation tuple for given parameters.\"\"\"\n\n if lang is not None and subproject is not None:\n # Language defined? 
We can get all\n translation = get_translation(request, project, subproject, lang)\n subproject = translation.subproject\n project = subproject.project\n else:\n translation = None\n if subproject is not None:\n # Component defined?\n subproject = get_subproject(request, project, subproject)\n project = subproject.project\n elif project is not None:\n # Only project defined?\n project = get_project(request, project)\n\n # Return tuple\n return project, subproject, translation\n\n\ndef try_set_language(lang):\n \"\"\"Try to activate language\"\"\"\n\n try:\n django.utils.translation.activate(lang)\n # workaround for https://code.djangoproject.com/ticket/26050\n # pylint: disable=W0212\n if trans_real.catalog()._catalog is None:\n raise Exception('Invalid language!')\n except Exception:\n # Ignore failure on activating language\n django.utils.translation.activate('en')\n\n\ndef import_message(request, count, message_none, message_ok):\n if count == 0:\n messages.warning(request, message_none)\n else:\n messages.success(request, message_ok % count)\n\n\ndef download_translation_file(translation, fmt=None):\n if fmt is not None:\n try:\n exporter = get_exporter(fmt)(translation=translation)\n except KeyError:\n raise Http404('File format not supported')\n exporter.add_units(translation)\n return exporter.get_response(\n '{{project}}-{0}-{{language}}.{{extension}}'.format(\n translation.subproject.slug\n )\n )\n\n srcfilename = translation.get_filename()\n\n # Construct file name (do not use real filename as it is usually not\n # that useful)\n filename = '{0}-{1}-{2}.{3}'.format(\n translation.subproject.project.slug,\n translation.subproject.slug,\n translation.language.code,\n translation.store.extension\n )\n\n # Create response\n with open(srcfilename) as handle:\n response = HttpResponse(\n handle.read(),\n content_type=translation.store.mimetype\n )\n\n # Fill in response headers\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\n filename\n )\n\n return response\n\n\ndef show_form_errors(request, form):\n \"\"\"Show all form errors as a message.\"\"\"\n for error in form.non_field_errors():\n messages.error(request, error)\n for field in form:\n for error in field.errors:\n messages.error(\n request,\n _('Error in parameter %(field)s: %(error)s') % {\n 'field': field.name,\n 'error': error\n }\n )\n", "path": "weblate/trans/views/helper.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © 2012 - 2017 Michal Čihař <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Helper methods for views.\"\"\"\n\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404\nimport django.utils.translation\nfrom django.utils.translation import trans_real, ugettext as _\n\nfrom weblate.utils import messages\nfrom weblate.permissions.helpers import check_access\nfrom weblate.trans.exporters import get_exporter\nfrom weblate.trans.models import Project, SubProject, Translation\n\n\ndef get_translation(request, project, subproject, lang, skip_acl=False):\n \"\"\"Return translation matching parameters.\"\"\"\n translation = get_object_or_404(\n Translation.objects.prefetch(),\n language__code=lang,\n subproject__slug=subproject,\n subproject__project__slug=project,\n enabled=True\n )\n if not skip_acl:\n check_access(request, translation.subproject.project)\n return translation\n\n\ndef get_subproject(request, project, subproject, skip_acl=False):\n \"\"\"Return subproject matching parameters.\"\"\"\n subproject = get_object_or_404(\n SubProject.objects.prefetch(),\n project__slug=project,\n slug=subproject\n )\n if not skip_acl:\n check_access(request, subproject.project)\n return subproject\n\n\ndef get_project(request, project, skip_acl=False):\n \"\"\"Return project matching parameters.\"\"\"\n project = get_object_or_404(\n Project,\n slug=project,\n )\n if not skip_acl:\n check_access(request, project)\n return project\n\n\ndef get_project_translation(request, project=None, subproject=None, lang=None):\n \"\"\"Return project, subproject, translation tuple for given parameters.\"\"\"\n\n if lang is not None and subproject is not None:\n # Language defined? 
We can get all\n translation = get_translation(request, project, subproject, lang)\n subproject = translation.subproject\n project = subproject.project\n else:\n translation = None\n if subproject is not None:\n # Component defined?\n subproject = get_subproject(request, project, subproject)\n project = subproject.project\n elif project is not None:\n # Only project defined?\n project = get_project(request, project)\n\n # Return tuple\n return project, subproject, translation\n\n\ndef try_set_language(lang):\n \"\"\"Try to activate language\"\"\"\n\n try:\n django.utils.translation.activate(lang)\n # workaround for https://code.djangoproject.com/ticket/26050\n # pylint: disable=W0212\n if trans_real.catalog()._catalog is None:\n raise Exception('Invalid language!')\n except Exception:\n # Ignore failure on activating language\n django.utils.translation.activate('en')\n\n\ndef import_message(request, count, message_none, message_ok):\n if count == 0:\n messages.warning(request, message_none)\n else:\n messages.success(request, message_ok % count)\n\n\ndef download_translation_file(translation, fmt=None):\n if fmt is not None:\n try:\n exporter = get_exporter(fmt)(translation=translation)\n except KeyError:\n raise Http404('File format not supported')\n exporter.add_units(translation)\n return exporter.get_response(\n '{{project}}-{0}-{{language}}.{{extension}}'.format(\n translation.subproject.slug\n )\n )\n\n # Force flushing pending units\n author = translation.get_last_author(True)\n translation.update_units(author)\n\n srcfilename = translation.get_filename()\n\n # Construct file name (do not use real filename as it is usually not\n # that useful)\n filename = '{0}-{1}-{2}.{3}'.format(\n translation.subproject.project.slug,\n translation.subproject.slug,\n translation.language.code,\n translation.store.extension\n )\n\n # Create response\n with open(srcfilename) as handle:\n response = HttpResponse(\n handle.read(),\n content_type=translation.store.mimetype\n )\n\n # Fill in response headers\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\n filename\n )\n\n return response\n\n\ndef show_form_errors(request, form):\n \"\"\"Show all form errors as a message.\"\"\"\n for error in form.non_field_errors():\n messages.error(request, error)\n for field in form:\n for error in field.errors:\n messages.error(\n request,\n _('Error in parameter %(field)s: %(error)s') % {\n 'field': field.name,\n 'error': error\n }\n )\n", "path": "weblate/trans/views/helper.py" } ]
diff --git a/weblate/trans/tests/test_files.py b/weblate/trans/tests/test_files.py index 816cb79a628f..0a3cce423e3d 100644 --- a/weblate/trans/tests/test_files.py +++ b/weblate/trans/tests/test_files.py @@ -360,6 +360,7 @@ def test_export(self): ) ) self.assertContains(response, 'Weblate Hello World 2016') + self.assertContains(response, 'Nazdar svete!') self.assertEqual( response['Content-Disposition'], 'attachment; filename=test-test-cs.po' diff --git a/weblate/trans/views/helper.py b/weblate/trans/views/helper.py index ac817a219989..1c5c9b4df663 100644 --- a/weblate/trans/views/helper.py +++ b/weblate/trans/views/helper.py @@ -123,6 +123,10 @@ def download_translation_file(translation, fmt=None): ) ) + # Force flushing pending units + author = translation.get_last_author(True) + translation.update_units(author) + srcfilename = translation.get_filename() # Construct file name (do not use real filename as it is usually not
dask__distributed-2975
dask.distributed.progress no longer callable in 2.3.0? We've used the progress() function from dask.distributed a bunch in the past to display a progress bar in JupyterLab, but it seems to have stopped working after upgrading to Dask 2.3.0: ``` from dask.distributed import Client, progress import dask.dataframe as dd df = dd.demo.make_timeseries('2010', '2016', {'value': float, 'name': str, 'id': int}, freq='10s', partition_freq='7d', seed=1) df = df.persist() progress(df) ``` Executing this in a single cell in JupyterLab (with an existing Dask cluster already running) results in: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-16af814d7204> in <module> 7 8 df = df.persist() ----> 9 progress(df) TypeError: 'module' object is not callable ``` Let me know if I can provide any more info. Thanks!
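The `pr_diff` below changes the re-export in `distributed/__init__.py` from `from .diagnostics import progress` to `from .diagnostics.progressbar import progress`; the old form appears to bind the `distributed.diagnostics.progress` submodule rather than the function of the same name, which would explain the "'module' object is not callable" error. On an affected 2.3.0 install, importing the function from its defining module is a straightforward workaround; the sketch below just repeats the reporter's reproduction with that import (the local `Client()` setup is illustrative):

```python
from dask.distributed import Client
import dask.dataframe as dd
from distributed.diagnostics.progressbar import progress   # bypasses the shadowed name

client = Client()   # or Client("<scheduler-address>") to attach to an existing cluster
df = dd.demo.make_timeseries('2010', '2016',
                             {'value': float, 'name': str, 'id': int},
                             freq='10s', partition_freq='7d', seed=1).persist()
progress(df)        # renders the progress bar as before
```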
[ { "content": "from . import config\nfrom dask.config import config\nfrom .actor import Actor, ActorFuture\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster, Adaptive, SpecCluster\nfrom .diagnostics import progress\nfrom .client import (\n Client,\n Executor,\n CompatibleExecutor,\n wait,\n as_completed,\n default_client,\n fire_and_forget,\n Future,\n futures_of,\n get_task_stream,\n)\nfrom .lock import Lock\nfrom .nanny import Nanny\nfrom .pubsub import Pub, Sub\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .threadpoolexecutor import rejoin\nfrom .utils import sync\nfrom .variable import Variable\nfrom .worker import Worker, get_worker, get_client, secede, Reschedule\nfrom .worker_client import local_client, worker_client\n\nfrom tornado.gen import TimeoutError\n\nfrom ._version import get_versions\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions\n", "path": "distributed/__init__.py" } ]
[ { "content": "from . import config\nfrom dask.config import config\nfrom .actor import Actor, ActorFuture\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster, Adaptive, SpecCluster\nfrom .diagnostics.progressbar import progress\nfrom .client import (\n Client,\n Executor,\n CompatibleExecutor,\n wait,\n as_completed,\n default_client,\n fire_and_forget,\n Future,\n futures_of,\n get_task_stream,\n)\nfrom .lock import Lock\nfrom .nanny import Nanny\nfrom .pubsub import Pub, Sub\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .threadpoolexecutor import rejoin\nfrom .utils import sync\nfrom .variable import Variable\nfrom .worker import Worker, get_worker, get_client, secede, Reschedule\nfrom .worker_client import local_client, worker_client\n\nfrom tornado.gen import TimeoutError\n\nfrom ._version import get_versions\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions\n", "path": "distributed/__init__.py" } ]
diff --git a/distributed/__init__.py b/distributed/__init__.py index ca36613c815..d79993dfef7 100644 --- a/distributed/__init__.py +++ b/distributed/__init__.py @@ -3,7 +3,7 @@ from .actor import Actor, ActorFuture from .core import connect, rpc from .deploy import LocalCluster, Adaptive, SpecCluster -from .diagnostics import progress +from .diagnostics.progressbar import progress from .client import ( Client, Executor,
numpy__numpy-15672
FIXME in `numpy/__init__.py` related to `numpy.lib` imports There is a FIXME comment in `numpy/__init__.py` that doesn't seem to have a corresponding issue on GitHub, at least not one that I noticed with a cursory search of the issues. https://github.com/numpy/numpy/blob/eb167a3fe540780f397a14817f54a95333fbcc6c/numpy/__init__.py#L140-L145 There is additional code in `numpy/__init__.py` related to this FIXME: https://github.com/numpy/numpy/blob/eb167a3fe540780f397a14817f54a95333fbcc6c/numpy/__init__.py#L178-L184 My intent is getting this into the issue tracker so that it can be discussed/documented and synced up with the code comments. If there is an existing issue that I missed, I'd recommend updating the comment in `numpy/__init__.py` to point there.
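For readers unfamiliar with the pattern the FIXME questions: `from .lib import *` copies every public `numpy.lib` name into the top-level namespace and `__all__.extend(lib.__all__)` re-advertises them there, which is why names such as `Arrayterator` then have to be removed again by hand. A tiny self-contained imitation of that mechanism, using throwaway module objects rather than NumPy itself (all names below are illustrative):

```python
import types

# Throwaway stand-ins for numpy and numpy.lib.
lib = types.ModuleType("pkg.lib")
lib.__all__ = ["histogram", "Arrayterator"]
lib.histogram = lambda a: "binned"
lib.Arrayterator = type("Arrayterator", (), {})

pkg = types.ModuleType("pkg")
pkg.__all__ = []

# The effect of `from .lib import *` followed by `__all__.extend(lib.__all__)`:
for name in lib.__all__:
    setattr(pkg, name, getattr(lib, name))
pkg.__all__.extend(lib.__all__)

# ... and the manual opt-out the quoted block performs for Arrayterator:
pkg.__all__.remove("Arrayterator")
delattr(pkg, "Arrayterator")

assert pkg.histogram is lib.histogram      # one object, public in two namespaces
assert not hasattr(pkg, "Arrayterator")    # hidden at the top level only
```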
[ { "content": "\"\"\"\nNumPy\n=====\n\nProvides\n 1. An array object of arbitrary homogeneous items\n 2. Fast mathematical operations over arrays\n 3. Linear Algebra, Fourier Transforms, Random Number Generation\n\nHow to use the documentation\n----------------------------\nDocumentation is available in two forms: docstrings provided\nwith the code, and a loose standing reference guide, available from\n`the NumPy homepage <https://www.scipy.org>`_.\n\nWe recommend exploring the docstrings using\n`IPython <https://ipython.org>`_, an advanced Python shell with\nTAB-completion and introspection capabilities. See below for further\ninstructions.\n\nThe docstring examples assume that `numpy` has been imported as `np`::\n\n >>> import numpy as np\n\nCode snippets are indicated by three greater-than signs::\n\n >>> x = 42\n >>> x = x + 1\n\nUse the built-in ``help`` function to view a function's docstring::\n\n >>> help(np.sort)\n ... # doctest: +SKIP\n\nFor some objects, ``np.info(obj)`` may provide additional help. This is\nparticularly true if you see the line \"Help on ufunc object:\" at the top\nof the help() page. Ufuncs are implemented in C, not Python, for speed.\nThe native Python help() does not know how to view their help, but our\nnp.info() function does.\n\nTo search for documents containing a keyword, do::\n\n >>> np.lookfor('keyword')\n ... # doctest: +SKIP\n\nGeneral-purpose documents like a glossary and help on the basic concepts\nof numpy are available under the ``doc`` sub-module::\n\n >>> from numpy import doc\n >>> help(doc)\n ... # doctest: +SKIP\n\nAvailable subpackages\n---------------------\ndoc\n Topical documentation on broadcasting, indexing, etc.\nlib\n Basic functions used by several sub-packages.\nrandom\n Core Random Tools\nlinalg\n Core Linear Algebra Tools\nfft\n Core FFT routines\npolynomial\n Polynomial tools\ntesting\n NumPy testing tools\nf2py\n Fortran to Python Interface Generator.\ndistutils\n Enhancements to distutils with support for\n Fortran compilers support and more.\n\nUtilities\n---------\ntest\n Run numpy unittests\nshow_config\n Show numpy build configuration\ndual\n Overwrite certain functions with high-performance Scipy tools\nmatlib\n Make everything matrices.\n__version__\n NumPy version string\n\nViewing documentation using IPython\n-----------------------------------\nStart IPython with the NumPy profile (``ipython -p numpy``), which will\nimport `numpy` under the alias `np`. Then, use the ``cpaste`` command to\npaste examples into the shell. To see which functions are available in\n`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use\n``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow\ndown the list. To view the docstring for a function, use\n``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view\nthe source code).\n\nCopies vs. in-place operation\n-----------------------------\nMost of the functions in `numpy` return a copy of the array argument\n(e.g., `np.sort`). In-place versions of these functions are often\navailable as array methods, i.e. 
``x = np.array([1,2,3]); x.sort()``.\nExceptions to this rule are documented.\n\n\"\"\"\nimport sys\nimport warnings\n\nfrom ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning\nfrom ._globals import _NoValue\n\n# We first need to detect if we're being called as part of the numpy setup\n# procedure itself in a reliable manner.\ntry:\n __NUMPY_SETUP__\nexcept NameError:\n __NUMPY_SETUP__ = False\n\nif __NUMPY_SETUP__:\n sys.stderr.write('Running from numpy source directory.\\n')\nelse:\n try:\n from numpy.__config__ import show as show_config\n except ImportError:\n msg = \"\"\"Error importing numpy: you should not try to import numpy from\n its source directory; please exit the numpy source tree, and relaunch\n your python interpreter from there.\"\"\"\n raise ImportError(msg)\n\n from .version import git_revision as __git_revision__\n from .version import version as __version__\n\n __all__ = ['ModuleDeprecationWarning',\n 'VisibleDeprecationWarning']\n\n # Allow distributors to run custom init code\n from . import _distributor_init\n\n from . import core\n from .core import *\n from . import compat\n from . import lib\n # FIXME: why have numpy.lib if everything is imported here??\n from .lib import *\n\n from . import linalg\n from . import fft\n from . import polynomial\n from . import random\n from . import ctypeslib\n from . import ma\n from . import matrixlib as _mat\n from .matrixlib import *\n from .compat import long\n\n # Make these accessible from numpy name-space\n # but not imported in from numpy import *\n # TODO[gh-6103]: Deprecate these\n from builtins import bool, int, float, complex, object, str\n unicode = str\n\n from .core import round, abs, max, min\n # now that numpy modules are imported, can initialize limits\n core.getlimits._register_known_types()\n\n __all__.extend(['__version__', 'show_config'])\n __all__.extend(core.__all__)\n __all__.extend(_mat.__all__)\n __all__.extend(lib.__all__)\n __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])\n\n # These are added by `from .core import *` and `core.__all__`, but we\n # overwrite them above with builtins we do _not_ want to export.\n __all__.remove('long')\n __all__.remove('unicode')\n\n # Remove things that are in the numpy.lib but not in the numpy namespace\n # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)\n # that prevents adding more things to the main namespace by accident.\n # The list below will grow until the `from .lib import *` fixme above is\n # taken care of\n __all__.remove('Arrayterator')\n del Arrayterator\n\n # Filter out Cython harmless warnings\n warnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ndarray size changed\")\n\n # oldnumeric and numarray were removed in 1.9. 
In case some packages import\n # but do not use them, we define them here for backward compatibility.\n oldnumeric = 'removed'\n numarray = 'removed'\n\n if sys.version_info[:2] >= (3, 7):\n # Importing Tester requires importing all of UnitTest which is not a\n # cheap import Since it is mainly used in test suits, we lazy import it\n # here to save on the order of 10 ms of import time for most users\n #\n # The previous way Tester was imported also had a side effect of adding\n # the full `numpy.testing` namespace\n #\n # module level getattr is only supported in 3.7 onwards\n # https://www.python.org/dev/peps/pep-0562/\n def __getattr__(attr):\n if attr == 'testing':\n import numpy.testing as testing\n return testing\n elif attr == 'Tester':\n from .testing import Tester\n return Tester\n else:\n raise AttributeError(\"module {!r} has no attribute \"\n \"{!r}\".format(__name__, attr))\n\n def __dir__():\n return list(globals().keys() | {'Tester', 'testing'})\n\n else:\n # We don't actually use this ourselves anymore, but I'm not 100% sure that\n # no-one else in the world is using it (though I hope not)\n from .testing import Tester\n\n # Pytest testing\n from numpy._pytesttester import PytestTester\n test = PytestTester(__name__)\n del PytestTester\n\n\n def _sanity_check():\n \"\"\"\n Quick sanity checks for common bugs caused by environment.\n There are some cases e.g. with wrong BLAS ABI that cause wrong\n results under specific runtime conditions that are not necessarily\n achieved during test suite runs, and it is useful to catch those early.\n\n See https://github.com/numpy/numpy/issues/8577 and other\n similar bug reports.\n\n \"\"\"\n try:\n x = ones(2, dtype=float32)\n if not abs(x.dot(x) - 2.0) < 1e-5:\n raise AssertionError()\n except AssertionError:\n msg = (\"The current Numpy installation ({!r}) fails to \"\n \"pass simple sanity checks. This can be caused for example \"\n \"by incorrect BLAS library being linked in, or by mixing \"\n \"package managers (pip, conda, apt, ...). Search closed \"\n \"numpy issues for similar problems.\")\n raise RuntimeError(msg.format(__file__))\n\n _sanity_check()\n del _sanity_check\n", "path": "numpy/__init__.py" } ]
[ { "content": "\"\"\"\nNumPy\n=====\n\nProvides\n 1. An array object of arbitrary homogeneous items\n 2. Fast mathematical operations over arrays\n 3. Linear Algebra, Fourier Transforms, Random Number Generation\n\nHow to use the documentation\n----------------------------\nDocumentation is available in two forms: docstrings provided\nwith the code, and a loose standing reference guide, available from\n`the NumPy homepage <https://www.scipy.org>`_.\n\nWe recommend exploring the docstrings using\n`IPython <https://ipython.org>`_, an advanced Python shell with\nTAB-completion and introspection capabilities. See below for further\ninstructions.\n\nThe docstring examples assume that `numpy` has been imported as `np`::\n\n >>> import numpy as np\n\nCode snippets are indicated by three greater-than signs::\n\n >>> x = 42\n >>> x = x + 1\n\nUse the built-in ``help`` function to view a function's docstring::\n\n >>> help(np.sort)\n ... # doctest: +SKIP\n\nFor some objects, ``np.info(obj)`` may provide additional help. This is\nparticularly true if you see the line \"Help on ufunc object:\" at the top\nof the help() page. Ufuncs are implemented in C, not Python, for speed.\nThe native Python help() does not know how to view their help, but our\nnp.info() function does.\n\nTo search for documents containing a keyword, do::\n\n >>> np.lookfor('keyword')\n ... # doctest: +SKIP\n\nGeneral-purpose documents like a glossary and help on the basic concepts\nof numpy are available under the ``doc`` sub-module::\n\n >>> from numpy import doc\n >>> help(doc)\n ... # doctest: +SKIP\n\nAvailable subpackages\n---------------------\ndoc\n Topical documentation on broadcasting, indexing, etc.\nlib\n Basic functions used by several sub-packages.\nrandom\n Core Random Tools\nlinalg\n Core Linear Algebra Tools\nfft\n Core FFT routines\npolynomial\n Polynomial tools\ntesting\n NumPy testing tools\nf2py\n Fortran to Python Interface Generator.\ndistutils\n Enhancements to distutils with support for\n Fortran compilers support and more.\n\nUtilities\n---------\ntest\n Run numpy unittests\nshow_config\n Show numpy build configuration\ndual\n Overwrite certain functions with high-performance Scipy tools\nmatlib\n Make everything matrices.\n__version__\n NumPy version string\n\nViewing documentation using IPython\n-----------------------------------\nStart IPython with the NumPy profile (``ipython -p numpy``), which will\nimport `numpy` under the alias `np`. Then, use the ``cpaste`` command to\npaste examples into the shell. To see which functions are available in\n`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use\n``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow\ndown the list. To view the docstring for a function, use\n``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view\nthe source code).\n\nCopies vs. in-place operation\n-----------------------------\nMost of the functions in `numpy` return a copy of the array argument\n(e.g., `np.sort`). In-place versions of these functions are often\navailable as array methods, i.e. 
``x = np.array([1,2,3]); x.sort()``.\nExceptions to this rule are documented.\n\n\"\"\"\nimport sys\nimport warnings\n\nfrom ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning\nfrom ._globals import _NoValue\n\n# We first need to detect if we're being called as part of the numpy setup\n# procedure itself in a reliable manner.\ntry:\n __NUMPY_SETUP__\nexcept NameError:\n __NUMPY_SETUP__ = False\n\nif __NUMPY_SETUP__:\n sys.stderr.write('Running from numpy source directory.\\n')\nelse:\n try:\n from numpy.__config__ import show as show_config\n except ImportError:\n msg = \"\"\"Error importing numpy: you should not try to import numpy from\n its source directory; please exit the numpy source tree, and relaunch\n your python interpreter from there.\"\"\"\n raise ImportError(msg)\n\n from .version import git_revision as __git_revision__\n from .version import version as __version__\n\n __all__ = ['ModuleDeprecationWarning',\n 'VisibleDeprecationWarning']\n\n # Allow distributors to run custom init code\n from . import _distributor_init\n\n from . import core\n from .core import *\n from . import compat\n from . import lib\n # NOTE: to be revisited following future namespace cleanup.\n # See gh-14454 and gh-15672 for discussion.\n from .lib import *\n\n from . import linalg\n from . import fft\n from . import polynomial\n from . import random\n from . import ctypeslib\n from . import ma\n from . import matrixlib as _mat\n from .matrixlib import *\n from .compat import long\n\n # Make these accessible from numpy name-space\n # but not imported in from numpy import *\n # TODO[gh-6103]: Deprecate these\n from builtins import bool, int, float, complex, object, str\n unicode = str\n\n from .core import round, abs, max, min\n # now that numpy modules are imported, can initialize limits\n core.getlimits._register_known_types()\n\n __all__.extend(['__version__', 'show_config'])\n __all__.extend(core.__all__)\n __all__.extend(_mat.__all__)\n __all__.extend(lib.__all__)\n __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])\n\n # These are added by `from .core import *` and `core.__all__`, but we\n # overwrite them above with builtins we do _not_ want to export.\n __all__.remove('long')\n __all__.remove('unicode')\n\n # Remove things that are in the numpy.lib but not in the numpy namespace\n # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)\n # that prevents adding more things to the main namespace by accident.\n # The list below will grow until the `from .lib import *` fixme above is\n # taken care of\n __all__.remove('Arrayterator')\n del Arrayterator\n\n # Filter out Cython harmless warnings\n warnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ndarray size changed\")\n\n # oldnumeric and numarray were removed in 1.9. 
In case some packages import\n # but do not use them, we define them here for backward compatibility.\n oldnumeric = 'removed'\n numarray = 'removed'\n\n if sys.version_info[:2] >= (3, 7):\n # Importing Tester requires importing all of UnitTest which is not a\n # cheap import Since it is mainly used in test suits, we lazy import it\n # here to save on the order of 10 ms of import time for most users\n #\n # The previous way Tester was imported also had a side effect of adding\n # the full `numpy.testing` namespace\n #\n # module level getattr is only supported in 3.7 onwards\n # https://www.python.org/dev/peps/pep-0562/\n def __getattr__(attr):\n if attr == 'testing':\n import numpy.testing as testing\n return testing\n elif attr == 'Tester':\n from .testing import Tester\n return Tester\n else:\n raise AttributeError(\"module {!r} has no attribute \"\n \"{!r}\".format(__name__, attr))\n\n def __dir__():\n return list(globals().keys() | {'Tester', 'testing'})\n\n else:\n # We don't actually use this ourselves anymore, but I'm not 100% sure that\n # no-one else in the world is using it (though I hope not)\n from .testing import Tester\n\n # Pytest testing\n from numpy._pytesttester import PytestTester\n test = PytestTester(__name__)\n del PytestTester\n\n\n def _sanity_check():\n \"\"\"\n Quick sanity checks for common bugs caused by environment.\n There are some cases e.g. with wrong BLAS ABI that cause wrong\n results under specific runtime conditions that are not necessarily\n achieved during test suite runs, and it is useful to catch those early.\n\n See https://github.com/numpy/numpy/issues/8577 and other\n similar bug reports.\n\n \"\"\"\n try:\n x = ones(2, dtype=float32)\n if not abs(x.dot(x) - 2.0) < 1e-5:\n raise AssertionError()\n except AssertionError:\n msg = (\"The current Numpy installation ({!r}) fails to \"\n \"pass simple sanity checks. This can be caused for example \"\n \"by incorrect BLAS library being linked in, or by mixing \"\n \"package managers (pip, conda, apt, ...). Search closed \"\n \"numpy issues for similar problems.\")\n raise RuntimeError(msg.format(__file__))\n\n _sanity_check()\n del _sanity_check\n", "path": "numpy/__init__.py" } ]
diff --git a/numpy/__init__.py b/numpy/__init__.py index c5c58b0200ca..ba35224e6ad5 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -141,7 +141,8 @@ from .core import * from . import compat from . import lib - # FIXME: why have numpy.lib if everything is imported here?? + # NOTE: to be revisited following future namespace cleanup. + # See gh-14454 and gh-15672 for discussion. from .lib import * from . import linalg
PrefectHQ__prefect-10669
Email notifications example form input needs adjusted ### First check - [X] I added a descriptive title to this issue. - [X] I used the GitHub search to find a similar issue and didn't find it. - [X] I searched the Prefect documentation for this issue. - [X] I checked that this issue is related to Prefect and not one of its dependencies. ### Bug summary Here's the default example in a Prefect server UI. <img width="1185" alt="Screenshot 2023-09-07 at 1 51 05 PM" src="https://github.com/PrefectHQ/prefect/assets/7703961/270dcce8-c9f8-484f-aeab-11dfebdabb92"> But this doesn't work. <img width="1187" alt="Screenshot 2023-09-07 at 1 51 24 PM" src="https://github.com/PrefectHQ/prefect/assets/7703961/d4e38e4f-9aa3-4c04-8615-3c5cf2144e50"> Quotes are required around the address. ### Reproduction ```python3 see above ``` ### Error _No response_ ### Versions ```Text Version: 2.12.1 API version: 0.8.4 Python version: 3.10.8 Git commit: f5eed67c Built: Fri, Sep 1, 2023 4:01 PM OS/Arch: darwin/arm64 Profile: local Server type: ephemeral Server: Database: sqlite SQLite version: 3.40.0 ``` ### Additional context _No response_
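The screenshots are not reproduced here, but the reporter's note that quotes are required suggests the address list in the notification form is parsed as a JSON array of strings; the record's `before_files` below show `prefect/blocks/notifications.py`, whose block fields carry the `example=` strings rendered in that form. A Prefect-independent illustration of why a bare address fails while a quoted one parses:

```python
import json

good = '["me@example.com", "you@example.com"]'   # quoted -> valid JSON array
bad = '[me@example.com]'                         # unquoted, as in the default example

print(json.loads(good))                          # ['me@example.com', 'you@example.com']
try:
    json.loads(bad)
except json.JSONDecodeError as exc:
    print(f"rejected: {exc}")                    # bare addresses are not JSON strings
```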
[ { "content": "from abc import ABC\nfrom typing import Dict, List, Optional\n\nfrom pydantic import AnyHttpUrl, Field, SecretStr\nfrom typing_extensions import Literal\n\nfrom prefect.blocks.abstract import NotificationBlock\nfrom prefect.blocks.fields import SecretDict\nfrom prefect.events.instrument import instrument_instance_method_call\nfrom prefect.utilities.asyncutils import sync_compatible\nfrom prefect.utilities.templating import apply_values, find_placeholders\n\nPREFECT_NOTIFY_TYPE_DEFAULT = \"prefect_default\"\n\n\nclass AbstractAppriseNotificationBlock(NotificationBlock, ABC):\n \"\"\"\n An abstract class for sending notifications using Apprise.\n \"\"\"\n\n notify_type: Literal[\"prefect_default\", \"info\", \"success\", \"warning\", \"failure\"] = (\n Field(\n default=PREFECT_NOTIFY_TYPE_DEFAULT,\n description=(\n \"The type of notification being performed; the prefect_default \"\n \"is a plain notification that does not attach an image.\"\n ),\n )\n )\n\n def __init__(self, *args, **kwargs):\n import apprise\n\n if PREFECT_NOTIFY_TYPE_DEFAULT not in apprise.NOTIFY_TYPES:\n apprise.NOTIFY_TYPES += (PREFECT_NOTIFY_TYPE_DEFAULT,)\n\n super().__init__(*args, **kwargs)\n\n def _start_apprise_client(self, url: SecretStr):\n from apprise import Apprise, AppriseAsset\n\n # A custom `AppriseAsset` that ensures Prefect Notifications\n # appear correctly across multiple messaging platforms\n prefect_app_data = AppriseAsset(\n app_id=\"Prefect Notifications\",\n app_desc=\"Prefect Notifications\",\n app_url=\"https://prefect.io\",\n )\n\n self._apprise_client = Apprise(asset=prefect_app_data)\n self._apprise_client.add(url.get_secret_value())\n\n def block_initialization(self) -> None:\n self._start_apprise_client(self.url)\n\n @sync_compatible\n @instrument_instance_method_call()\n async def notify(self, body: str, subject: Optional[str] = None):\n await self._apprise_client.async_notify(\n body=body, title=subject, notify_type=self.notify_type\n )\n\n\nclass AppriseNotificationBlock(AbstractAppriseNotificationBlock, ABC):\n \"\"\"\n A base class for sending notifications using Apprise, through webhook URLs.\n \"\"\"\n\n _documentation_url = \"https://docs.prefect.io/ui/notifications/\"\n url: SecretStr = Field(\n default=...,\n title=\"Webhook URL\",\n description=\"Incoming webhook URL used to send notifications.\",\n example=\"https://hooks.example.com/XXX\",\n )\n\n\n# TODO: Move to prefect-slack once collection block auto-registration is\n# available\nclass SlackWebhook(AppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Slack webhook.\n\n Examples:\n Load a saved Slack webhook and send a message:\n ```python\n from prefect.blocks.notifications import SlackWebhook\n\n slack_webhook_block = SlackWebhook.load(\"BLOCK_NAME\")\n slack_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _block_type_name = \"Slack Webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/7dkzINU9r6j44giEFuHuUC/85d4cd321ad60c1b1e898bc3fbd28580/5cb480cd5f1b6d3fbadece79.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.SlackWebhook\"\n\n url: SecretStr = Field(\n default=...,\n title=\"Webhook URL\",\n description=\"Slack incoming webhook URL used to send notifications.\",\n example=\"https://hooks.slack.com/XXX\",\n )\n\n\nclass MicrosoftTeamsWebhook(AppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Microsoft Teams webhook.\n\n Examples:\n 
Load a saved Teams webhook and send a message:\n ```python\n from prefect.blocks.notifications import MicrosoftTeamsWebhook\n teams_webhook_block = MicrosoftTeamsWebhook.load(\"BLOCK_NAME\")\n teams_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _block_type_name = \"Microsoft Teams Webhook\"\n _block_type_slug = \"ms-teams-webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/6n0dSTBzwoVPhX8Vgg37i7/9040e07a62def4f48242be3eae6d3719/teams_logo.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.MicrosoftTeamsWebhook\"\n\n url: SecretStr = Field(\n ...,\n title=\"Webhook URL\",\n description=\"The Teams incoming webhook URL used to send notifications.\",\n example=(\n \"https://your-org.webhook.office.com/webhookb2/XXX/IncomingWebhook/YYY/ZZZ\"\n ),\n )\n\n\nclass PagerDutyWebHook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided PagerDuty webhook.\n See [Apprise notify_pagerduty docs](https://github.com/caronc/apprise/wiki/Notify_pagerduty)\n for more info on formatting the URL.\n\n Examples:\n Load a saved PagerDuty webhook and send a message:\n ```python\n from prefect.blocks.notifications import PagerDutyWebHook\n pagerduty_webhook_block = PagerDutyWebHook.load(\"BLOCK_NAME\")\n pagerduty_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided PagerDuty webhook.\"\n\n _block_type_name = \"Pager Duty Webhook\"\n _block_type_slug = \"pager-duty-webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/6FHJ4Lcozjfl1yDPxCvQDT/c2f6bdf47327271c068284897527f3da/PagerDuty-Logo.wine.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.PagerDutyWebHook\"\n\n # The default cannot be prefect_default because NotifyPagerDuty's\n # PAGERDUTY_SEVERITY_MAP only has these notify types defined as keys\n notify_type: Literal[\"info\", \"success\", \"warning\", \"failure\"] = Field(\n default=\"info\", description=\"The severity of the notification.\"\n )\n\n integration_key: SecretStr = Field(\n default=...,\n description=(\n \"This can be found on the Events API V2 \"\n \"integration's detail page, and is also referred to as a Routing Key. \"\n \"This must be provided alongside `api_key`, but will error if provided \"\n \"alongside `url`.\"\n ),\n )\n\n api_key: SecretStr = Field(\n default=...,\n title=\"API Key\",\n description=(\n \"This can be found under Integrations. 
\"\n \"This must be provided alongside `integration_key`, but will error if \"\n \"provided alongside `url`.\"\n ),\n )\n\n source: Optional[str] = Field(\n default=\"Prefect\", description=\"The source string as part of the payload.\"\n )\n\n component: str = Field(\n default=\"Notification\",\n description=\"The component string as part of the payload.\",\n )\n\n group: Optional[str] = Field(\n default=None, description=\"The group string as part of the payload.\"\n )\n\n class_id: Optional[str] = Field(\n default=None,\n title=\"Class ID\",\n description=\"The class string as part of the payload.\",\n )\n\n region_name: Literal[\"us\", \"eu\"] = Field(\n default=\"us\", description=\"The region name.\"\n )\n\n clickable_url: Optional[AnyHttpUrl] = Field(\n default=None,\n title=\"Clickable URL\",\n description=\"A clickable URL to associate with the notice.\",\n )\n\n include_image: bool = Field(\n default=True,\n description=\"Associate the notification status via a represented icon.\",\n )\n\n custom_details: Optional[Dict[str, str]] = Field(\n default=None,\n description=\"Additional details to include as part of the payload.\",\n example='{\"disk_space_left\": \"145GB\"}',\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyPagerDuty import NotifyPagerDuty\n\n url = SecretStr(\n NotifyPagerDuty(\n apikey=self.api_key.get_secret_value(),\n integrationkey=self.integration_key.get_secret_value(),\n source=self.source,\n component=self.component,\n group=self.group,\n class_id=self.class_id,\n region_name=self.region_name,\n click=self.clickable_url,\n include_image=self.include_image,\n details=self.custom_details,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass TwilioSMS(AbstractAppriseNotificationBlock):\n \"\"\"Enables sending notifications via Twilio SMS.\n Find more on sending Twilio SMS messages in the [docs](https://www.twilio.com/docs/sms).\n\n Examples:\n Load a saved `TwilioSMS` block and send a message:\n ```python\n from prefect.blocks.notifications import TwilioSMS\n twilio_webhook_block = TwilioSMS.load(\"BLOCK_NAME\")\n twilio_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via Twilio SMS.\"\n _block_type_name = \"Twilio SMS\"\n _block_type_slug = \"twilio-sms\"\n _logo_url = \"https://images.ctfassets.net/zscdif0zqppk/YTCgPL6bnK3BczP2gV9md/609283105a7006c57dbfe44ee1a8f313/58482bb9cef1014c0b5e4a31.png?h=250\" # noqa\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.TwilioSMS\"\n\n account_sid: str = Field(\n default=...,\n description=(\n \"The Twilio Account SID - it can be found on the homepage \"\n \"of the Twilio console.\"\n ),\n )\n\n auth_token: SecretStr = Field(\n default=...,\n description=(\n \"The Twilio Authentication Token - \"\n \"it can be found on the homepage of the Twilio console.\"\n ),\n )\n\n from_phone_number: str = Field(\n default=...,\n description=\"The valid Twilio phone number to send the message from.\",\n example=\"18001234567\",\n )\n\n to_phone_numbers: List[str] = Field(\n default=...,\n description=\"A list of valid Twilio phone number(s) to send the message to.\",\n # not wrapped in brackets because of the way UI displays examples; in code should be [\"18004242424\"]\n example=\"18004242424\",\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyTwilio import NotifyTwilio\n\n url = SecretStr(\n NotifyTwilio(\n account_sid=self.account_sid,\n 
auth_token=self.auth_token.get_secret_value(),\n source=self.from_phone_number,\n targets=self.to_phone_numbers,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass OpsgenieWebhook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Opsgenie webhook.\n See [Apprise notify_opsgenie docs](https://github.com/caronc/apprise/wiki/Notify_opsgenie)\n for more info on formatting the URL.\n\n Examples:\n Load a saved Opsgenie webhook and send a message:\n ```python\n from prefect.blocks.notifications import OpsgenieWebhook\n opsgenie_webhook_block = OpsgenieWebhook.load(\"BLOCK_NAME\")\n opsgenie_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided Opsgenie webhook.\"\n\n _block_type_name = \"Opsgenie Webhook\"\n _block_type_slug = \"opsgenie-webhook\"\n _logo_url = \"https://images.ctfassets.net/sahxz1jinscj/3habq8fTzmplh7Ctkppk4/590cecb73f766361fcea9223cd47bad8/opsgenie.png\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.OpsgenieWebhook\"\n\n apikey: SecretStr = Field(\n default=...,\n title=\"API Key\",\n description=\"The API Key associated with your Opsgenie account.\",\n )\n\n target_user: Optional[List] = Field(\n default=None, description=\"The user(s) you wish to notify.\"\n )\n\n target_team: Optional[List] = Field(\n default=None, description=\"The team(s) you wish to notify.\"\n )\n\n target_schedule: Optional[List] = Field(\n default=None, description=\"The schedule(s) you wish to notify.\"\n )\n\n target_escalation: Optional[List] = Field(\n default=None, description=\"The escalation(s) you wish to notify.\"\n )\n\n region_name: Literal[\"us\", \"eu\"] = Field(\n default=\"us\", description=\"The 2-character region code.\"\n )\n\n batch: bool = Field(\n default=False,\n description=\"Notify all targets in batches (instead of individually).\",\n )\n\n tags: Optional[List] = Field(\n default=None,\n description=(\n \"A comma-separated list of tags you can associate with your Opsgenie\"\n \" message.\"\n ),\n example='[\"tag1\", \"tag2\"]',\n )\n\n priority: Optional[str] = Field(\n default=3,\n description=(\n \"The priority to associate with the message. 
It is on a scale between 1\"\n \" (LOW) and 5 (EMERGENCY).\"\n ),\n )\n\n alias: Optional[str] = Field(\n default=None, description=\"The alias to associate with the message.\"\n )\n\n entity: Optional[str] = Field(\n default=None, description=\"The entity to associate with the message.\"\n )\n\n details: Optional[Dict[str, str]] = Field(\n default=None,\n description=\"Additional details composed of key/values pairs.\",\n example='{\"key1\": \"value1\", \"key2\": \"value2\"}',\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyOpsgenie import NotifyOpsgenie\n\n targets = []\n if self.target_user:\n [targets.append(f\"@{x}\") for x in self.target_user]\n if self.target_team:\n [targets.append(f\"#{x}\") for x in self.target_team]\n if self.target_schedule:\n [targets.append(f\"*{x}\") for x in self.target_schedule]\n if self.target_escalation:\n [targets.append(f\"^{x}\") for x in self.target_escalation]\n url = SecretStr(\n NotifyOpsgenie(\n apikey=self.apikey.get_secret_value(),\n targets=targets,\n region_name=self.region_name,\n details=self.details,\n priority=self.priority,\n alias=self.alias,\n entity=self.entity,\n batch=self.batch,\n tags=self.tags,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass MattermostWebhook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Mattermost webhook.\n See [Apprise notify_Mattermost docs](https://github.com/caronc/apprise/wiki/Notify_Mattermost) # noqa\n\n\n Examples:\n Load a saved Mattermost webhook and send a message:\n ```python\n from prefect.blocks.notifications import MattermostWebhook\n\n mattermost_webhook_block = MattermostWebhook.load(\"BLOCK_NAME\")\n\n mattermost_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided Mattermost webhook.\"\n _block_type_name = \"Mattermost Webhook\"\n _block_type_slug = \"mattermost-webhook\"\n _logo_url = \"https://images.ctfassets.net/zscdif0zqppk/3mlbsJDAmK402ER1sf0zUF/a48ac43fa38f395dd5f56c6ed29f22bb/mattermost-logo-png-transparent.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.MattermostWebhook\"\n\n hostname: str = Field(\n default=...,\n description=\"The hostname of your Mattermost server.\",\n example=\"Mattermost.example.com\",\n )\n\n token: SecretStr = Field(\n default=...,\n description=\"The token associated with your Mattermost webhook.\",\n )\n\n botname: Optional[str] = Field(\n title=\"Bot name\",\n default=None,\n description=\"The name of the bot that will send the message.\",\n )\n\n channels: Optional[List[str]] = Field(\n default=None,\n description=\"The channel(s) you wish to notify.\",\n )\n\n include_image: bool = Field(\n default=False,\n description=\"Whether to include the Apprise status image in the message.\",\n )\n\n path: Optional[str] = Field(\n default=None,\n description=\"An optional sub-path specification to append to the hostname.\",\n )\n\n port: int = Field(\n default=8065,\n description=\"The port of your Mattermost server.\",\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyMattermost import NotifyMattermost\n\n url = SecretStr(\n NotifyMattermost(\n token=self.token.get_secret_value(),\n fullpath=self.path,\n host=self.hostname,\n botname=self.botname,\n channels=self.channels,\n include_image=self.include_image,\n port=self.port,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass 
DiscordWebhook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Discord webhook.\n See [Apprise notify_Discord docs](https://github.com/caronc/apprise/wiki/Notify_Discord) # noqa\n\n Examples:\n Load a saved Discord webhook and send a message:\n ```python\n from prefect.blocks.notifications import DiscordWebhook\n\n discord_webhook_block = DiscordWebhook.load(\"BLOCK_NAME\")\n\n discord_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided Discord webhook.\"\n _block_type_name = \"Discord Webhook\"\n _block_type_slug = \"discord-webhook\"\n _logo_url = \"https://images.ctfassets.net/keir3zrx8eg0/64fsff0qm7st33BqViEpqY/e177db0d1ada88a7ee6c9433576b98d5/icons8-discord-new-480.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.DiscordWebhook\"\n\n webhook_id: SecretStr = Field(\n default=...,\n description=(\n \"The first part of 2 tokens provided to you after creating a\"\n \" incoming-webhook.\"\n ),\n )\n\n webhook_token: SecretStr = Field(\n default=...,\n description=(\n \"The second part of 2 tokens provided to you after creating a\"\n \" incoming-webhook.\"\n ),\n )\n\n botname: Optional[str] = Field(\n title=\"Bot name\",\n default=None,\n description=(\n \"Identify the name of the bot that should issue the message. If one isn't\"\n \" specified then the default is to just use your account (associated with\"\n \" the incoming-webhook).\"\n ),\n )\n\n tts: bool = Field(\n default=False,\n description=\"Whether to enable Text-To-Speech.\",\n )\n\n include_image: bool = Field(\n default=False,\n description=(\n \"Whether to include an image in-line with the message describing the\"\n \" notification type.\"\n ),\n )\n\n avatar: bool = Field(\n default=False,\n description=\"Whether to override the default discord avatar icon.\",\n )\n\n avatar_url: Optional[str] = Field(\n title=\"Avatar URL\",\n default=False,\n description=(\n \"Over-ride the default discord avatar icon URL. 
By default this is not set\"\n \" and Apprise chooses the URL dynamically based on the type of message\"\n \" (info, success, warning, or error).\"\n ),\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyDiscord import NotifyDiscord\n\n url = SecretStr(\n NotifyDiscord(\n webhook_id=self.webhook_id.get_secret_value(),\n webhook_token=self.webhook_token.get_secret_value(),\n botname=self.botname,\n tts=self.tts,\n include_image=self.include_image,\n avatar=self.avatar,\n avatar_url=self.avatar_url,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass CustomWebhookNotificationBlock(NotificationBlock):\n \"\"\"\n Enables sending notifications via any custom webhook.\n\n All nested string param contains `{{key}}` will be substituted with value from context/secrets.\n\n Context values include: `subject`, `body` and `name`.\n\n Examples:\n Load a saved custom webhook and send a message:\n ```python\n from prefect.blocks.notifications import CustomWebhookNotificationBlock\n\n custom_webhook_block = CustomWebhookNotificationBlock.load(\"BLOCK_NAME\")\n\n custom_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _block_type_name = \"Custom Webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/6ciCsTFsvUAiiIvTllMfOU/627e9513376ca457785118fbba6a858d/webhook_icon_138018.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.CustomWebhookNotificationBlock\"\n\n name: str = Field(title=\"Name\", description=\"Name of the webhook.\")\n\n url: str = Field(\n title=\"Webhook URL\",\n description=\"The webhook URL.\",\n example=\"https://hooks.slack.com/XXX\",\n )\n\n method: Literal[\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"] = Field(\n default=\"POST\", description=\"The webhook request method. Defaults to `POST`.\"\n )\n\n params: Optional[Dict[str, str]] = Field(\n default=None, title=\"Query Params\", description=\"Custom query params.\"\n )\n json_data: Optional[dict] = Field(\n default=None,\n title=\"JSON Data\",\n description=\"Send json data as payload.\",\n example=(\n '{\"text\": \"{{subject}}\\\\n{{body}}\", \"title\": \"{{name}}\", \"token\":'\n ' \"{{tokenFromSecrets}}\"}'\n ),\n )\n form_data: Optional[Dict[str, str]] = Field(\n default=None,\n title=\"Form Data\",\n description=(\n \"Send form data as payload. Should not be used together with _JSON Data_.\"\n ),\n example=(\n '{\"text\": \"{{subject}}\\\\n{{body}}\", \"title\": \"{{name}}\", \"token\":'\n ' \"{{tokenFromSecrets}}\"}'\n ),\n )\n\n headers: Optional[Dict[str, str]] = Field(None, description=\"Custom headers.\")\n cookies: Optional[Dict[str, str]] = Field(None, description=\"Custom cookies.\")\n\n timeout: float = Field(\n default=10, description=\"Request timeout in seconds. 
Defaults to 10.\"\n )\n\n secrets: SecretDict = Field(\n default_factory=lambda: SecretDict(dict()),\n title=\"Custom Secret Values\",\n description=\"A dictionary of secret values to be substituted in other configs.\",\n example='{\"tokenFromSecrets\":\"SomeSecretToken\"}',\n )\n\n def _build_request_args(self, body: str, subject: Optional[str]):\n \"\"\"Build kwargs for httpx.AsyncClient.request\"\"\"\n # prepare values\n values = self.secrets.get_secret_value()\n # use 'null' when subject is None\n values.update(\n {\n \"subject\": \"null\" if subject is None else subject,\n \"body\": body,\n \"name\": self.name,\n }\n )\n # do substution\n return apply_values(\n {\n \"method\": self.method,\n \"url\": self.url,\n \"params\": self.params,\n \"data\": self.form_data,\n \"json\": self.json_data,\n \"headers\": self.headers,\n \"cookies\": self.cookies,\n \"timeout\": self.timeout,\n },\n values,\n )\n\n def block_initialization(self) -> None:\n # check form_data and json_data\n if self.form_data is not None and self.json_data is not None:\n raise ValueError(\"both `Form Data` and `JSON Data` provided\")\n allowed_keys = {\"subject\", \"body\", \"name\"}.union(\n self.secrets.get_secret_value().keys()\n )\n # test template to raise a error early\n for name in [\"url\", \"params\", \"form_data\", \"json_data\", \"headers\", \"cookies\"]:\n template = getattr(self, name)\n if template is None:\n continue\n # check for placeholders not in predefined keys and secrets\n placeholders = find_placeholders(template)\n for placeholder in placeholders:\n if placeholder.name not in allowed_keys:\n raise KeyError(f\"{name}/{placeholder}\")\n\n @sync_compatible\n @instrument_instance_method_call()\n async def notify(self, body: str, subject: Optional[str] = None):\n import httpx\n\n # make request with httpx\n client = httpx.AsyncClient(headers={\"user-agent\": \"Prefect Notifications\"})\n resp = await client.request(**self._build_request_args(body, subject))\n resp.raise_for_status()\n\n\nclass SendgridEmail(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via any sendgrid account.\n See [Apprise Notify_sendgrid docs](https://github.com/caronc/apprise/wiki/Notify_Sendgrid)\n\n Examples:\n Load a saved Sendgrid and send a email message:\n ```python\n from prefect.blocks.notifications import SendgridEmail\n\n sendgrid_block = SendgridEmail.load(\"BLOCK_NAME\")\n\n sendgrid_block.notify(\"Hello from Prefect!\")\n \"\"\"\n\n _description = \"Enables sending notifications via Sendgrid email service.\"\n _block_type_name = \"Sendgrid Email\"\n _block_type_slug = \"sendgrid-email\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/3PcxFuO9XUqs7wU9MiUBMg/af6affa646899cc1712d14b7fc4c0f1f/email__1_.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.SendgridEmail\"\n\n api_key: SecretStr = Field(\n default=...,\n title=\"API Key\",\n description=\"The API Key associated with your sendgrid account.\",\n )\n\n sender_email: str = Field(\n title=\"Sender email id\",\n description=\"The sender email id.\",\n example=\"[email protected]\",\n )\n\n to_emails: List[str] = Field(\n default=...,\n title=\"Recipient emails\",\n description=\"Email ids of all recipients.\",\n example=\"[email protected]\",\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifySendGrid import NotifySendGrid\n\n url = SecretStr(\n NotifySendGrid(\n apikey=self.api_key.get_secret_value(),\n 
from_email=self.sender_email,\n targets=self.to_emails,\n ).url()\n )\n\n self._start_apprise_client(url)\n", "path": "src/prefect/blocks/notifications.py" } ]
[ { "content": "from abc import ABC\nfrom typing import Dict, List, Optional\n\nfrom pydantic import AnyHttpUrl, Field, SecretStr\nfrom typing_extensions import Literal\n\nfrom prefect.blocks.abstract import NotificationBlock\nfrom prefect.blocks.fields import SecretDict\nfrom prefect.events.instrument import instrument_instance_method_call\nfrom prefect.utilities.asyncutils import sync_compatible\nfrom prefect.utilities.templating import apply_values, find_placeholders\n\nPREFECT_NOTIFY_TYPE_DEFAULT = \"prefect_default\"\n\n\nclass AbstractAppriseNotificationBlock(NotificationBlock, ABC):\n \"\"\"\n An abstract class for sending notifications using Apprise.\n \"\"\"\n\n notify_type: Literal[\"prefect_default\", \"info\", \"success\", \"warning\", \"failure\"] = (\n Field(\n default=PREFECT_NOTIFY_TYPE_DEFAULT,\n description=(\n \"The type of notification being performed; the prefect_default \"\n \"is a plain notification that does not attach an image.\"\n ),\n )\n )\n\n def __init__(self, *args, **kwargs):\n import apprise\n\n if PREFECT_NOTIFY_TYPE_DEFAULT not in apprise.NOTIFY_TYPES:\n apprise.NOTIFY_TYPES += (PREFECT_NOTIFY_TYPE_DEFAULT,)\n\n super().__init__(*args, **kwargs)\n\n def _start_apprise_client(self, url: SecretStr):\n from apprise import Apprise, AppriseAsset\n\n # A custom `AppriseAsset` that ensures Prefect Notifications\n # appear correctly across multiple messaging platforms\n prefect_app_data = AppriseAsset(\n app_id=\"Prefect Notifications\",\n app_desc=\"Prefect Notifications\",\n app_url=\"https://prefect.io\",\n )\n\n self._apprise_client = Apprise(asset=prefect_app_data)\n self._apprise_client.add(url.get_secret_value())\n\n def block_initialization(self) -> None:\n self._start_apprise_client(self.url)\n\n @sync_compatible\n @instrument_instance_method_call()\n async def notify(self, body: str, subject: Optional[str] = None):\n await self._apprise_client.async_notify(\n body=body, title=subject, notify_type=self.notify_type\n )\n\n\nclass AppriseNotificationBlock(AbstractAppriseNotificationBlock, ABC):\n \"\"\"\n A base class for sending notifications using Apprise, through webhook URLs.\n \"\"\"\n\n _documentation_url = \"https://docs.prefect.io/ui/notifications/\"\n url: SecretStr = Field(\n default=...,\n title=\"Webhook URL\",\n description=\"Incoming webhook URL used to send notifications.\",\n example=\"https://hooks.example.com/XXX\",\n )\n\n\n# TODO: Move to prefect-slack once collection block auto-registration is\n# available\nclass SlackWebhook(AppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Slack webhook.\n\n Examples:\n Load a saved Slack webhook and send a message:\n ```python\n from prefect.blocks.notifications import SlackWebhook\n\n slack_webhook_block = SlackWebhook.load(\"BLOCK_NAME\")\n slack_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _block_type_name = \"Slack Webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/7dkzINU9r6j44giEFuHuUC/85d4cd321ad60c1b1e898bc3fbd28580/5cb480cd5f1b6d3fbadece79.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.SlackWebhook\"\n\n url: SecretStr = Field(\n default=...,\n title=\"Webhook URL\",\n description=\"Slack incoming webhook URL used to send notifications.\",\n example=\"https://hooks.slack.com/XXX\",\n )\n\n\nclass MicrosoftTeamsWebhook(AppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Microsoft Teams webhook.\n\n Examples:\n 
Load a saved Teams webhook and send a message:\n ```python\n from prefect.blocks.notifications import MicrosoftTeamsWebhook\n teams_webhook_block = MicrosoftTeamsWebhook.load(\"BLOCK_NAME\")\n teams_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _block_type_name = \"Microsoft Teams Webhook\"\n _block_type_slug = \"ms-teams-webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/6n0dSTBzwoVPhX8Vgg37i7/9040e07a62def4f48242be3eae6d3719/teams_logo.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.MicrosoftTeamsWebhook\"\n\n url: SecretStr = Field(\n ...,\n title=\"Webhook URL\",\n description=\"The Teams incoming webhook URL used to send notifications.\",\n example=(\n \"https://your-org.webhook.office.com/webhookb2/XXX/IncomingWebhook/YYY/ZZZ\"\n ),\n )\n\n\nclass PagerDutyWebHook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided PagerDuty webhook.\n See [Apprise notify_pagerduty docs](https://github.com/caronc/apprise/wiki/Notify_pagerduty)\n for more info on formatting the URL.\n\n Examples:\n Load a saved PagerDuty webhook and send a message:\n ```python\n from prefect.blocks.notifications import PagerDutyWebHook\n pagerduty_webhook_block = PagerDutyWebHook.load(\"BLOCK_NAME\")\n pagerduty_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided PagerDuty webhook.\"\n\n _block_type_name = \"Pager Duty Webhook\"\n _block_type_slug = \"pager-duty-webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/6FHJ4Lcozjfl1yDPxCvQDT/c2f6bdf47327271c068284897527f3da/PagerDuty-Logo.wine.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.PagerDutyWebHook\"\n\n # The default cannot be prefect_default because NotifyPagerDuty's\n # PAGERDUTY_SEVERITY_MAP only has these notify types defined as keys\n notify_type: Literal[\"info\", \"success\", \"warning\", \"failure\"] = Field(\n default=\"info\", description=\"The severity of the notification.\"\n )\n\n integration_key: SecretStr = Field(\n default=...,\n description=(\n \"This can be found on the Events API V2 \"\n \"integration's detail page, and is also referred to as a Routing Key. \"\n \"This must be provided alongside `api_key`, but will error if provided \"\n \"alongside `url`.\"\n ),\n )\n\n api_key: SecretStr = Field(\n default=...,\n title=\"API Key\",\n description=(\n \"This can be found under Integrations. 
\"\n \"This must be provided alongside `integration_key`, but will error if \"\n \"provided alongside `url`.\"\n ),\n )\n\n source: Optional[str] = Field(\n default=\"Prefect\", description=\"The source string as part of the payload.\"\n )\n\n component: str = Field(\n default=\"Notification\",\n description=\"The component string as part of the payload.\",\n )\n\n group: Optional[str] = Field(\n default=None, description=\"The group string as part of the payload.\"\n )\n\n class_id: Optional[str] = Field(\n default=None,\n title=\"Class ID\",\n description=\"The class string as part of the payload.\",\n )\n\n region_name: Literal[\"us\", \"eu\"] = Field(\n default=\"us\", description=\"The region name.\"\n )\n\n clickable_url: Optional[AnyHttpUrl] = Field(\n default=None,\n title=\"Clickable URL\",\n description=\"A clickable URL to associate with the notice.\",\n )\n\n include_image: bool = Field(\n default=True,\n description=\"Associate the notification status via a represented icon.\",\n )\n\n custom_details: Optional[Dict[str, str]] = Field(\n default=None,\n description=\"Additional details to include as part of the payload.\",\n example='{\"disk_space_left\": \"145GB\"}',\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyPagerDuty import NotifyPagerDuty\n\n url = SecretStr(\n NotifyPagerDuty(\n apikey=self.api_key.get_secret_value(),\n integrationkey=self.integration_key.get_secret_value(),\n source=self.source,\n component=self.component,\n group=self.group,\n class_id=self.class_id,\n region_name=self.region_name,\n click=self.clickable_url,\n include_image=self.include_image,\n details=self.custom_details,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass TwilioSMS(AbstractAppriseNotificationBlock):\n \"\"\"Enables sending notifications via Twilio SMS.\n Find more on sending Twilio SMS messages in the [docs](https://www.twilio.com/docs/sms).\n\n Examples:\n Load a saved `TwilioSMS` block and send a message:\n ```python\n from prefect.blocks.notifications import TwilioSMS\n twilio_webhook_block = TwilioSMS.load(\"BLOCK_NAME\")\n twilio_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via Twilio SMS.\"\n _block_type_name = \"Twilio SMS\"\n _block_type_slug = \"twilio-sms\"\n _logo_url = \"https://images.ctfassets.net/zscdif0zqppk/YTCgPL6bnK3BczP2gV9md/609283105a7006c57dbfe44ee1a8f313/58482bb9cef1014c0b5e4a31.png?h=250\" # noqa\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.TwilioSMS\"\n\n account_sid: str = Field(\n default=...,\n description=(\n \"The Twilio Account SID - it can be found on the homepage \"\n \"of the Twilio console.\"\n ),\n )\n\n auth_token: SecretStr = Field(\n default=...,\n description=(\n \"The Twilio Authentication Token - \"\n \"it can be found on the homepage of the Twilio console.\"\n ),\n )\n\n from_phone_number: str = Field(\n default=...,\n description=\"The valid Twilio phone number to send the message from.\",\n example=\"18001234567\",\n )\n\n to_phone_numbers: List[str] = Field(\n default=...,\n description=\"A list of valid Twilio phone number(s) to send the message to.\",\n # not wrapped in brackets because of the way UI displays examples; in code should be [\"18004242424\"]\n example=\"18004242424\",\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyTwilio import NotifyTwilio\n\n url = SecretStr(\n NotifyTwilio(\n account_sid=self.account_sid,\n 
auth_token=self.auth_token.get_secret_value(),\n source=self.from_phone_number,\n targets=self.to_phone_numbers,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass OpsgenieWebhook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Opsgenie webhook.\n See [Apprise notify_opsgenie docs](https://github.com/caronc/apprise/wiki/Notify_opsgenie)\n for more info on formatting the URL.\n\n Examples:\n Load a saved Opsgenie webhook and send a message:\n ```python\n from prefect.blocks.notifications import OpsgenieWebhook\n opsgenie_webhook_block = OpsgenieWebhook.load(\"BLOCK_NAME\")\n opsgenie_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided Opsgenie webhook.\"\n\n _block_type_name = \"Opsgenie Webhook\"\n _block_type_slug = \"opsgenie-webhook\"\n _logo_url = \"https://images.ctfassets.net/sahxz1jinscj/3habq8fTzmplh7Ctkppk4/590cecb73f766361fcea9223cd47bad8/opsgenie.png\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.OpsgenieWebhook\"\n\n apikey: SecretStr = Field(\n default=...,\n title=\"API Key\",\n description=\"The API Key associated with your Opsgenie account.\",\n )\n\n target_user: Optional[List] = Field(\n default=None, description=\"The user(s) you wish to notify.\"\n )\n\n target_team: Optional[List] = Field(\n default=None, description=\"The team(s) you wish to notify.\"\n )\n\n target_schedule: Optional[List] = Field(\n default=None, description=\"The schedule(s) you wish to notify.\"\n )\n\n target_escalation: Optional[List] = Field(\n default=None, description=\"The escalation(s) you wish to notify.\"\n )\n\n region_name: Literal[\"us\", \"eu\"] = Field(\n default=\"us\", description=\"The 2-character region code.\"\n )\n\n batch: bool = Field(\n default=False,\n description=\"Notify all targets in batches (instead of individually).\",\n )\n\n tags: Optional[List] = Field(\n default=None,\n description=(\n \"A comma-separated list of tags you can associate with your Opsgenie\"\n \" message.\"\n ),\n example='[\"tag1\", \"tag2\"]',\n )\n\n priority: Optional[str] = Field(\n default=3,\n description=(\n \"The priority to associate with the message. 
It is on a scale between 1\"\n \" (LOW) and 5 (EMERGENCY).\"\n ),\n )\n\n alias: Optional[str] = Field(\n default=None, description=\"The alias to associate with the message.\"\n )\n\n entity: Optional[str] = Field(\n default=None, description=\"The entity to associate with the message.\"\n )\n\n details: Optional[Dict[str, str]] = Field(\n default=None,\n description=\"Additional details composed of key/values pairs.\",\n example='{\"key1\": \"value1\", \"key2\": \"value2\"}',\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyOpsgenie import NotifyOpsgenie\n\n targets = []\n if self.target_user:\n [targets.append(f\"@{x}\") for x in self.target_user]\n if self.target_team:\n [targets.append(f\"#{x}\") for x in self.target_team]\n if self.target_schedule:\n [targets.append(f\"*{x}\") for x in self.target_schedule]\n if self.target_escalation:\n [targets.append(f\"^{x}\") for x in self.target_escalation]\n url = SecretStr(\n NotifyOpsgenie(\n apikey=self.apikey.get_secret_value(),\n targets=targets,\n region_name=self.region_name,\n details=self.details,\n priority=self.priority,\n alias=self.alias,\n entity=self.entity,\n batch=self.batch,\n tags=self.tags,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass MattermostWebhook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Mattermost webhook.\n See [Apprise notify_Mattermost docs](https://github.com/caronc/apprise/wiki/Notify_Mattermost) # noqa\n\n\n Examples:\n Load a saved Mattermost webhook and send a message:\n ```python\n from prefect.blocks.notifications import MattermostWebhook\n\n mattermost_webhook_block = MattermostWebhook.load(\"BLOCK_NAME\")\n\n mattermost_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided Mattermost webhook.\"\n _block_type_name = \"Mattermost Webhook\"\n _block_type_slug = \"mattermost-webhook\"\n _logo_url = \"https://images.ctfassets.net/zscdif0zqppk/3mlbsJDAmK402ER1sf0zUF/a48ac43fa38f395dd5f56c6ed29f22bb/mattermost-logo-png-transparent.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.MattermostWebhook\"\n\n hostname: str = Field(\n default=...,\n description=\"The hostname of your Mattermost server.\",\n example=\"Mattermost.example.com\",\n )\n\n token: SecretStr = Field(\n default=...,\n description=\"The token associated with your Mattermost webhook.\",\n )\n\n botname: Optional[str] = Field(\n title=\"Bot name\",\n default=None,\n description=\"The name of the bot that will send the message.\",\n )\n\n channels: Optional[List[str]] = Field(\n default=None,\n description=\"The channel(s) you wish to notify.\",\n )\n\n include_image: bool = Field(\n default=False,\n description=\"Whether to include the Apprise status image in the message.\",\n )\n\n path: Optional[str] = Field(\n default=None,\n description=\"An optional sub-path specification to append to the hostname.\",\n )\n\n port: int = Field(\n default=8065,\n description=\"The port of your Mattermost server.\",\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyMattermost import NotifyMattermost\n\n url = SecretStr(\n NotifyMattermost(\n token=self.token.get_secret_value(),\n fullpath=self.path,\n host=self.hostname,\n botname=self.botname,\n channels=self.channels,\n include_image=self.include_image,\n port=self.port,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass 
DiscordWebhook(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via a provided Discord webhook.\n See [Apprise notify_Discord docs](https://github.com/caronc/apprise/wiki/Notify_Discord) # noqa\n\n Examples:\n Load a saved Discord webhook and send a message:\n ```python\n from prefect.blocks.notifications import DiscordWebhook\n\n discord_webhook_block = DiscordWebhook.load(\"BLOCK_NAME\")\n\n discord_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _description = \"Enables sending notifications via a provided Discord webhook.\"\n _block_type_name = \"Discord Webhook\"\n _block_type_slug = \"discord-webhook\"\n _logo_url = \"https://images.ctfassets.net/keir3zrx8eg0/64fsff0qm7st33BqViEpqY/e177db0d1ada88a7ee6c9433576b98d5/icons8-discord-new-480.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.DiscordWebhook\"\n\n webhook_id: SecretStr = Field(\n default=...,\n description=(\n \"The first part of 2 tokens provided to you after creating a\"\n \" incoming-webhook.\"\n ),\n )\n\n webhook_token: SecretStr = Field(\n default=...,\n description=(\n \"The second part of 2 tokens provided to you after creating a\"\n \" incoming-webhook.\"\n ),\n )\n\n botname: Optional[str] = Field(\n title=\"Bot name\",\n default=None,\n description=(\n \"Identify the name of the bot that should issue the message. If one isn't\"\n \" specified then the default is to just use your account (associated with\"\n \" the incoming-webhook).\"\n ),\n )\n\n tts: bool = Field(\n default=False,\n description=\"Whether to enable Text-To-Speech.\",\n )\n\n include_image: bool = Field(\n default=False,\n description=(\n \"Whether to include an image in-line with the message describing the\"\n \" notification type.\"\n ),\n )\n\n avatar: bool = Field(\n default=False,\n description=\"Whether to override the default discord avatar icon.\",\n )\n\n avatar_url: Optional[str] = Field(\n title=\"Avatar URL\",\n default=False,\n description=(\n \"Over-ride the default discord avatar icon URL. 
By default this is not set\"\n \" and Apprise chooses the URL dynamically based on the type of message\"\n \" (info, success, warning, or error).\"\n ),\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifyDiscord import NotifyDiscord\n\n url = SecretStr(\n NotifyDiscord(\n webhook_id=self.webhook_id.get_secret_value(),\n webhook_token=self.webhook_token.get_secret_value(),\n botname=self.botname,\n tts=self.tts,\n include_image=self.include_image,\n avatar=self.avatar,\n avatar_url=self.avatar_url,\n ).url()\n )\n self._start_apprise_client(url)\n\n\nclass CustomWebhookNotificationBlock(NotificationBlock):\n \"\"\"\n Enables sending notifications via any custom webhook.\n\n All nested string param contains `{{key}}` will be substituted with value from context/secrets.\n\n Context values include: `subject`, `body` and `name`.\n\n Examples:\n Load a saved custom webhook and send a message:\n ```python\n from prefect.blocks.notifications import CustomWebhookNotificationBlock\n\n custom_webhook_block = CustomWebhookNotificationBlock.load(\"BLOCK_NAME\")\n\n custom_webhook_block.notify(\"Hello from Prefect!\")\n ```\n \"\"\"\n\n _block_type_name = \"Custom Webhook\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/6ciCsTFsvUAiiIvTllMfOU/627e9513376ca457785118fbba6a858d/webhook_icon_138018.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.CustomWebhookNotificationBlock\"\n\n name: str = Field(title=\"Name\", description=\"Name of the webhook.\")\n\n url: str = Field(\n title=\"Webhook URL\",\n description=\"The webhook URL.\",\n example=\"https://hooks.slack.com/XXX\",\n )\n\n method: Literal[\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"] = Field(\n default=\"POST\", description=\"The webhook request method. Defaults to `POST`.\"\n )\n\n params: Optional[Dict[str, str]] = Field(\n default=None, title=\"Query Params\", description=\"Custom query params.\"\n )\n json_data: Optional[dict] = Field(\n default=None,\n title=\"JSON Data\",\n description=\"Send json data as payload.\",\n example=(\n '{\"text\": \"{{subject}}\\\\n{{body}}\", \"title\": \"{{name}}\", \"token\":'\n ' \"{{tokenFromSecrets}}\"}'\n ),\n )\n form_data: Optional[Dict[str, str]] = Field(\n default=None,\n title=\"Form Data\",\n description=(\n \"Send form data as payload. Should not be used together with _JSON Data_.\"\n ),\n example=(\n '{\"text\": \"{{subject}}\\\\n{{body}}\", \"title\": \"{{name}}\", \"token\":'\n ' \"{{tokenFromSecrets}}\"}'\n ),\n )\n\n headers: Optional[Dict[str, str]] = Field(None, description=\"Custom headers.\")\n cookies: Optional[Dict[str, str]] = Field(None, description=\"Custom cookies.\")\n\n timeout: float = Field(\n default=10, description=\"Request timeout in seconds. 
Defaults to 10.\"\n )\n\n secrets: SecretDict = Field(\n default_factory=lambda: SecretDict(dict()),\n title=\"Custom Secret Values\",\n description=\"A dictionary of secret values to be substituted in other configs.\",\n example='{\"tokenFromSecrets\":\"SomeSecretToken\"}',\n )\n\n def _build_request_args(self, body: str, subject: Optional[str]):\n \"\"\"Build kwargs for httpx.AsyncClient.request\"\"\"\n # prepare values\n values = self.secrets.get_secret_value()\n # use 'null' when subject is None\n values.update(\n {\n \"subject\": \"null\" if subject is None else subject,\n \"body\": body,\n \"name\": self.name,\n }\n )\n # do substution\n return apply_values(\n {\n \"method\": self.method,\n \"url\": self.url,\n \"params\": self.params,\n \"data\": self.form_data,\n \"json\": self.json_data,\n \"headers\": self.headers,\n \"cookies\": self.cookies,\n \"timeout\": self.timeout,\n },\n values,\n )\n\n def block_initialization(self) -> None:\n # check form_data and json_data\n if self.form_data is not None and self.json_data is not None:\n raise ValueError(\"both `Form Data` and `JSON Data` provided\")\n allowed_keys = {\"subject\", \"body\", \"name\"}.union(\n self.secrets.get_secret_value().keys()\n )\n # test template to raise a error early\n for name in [\"url\", \"params\", \"form_data\", \"json_data\", \"headers\", \"cookies\"]:\n template = getattr(self, name)\n if template is None:\n continue\n # check for placeholders not in predefined keys and secrets\n placeholders = find_placeholders(template)\n for placeholder in placeholders:\n if placeholder.name not in allowed_keys:\n raise KeyError(f\"{name}/{placeholder}\")\n\n @sync_compatible\n @instrument_instance_method_call()\n async def notify(self, body: str, subject: Optional[str] = None):\n import httpx\n\n # make request with httpx\n client = httpx.AsyncClient(headers={\"user-agent\": \"Prefect Notifications\"})\n resp = await client.request(**self._build_request_args(body, subject))\n resp.raise_for_status()\n\n\nclass SendgridEmail(AbstractAppriseNotificationBlock):\n \"\"\"\n Enables sending notifications via any sendgrid account.\n See [Apprise Notify_sendgrid docs](https://github.com/caronc/apprise/wiki/Notify_Sendgrid)\n\n Examples:\n Load a saved Sendgrid and send a email message:\n ```python\n from prefect.blocks.notifications import SendgridEmail\n\n sendgrid_block = SendgridEmail.load(\"BLOCK_NAME\")\n\n sendgrid_block.notify(\"Hello from Prefect!\")\n \"\"\"\n\n _description = \"Enables sending notifications via Sendgrid email service.\"\n _block_type_name = \"Sendgrid Email\"\n _block_type_slug = \"sendgrid-email\"\n _logo_url = \"https://images.ctfassets.net/gm98wzqotmnx/3PcxFuO9XUqs7wU9MiUBMg/af6affa646899cc1712d14b7fc4c0f1f/email__1_.png?h=250\"\n _documentation_url = \"https://docs.prefect.io/api-ref/prefect/blocks/notifications/#prefect.blocks.notifications.SendgridEmail\"\n\n api_key: SecretStr = Field(\n default=...,\n title=\"API Key\",\n description=\"The API Key associated with your sendgrid account.\",\n )\n\n sender_email: str = Field(\n title=\"Sender email id\",\n description=\"The sender email id.\",\n example=\"[email protected]\",\n )\n\n to_emails: List[str] = Field(\n default=...,\n title=\"Recipient emails\",\n description=\"Email ids of all recipients.\",\n example='\"[email protected]\"',\n )\n\n def block_initialization(self) -> None:\n from apprise.plugins.NotifySendGrid import NotifySendGrid\n\n url = SecretStr(\n NotifySendGrid(\n apikey=self.api_key.get_secret_value(),\n 
from_email=self.sender_email,\n targets=self.to_emails,\n ).url()\n )\n\n self._start_apprise_client(url)\n", "path": "src/prefect/blocks/notifications.py" } ]
diff --git a/src/prefect/blocks/notifications.py b/src/prefect/blocks/notifications.py index 52cfc6e3ad03..86d7c1d6ff53 100644 --- a/src/prefect/blocks/notifications.py +++ b/src/prefect/blocks/notifications.py @@ -749,7 +749,7 @@ class SendgridEmail(AbstractAppriseNotificationBlock): default=..., title="Recipient emails", description="Email ids of all recipients.", - example="[email protected]", + example='"[email protected]"', ) def block_initialization(self) -> None:
tobymao__sqlglot-2598
sqlglot corrupts date_format spec for MySQL
**Before you file an issue**
> - Make sure you specify the "read" dialect eg. parse_one(sql, read="spark")

Yes, `read='mysql'`

> - Check if the issue still exists on main

Yes

**Fully reproducible code snippet**
> Please include a fully reproducible code snippet or the input sql, dialect, and expected output.

```
In [19]: import sqlglot

In [20]: sqlglot.parse_one("date_format(now(), '%Y-%m-%d %H:%i:00.0000')", read='mysql').sql(dialect='mysql')
Out[20]: "DATE_FORMAT(NOW(), '%Y-%m-%d %H:%M:00.0000')"
```

sqlglot uses the `%M` specifier for minutes, but MySQL expects `%i`.

**Official Documentation**
> Please include links to official SQL documentation related to your issue.

https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format
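For reference, a small check of the behaviour the reporter expects once the MySQL minute specifier is preserved; the assert describes the desired output, not what the version quoted above emits.

```python
import sqlglot

# Desired round trip: the MySQL '%i' minute specifier should survive
# parse -> generate when both read and write dialects are 'mysql'.
sql = sqlglot.parse_one(
    "date_format(now(), '%Y-%m-%d %H:%i:00.0000')", read="mysql"
).sql(dialect="mysql")
assert sql == "DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:00.0000')"
```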
[ { "content": "import typing as t\n\n# The generic time format is based on python time.strftime.\n# https://docs.python.org/3/library/time.html#time.strftime\nfrom sqlglot.trie import TrieResult, in_trie, new_trie\n\n\ndef format_time(\n string: str, mapping: t.Dict[str, str], trie: t.Optional[t.Dict] = None\n) -> t.Optional[str]:\n \"\"\"\n Converts a time string given a mapping.\n\n Examples:\n >>> format_time(\"%Y\", {\"%Y\": \"YYYY\"})\n 'YYYY'\n\n Args:\n mapping: dictionary of time format to target time format.\n trie: optional trie, can be passed in for performance.\n\n Returns:\n The converted time string.\n \"\"\"\n if not string:\n return None\n\n start = 0\n end = 1\n size = len(string)\n trie = trie or new_trie(mapping)\n current = trie\n chunks = []\n sym = None\n\n while end <= size:\n chars = string[start:end]\n result, current = in_trie(current, chars[-1])\n\n if result == TrieResult.FAILED:\n if sym:\n end -= 1\n chars = sym\n sym = None\n start += len(chars)\n chunks.append(chars)\n current = trie\n elif result == TrieResult.EXISTS:\n sym = chars\n\n end += 1\n\n if result != TrieResult.FAILED and end > size:\n chunks.append(chars)\n\n return \"\".join(mapping.get(chars, chars) for chars in chunks)\n\n\nTIMEZONES = {\n tz.lower()\n for tz in (\n \"Africa/Abidjan\",\n \"Africa/Accra\",\n \"Africa/Addis_Ababa\",\n \"Africa/Algiers\",\n \"Africa/Asmara\",\n \"Africa/Asmera\",\n \"Africa/Bamako\",\n \"Africa/Bangui\",\n \"Africa/Banjul\",\n \"Africa/Bissau\",\n \"Africa/Blantyre\",\n \"Africa/Brazzaville\",\n \"Africa/Bujumbura\",\n \"Africa/Cairo\",\n \"Africa/Casablanca\",\n \"Africa/Ceuta\",\n \"Africa/Conakry\",\n \"Africa/Dakar\",\n \"Africa/Dar_es_Salaam\",\n \"Africa/Djibouti\",\n \"Africa/Douala\",\n \"Africa/El_Aaiun\",\n \"Africa/Freetown\",\n \"Africa/Gaborone\",\n \"Africa/Harare\",\n \"Africa/Johannesburg\",\n \"Africa/Juba\",\n \"Africa/Kampala\",\n \"Africa/Khartoum\",\n \"Africa/Kigali\",\n \"Africa/Kinshasa\",\n \"Africa/Lagos\",\n \"Africa/Libreville\",\n \"Africa/Lome\",\n \"Africa/Luanda\",\n \"Africa/Lubumbashi\",\n \"Africa/Lusaka\",\n \"Africa/Malabo\",\n \"Africa/Maputo\",\n \"Africa/Maseru\",\n \"Africa/Mbabane\",\n \"Africa/Mogadishu\",\n \"Africa/Monrovia\",\n \"Africa/Nairobi\",\n \"Africa/Ndjamena\",\n \"Africa/Niamey\",\n \"Africa/Nouakchott\",\n \"Africa/Ouagadougou\",\n \"Africa/Porto-Novo\",\n \"Africa/Sao_Tome\",\n \"Africa/Timbuktu\",\n \"Africa/Tripoli\",\n \"Africa/Tunis\",\n \"Africa/Windhoek\",\n \"America/Adak\",\n \"America/Anchorage\",\n \"America/Anguilla\",\n \"America/Antigua\",\n \"America/Araguaina\",\n \"America/Argentina/Buenos_Aires\",\n \"America/Argentina/Catamarca\",\n \"America/Argentina/ComodRivadavia\",\n \"America/Argentina/Cordoba\",\n \"America/Argentina/Jujuy\",\n \"America/Argentina/La_Rioja\",\n \"America/Argentina/Mendoza\",\n \"America/Argentina/Rio_Gallegos\",\n \"America/Argentina/Salta\",\n \"America/Argentina/San_Juan\",\n \"America/Argentina/San_Luis\",\n \"America/Argentina/Tucuman\",\n \"America/Argentina/Ushuaia\",\n \"America/Aruba\",\n \"America/Asuncion\",\n \"America/Atikokan\",\n \"America/Atka\",\n \"America/Bahia\",\n \"America/Bahia_Banderas\",\n \"America/Barbados\",\n \"America/Belem\",\n \"America/Belize\",\n \"America/Blanc-Sablon\",\n \"America/Boa_Vista\",\n \"America/Bogota\",\n \"America/Boise\",\n \"America/Buenos_Aires\",\n \"America/Cambridge_Bay\",\n \"America/Campo_Grande\",\n \"America/Cancun\",\n \"America/Caracas\",\n \"America/Catamarca\",\n \"America/Cayenne\",\n 
\"America/Cayman\",\n \"America/Chicago\",\n \"America/Chihuahua\",\n \"America/Ciudad_Juarez\",\n \"America/Coral_Harbour\",\n \"America/Cordoba\",\n \"America/Costa_Rica\",\n \"America/Creston\",\n \"America/Cuiaba\",\n \"America/Curacao\",\n \"America/Danmarkshavn\",\n \"America/Dawson\",\n \"America/Dawson_Creek\",\n \"America/Denver\",\n \"America/Detroit\",\n \"America/Dominica\",\n \"America/Edmonton\",\n \"America/Eirunepe\",\n \"America/El_Salvador\",\n \"America/Ensenada\",\n \"America/Fort_Nelson\",\n \"America/Fort_Wayne\",\n \"America/Fortaleza\",\n \"America/Glace_Bay\",\n \"America/Godthab\",\n \"America/Goose_Bay\",\n \"America/Grand_Turk\",\n \"America/Grenada\",\n \"America/Guadeloupe\",\n \"America/Guatemala\",\n \"America/Guayaquil\",\n \"America/Guyana\",\n \"America/Halifax\",\n \"America/Havana\",\n \"America/Hermosillo\",\n \"America/Indiana/Indianapolis\",\n \"America/Indiana/Knox\",\n \"America/Indiana/Marengo\",\n \"America/Indiana/Petersburg\",\n \"America/Indiana/Tell_City\",\n \"America/Indiana/Vevay\",\n \"America/Indiana/Vincennes\",\n \"America/Indiana/Winamac\",\n \"America/Indianapolis\",\n \"America/Inuvik\",\n \"America/Iqaluit\",\n \"America/Jamaica\",\n \"America/Jujuy\",\n \"America/Juneau\",\n \"America/Kentucky/Louisville\",\n \"America/Kentucky/Monticello\",\n \"America/Knox_IN\",\n \"America/Kralendijk\",\n \"America/La_Paz\",\n \"America/Lima\",\n \"America/Los_Angeles\",\n \"America/Louisville\",\n \"America/Lower_Princes\",\n \"America/Maceio\",\n \"America/Managua\",\n \"America/Manaus\",\n \"America/Marigot\",\n \"America/Martinique\",\n \"America/Matamoros\",\n \"America/Mazatlan\",\n \"America/Mendoza\",\n \"America/Menominee\",\n \"America/Merida\",\n \"America/Metlakatla\",\n \"America/Mexico_City\",\n \"America/Miquelon\",\n \"America/Moncton\",\n \"America/Monterrey\",\n \"America/Montevideo\",\n \"America/Montreal\",\n \"America/Montserrat\",\n \"America/Nassau\",\n \"America/New_York\",\n \"America/Nipigon\",\n \"America/Nome\",\n \"America/Noronha\",\n \"America/North_Dakota/Beulah\",\n \"America/North_Dakota/Center\",\n \"America/North_Dakota/New_Salem\",\n \"America/Nuuk\",\n \"America/Ojinaga\",\n \"America/Panama\",\n \"America/Pangnirtung\",\n \"America/Paramaribo\",\n \"America/Phoenix\",\n \"America/Port-au-Prince\",\n \"America/Port_of_Spain\",\n \"America/Porto_Acre\",\n \"America/Porto_Velho\",\n \"America/Puerto_Rico\",\n \"America/Punta_Arenas\",\n \"America/Rainy_River\",\n \"America/Rankin_Inlet\",\n \"America/Recife\",\n \"America/Regina\",\n \"America/Resolute\",\n \"America/Rio_Branco\",\n \"America/Rosario\",\n \"America/Santa_Isabel\",\n \"America/Santarem\",\n \"America/Santiago\",\n \"America/Santo_Domingo\",\n \"America/Sao_Paulo\",\n \"America/Scoresbysund\",\n \"America/Shiprock\",\n \"America/Sitka\",\n \"America/St_Barthelemy\",\n \"America/St_Johns\",\n \"America/St_Kitts\",\n \"America/St_Lucia\",\n \"America/St_Thomas\",\n \"America/St_Vincent\",\n \"America/Swift_Current\",\n \"America/Tegucigalpa\",\n \"America/Thule\",\n \"America/Thunder_Bay\",\n \"America/Tijuana\",\n \"America/Toronto\",\n \"America/Tortola\",\n \"America/Vancouver\",\n \"America/Virgin\",\n \"America/Whitehorse\",\n \"America/Winnipeg\",\n \"America/Yakutat\",\n \"America/Yellowknife\",\n \"Antarctica/Casey\",\n \"Antarctica/Davis\",\n \"Antarctica/DumontDUrville\",\n \"Antarctica/Macquarie\",\n \"Antarctica/Mawson\",\n \"Antarctica/McMurdo\",\n \"Antarctica/Palmer\",\n \"Antarctica/Rothera\",\n \"Antarctica/South_Pole\",\n 
\"Antarctica/Syowa\",\n \"Antarctica/Troll\",\n \"Antarctica/Vostok\",\n \"Arctic/Longyearbyen\",\n \"Asia/Aden\",\n \"Asia/Almaty\",\n \"Asia/Amman\",\n \"Asia/Anadyr\",\n \"Asia/Aqtau\",\n \"Asia/Aqtobe\",\n \"Asia/Ashgabat\",\n \"Asia/Ashkhabad\",\n \"Asia/Atyrau\",\n \"Asia/Baghdad\",\n \"Asia/Bahrain\",\n \"Asia/Baku\",\n \"Asia/Bangkok\",\n \"Asia/Barnaul\",\n \"Asia/Beirut\",\n \"Asia/Bishkek\",\n \"Asia/Brunei\",\n \"Asia/Calcutta\",\n \"Asia/Chita\",\n \"Asia/Choibalsan\",\n \"Asia/Chongqing\",\n \"Asia/Chungking\",\n \"Asia/Colombo\",\n \"Asia/Dacca\",\n \"Asia/Damascus\",\n \"Asia/Dhaka\",\n \"Asia/Dili\",\n \"Asia/Dubai\",\n \"Asia/Dushanbe\",\n \"Asia/Famagusta\",\n \"Asia/Gaza\",\n \"Asia/Harbin\",\n \"Asia/Hebron\",\n \"Asia/Ho_Chi_Minh\",\n \"Asia/Hong_Kong\",\n \"Asia/Hovd\",\n \"Asia/Irkutsk\",\n \"Asia/Istanbul\",\n \"Asia/Jakarta\",\n \"Asia/Jayapura\",\n \"Asia/Jerusalem\",\n \"Asia/Kabul\",\n \"Asia/Kamchatka\",\n \"Asia/Karachi\",\n \"Asia/Kashgar\",\n \"Asia/Kathmandu\",\n \"Asia/Katmandu\",\n \"Asia/Khandyga\",\n \"Asia/Kolkata\",\n \"Asia/Krasnoyarsk\",\n \"Asia/Kuala_Lumpur\",\n \"Asia/Kuching\",\n \"Asia/Kuwait\",\n \"Asia/Macao\",\n \"Asia/Macau\",\n \"Asia/Magadan\",\n \"Asia/Makassar\",\n \"Asia/Manila\",\n \"Asia/Muscat\",\n \"Asia/Nicosia\",\n \"Asia/Novokuznetsk\",\n \"Asia/Novosibirsk\",\n \"Asia/Omsk\",\n \"Asia/Oral\",\n \"Asia/Phnom_Penh\",\n \"Asia/Pontianak\",\n \"Asia/Pyongyang\",\n \"Asia/Qatar\",\n \"Asia/Qostanay\",\n \"Asia/Qyzylorda\",\n \"Asia/Rangoon\",\n \"Asia/Riyadh\",\n \"Asia/Saigon\",\n \"Asia/Sakhalin\",\n \"Asia/Samarkand\",\n \"Asia/Seoul\",\n \"Asia/Shanghai\",\n \"Asia/Singapore\",\n \"Asia/Srednekolymsk\",\n \"Asia/Taipei\",\n \"Asia/Tashkent\",\n \"Asia/Tbilisi\",\n \"Asia/Tehran\",\n \"Asia/Tel_Aviv\",\n \"Asia/Thimbu\",\n \"Asia/Thimphu\",\n \"Asia/Tokyo\",\n \"Asia/Tomsk\",\n \"Asia/Ujung_Pandang\",\n \"Asia/Ulaanbaatar\",\n \"Asia/Ulan_Bator\",\n \"Asia/Urumqi\",\n \"Asia/Ust-Nera\",\n \"Asia/Vientiane\",\n \"Asia/Vladivostok\",\n \"Asia/Yakutsk\",\n \"Asia/Yangon\",\n \"Asia/Yekaterinburg\",\n \"Asia/Yerevan\",\n \"Atlantic/Azores\",\n \"Atlantic/Bermuda\",\n \"Atlantic/Canary\",\n \"Atlantic/Cape_Verde\",\n \"Atlantic/Faeroe\",\n \"Atlantic/Faroe\",\n \"Atlantic/Jan_Mayen\",\n \"Atlantic/Madeira\",\n \"Atlantic/Reykjavik\",\n \"Atlantic/South_Georgia\",\n \"Atlantic/St_Helena\",\n \"Atlantic/Stanley\",\n \"Australia/ACT\",\n \"Australia/Adelaide\",\n \"Australia/Brisbane\",\n \"Australia/Broken_Hill\",\n \"Australia/Canberra\",\n \"Australia/Currie\",\n \"Australia/Darwin\",\n \"Australia/Eucla\",\n \"Australia/Hobart\",\n \"Australia/LHI\",\n \"Australia/Lindeman\",\n \"Australia/Lord_Howe\",\n \"Australia/Melbourne\",\n \"Australia/NSW\",\n \"Australia/North\",\n \"Australia/Perth\",\n \"Australia/Queensland\",\n \"Australia/South\",\n \"Australia/Sydney\",\n \"Australia/Tasmania\",\n \"Australia/Victoria\",\n \"Australia/West\",\n \"Australia/Yancowinna\",\n \"Brazil/Acre\",\n \"Brazil/DeNoronha\",\n \"Brazil/East\",\n \"Brazil/West\",\n \"CET\",\n \"CST6CDT\",\n \"Canada/Atlantic\",\n \"Canada/Central\",\n \"Canada/Eastern\",\n \"Canada/Mountain\",\n \"Canada/Newfoundland\",\n \"Canada/Pacific\",\n \"Canada/Saskatchewan\",\n \"Canada/Yukon\",\n \"Chile/Continental\",\n \"Chile/EasterIsland\",\n \"Cuba\",\n \"EET\",\n \"EST\",\n \"EST5EDT\",\n \"Egypt\",\n \"Eire\",\n \"Etc/GMT\",\n \"Etc/GMT+0\",\n \"Etc/GMT+1\",\n \"Etc/GMT+10\",\n \"Etc/GMT+11\",\n \"Etc/GMT+12\",\n \"Etc/GMT+2\",\n \"Etc/GMT+3\",\n \"Etc/GMT+4\",\n 
\"Etc/GMT+5\",\n \"Etc/GMT+6\",\n \"Etc/GMT+7\",\n \"Etc/GMT+8\",\n \"Etc/GMT+9\",\n \"Etc/GMT-0\",\n \"Etc/GMT-1\",\n \"Etc/GMT-10\",\n \"Etc/GMT-11\",\n \"Etc/GMT-12\",\n \"Etc/GMT-13\",\n \"Etc/GMT-14\",\n \"Etc/GMT-2\",\n \"Etc/GMT-3\",\n \"Etc/GMT-4\",\n \"Etc/GMT-5\",\n \"Etc/GMT-6\",\n \"Etc/GMT-7\",\n \"Etc/GMT-8\",\n \"Etc/GMT-9\",\n \"Etc/GMT0\",\n \"Etc/Greenwich\",\n \"Etc/UCT\",\n \"Etc/UTC\",\n \"Etc/Universal\",\n \"Etc/Zulu\",\n \"Europe/Amsterdam\",\n \"Europe/Andorra\",\n \"Europe/Astrakhan\",\n \"Europe/Athens\",\n \"Europe/Belfast\",\n \"Europe/Belgrade\",\n \"Europe/Berlin\",\n \"Europe/Bratislava\",\n \"Europe/Brussels\",\n \"Europe/Bucharest\",\n \"Europe/Budapest\",\n \"Europe/Busingen\",\n \"Europe/Chisinau\",\n \"Europe/Copenhagen\",\n \"Europe/Dublin\",\n \"Europe/Gibraltar\",\n \"Europe/Guernsey\",\n \"Europe/Helsinki\",\n \"Europe/Isle_of_Man\",\n \"Europe/Istanbul\",\n \"Europe/Jersey\",\n \"Europe/Kaliningrad\",\n \"Europe/Kiev\",\n \"Europe/Kirov\",\n \"Europe/Kyiv\",\n \"Europe/Lisbon\",\n \"Europe/Ljubljana\",\n \"Europe/London\",\n \"Europe/Luxembourg\",\n \"Europe/Madrid\",\n \"Europe/Malta\",\n \"Europe/Mariehamn\",\n \"Europe/Minsk\",\n \"Europe/Monaco\",\n \"Europe/Moscow\",\n \"Europe/Nicosia\",\n \"Europe/Oslo\",\n \"Europe/Paris\",\n \"Europe/Podgorica\",\n \"Europe/Prague\",\n \"Europe/Riga\",\n \"Europe/Rome\",\n \"Europe/Samara\",\n \"Europe/San_Marino\",\n \"Europe/Sarajevo\",\n \"Europe/Saratov\",\n \"Europe/Simferopol\",\n \"Europe/Skopje\",\n \"Europe/Sofia\",\n \"Europe/Stockholm\",\n \"Europe/Tallinn\",\n \"Europe/Tirane\",\n \"Europe/Tiraspol\",\n \"Europe/Ulyanovsk\",\n \"Europe/Uzhgorod\",\n \"Europe/Vaduz\",\n \"Europe/Vatican\",\n \"Europe/Vienna\",\n \"Europe/Vilnius\",\n \"Europe/Volgograd\",\n \"Europe/Warsaw\",\n \"Europe/Zagreb\",\n \"Europe/Zaporozhye\",\n \"Europe/Zurich\",\n \"GB\",\n \"GB-Eire\",\n \"GMT\",\n \"GMT+0\",\n \"GMT-0\",\n \"GMT0\",\n \"Greenwich\",\n \"HST\",\n \"Hongkong\",\n \"Iceland\",\n \"Indian/Antananarivo\",\n \"Indian/Chagos\",\n \"Indian/Christmas\",\n \"Indian/Cocos\",\n \"Indian/Comoro\",\n \"Indian/Kerguelen\",\n \"Indian/Mahe\",\n \"Indian/Maldives\",\n \"Indian/Mauritius\",\n \"Indian/Mayotte\",\n \"Indian/Reunion\",\n \"Iran\",\n \"Israel\",\n \"Jamaica\",\n \"Japan\",\n \"Kwajalein\",\n \"Libya\",\n \"MET\",\n \"MST\",\n \"MST7MDT\",\n \"Mexico/BajaNorte\",\n \"Mexico/BajaSur\",\n \"Mexico/General\",\n \"NZ\",\n \"NZ-CHAT\",\n \"Navajo\",\n \"PRC\",\n \"PST8PDT\",\n \"Pacific/Apia\",\n \"Pacific/Auckland\",\n \"Pacific/Bougainville\",\n \"Pacific/Chatham\",\n \"Pacific/Chuuk\",\n \"Pacific/Easter\",\n \"Pacific/Efate\",\n \"Pacific/Enderbury\",\n \"Pacific/Fakaofo\",\n \"Pacific/Fiji\",\n \"Pacific/Funafuti\",\n \"Pacific/Galapagos\",\n \"Pacific/Gambier\",\n \"Pacific/Guadalcanal\",\n \"Pacific/Guam\",\n \"Pacific/Honolulu\",\n \"Pacific/Johnston\",\n \"Pacific/Kanton\",\n \"Pacific/Kiritimati\",\n \"Pacific/Kosrae\",\n \"Pacific/Kwajalein\",\n \"Pacific/Majuro\",\n \"Pacific/Marquesas\",\n \"Pacific/Midway\",\n \"Pacific/Nauru\",\n \"Pacific/Niue\",\n \"Pacific/Norfolk\",\n \"Pacific/Noumea\",\n \"Pacific/Pago_Pago\",\n \"Pacific/Palau\",\n \"Pacific/Pitcairn\",\n \"Pacific/Pohnpei\",\n \"Pacific/Ponape\",\n \"Pacific/Port_Moresby\",\n \"Pacific/Rarotonga\",\n \"Pacific/Saipan\",\n \"Pacific/Samoa\",\n \"Pacific/Tahiti\",\n \"Pacific/Tarawa\",\n \"Pacific/Tongatapu\",\n \"Pacific/Truk\",\n \"Pacific/Wake\",\n \"Pacific/Wallis\",\n \"Pacific/Yap\",\n \"Poland\",\n \"Portugal\",\n \"ROC\",\n 
\"ROK\",\n \"Singapore\",\n \"Turkey\",\n \"UCT\",\n \"US/Alaska\",\n \"US/Aleutian\",\n \"US/Arizona\",\n \"US/Central\",\n \"US/East-Indiana\",\n \"US/Eastern\",\n \"US/Hawaii\",\n \"US/Indiana-Starke\",\n \"US/Michigan\",\n \"US/Mountain\",\n \"US/Pacific\",\n \"US/Samoa\",\n \"UTC\",\n \"Universal\",\n \"W-SU\",\n \"WET\",\n \"Zulu\",\n )\n}\n", "path": "sqlglot/time.py" } ]
[ { "content": "import typing as t\n\n# The generic time format is based on python time.strftime.\n# https://docs.python.org/3/library/time.html#time.strftime\nfrom sqlglot.trie import TrieResult, in_trie, new_trie\n\n\ndef format_time(\n string: str, mapping: t.Dict[str, str], trie: t.Optional[t.Dict] = None\n) -> t.Optional[str]:\n \"\"\"\n Converts a time string given a mapping.\n\n Examples:\n >>> format_time(\"%Y\", {\"%Y\": \"YYYY\"})\n 'YYYY'\n\n Args:\n mapping: dictionary of time format to target time format.\n trie: optional trie, can be passed in for performance.\n\n Returns:\n The converted time string.\n \"\"\"\n if not string:\n return None\n\n start = 0\n end = 1\n size = len(string)\n trie = trie or new_trie(mapping)\n current = trie\n chunks = []\n sym = None\n\n while end <= size:\n chars = string[start:end]\n result, current = in_trie(current, chars[-1])\n\n if result == TrieResult.FAILED:\n if sym:\n end -= 1\n chars = sym\n sym = None\n else:\n chars = chars[0]\n end = start + 1\n\n start += len(chars)\n chunks.append(chars)\n current = trie\n elif result == TrieResult.EXISTS:\n sym = chars\n\n end += 1\n\n if result != TrieResult.FAILED and end > size:\n chunks.append(chars)\n\n return \"\".join(mapping.get(chars, chars) for chars in chunks)\n\n\nTIMEZONES = {\n tz.lower()\n for tz in (\n \"Africa/Abidjan\",\n \"Africa/Accra\",\n \"Africa/Addis_Ababa\",\n \"Africa/Algiers\",\n \"Africa/Asmara\",\n \"Africa/Asmera\",\n \"Africa/Bamako\",\n \"Africa/Bangui\",\n \"Africa/Banjul\",\n \"Africa/Bissau\",\n \"Africa/Blantyre\",\n \"Africa/Brazzaville\",\n \"Africa/Bujumbura\",\n \"Africa/Cairo\",\n \"Africa/Casablanca\",\n \"Africa/Ceuta\",\n \"Africa/Conakry\",\n \"Africa/Dakar\",\n \"Africa/Dar_es_Salaam\",\n \"Africa/Djibouti\",\n \"Africa/Douala\",\n \"Africa/El_Aaiun\",\n \"Africa/Freetown\",\n \"Africa/Gaborone\",\n \"Africa/Harare\",\n \"Africa/Johannesburg\",\n \"Africa/Juba\",\n \"Africa/Kampala\",\n \"Africa/Khartoum\",\n \"Africa/Kigali\",\n \"Africa/Kinshasa\",\n \"Africa/Lagos\",\n \"Africa/Libreville\",\n \"Africa/Lome\",\n \"Africa/Luanda\",\n \"Africa/Lubumbashi\",\n \"Africa/Lusaka\",\n \"Africa/Malabo\",\n \"Africa/Maputo\",\n \"Africa/Maseru\",\n \"Africa/Mbabane\",\n \"Africa/Mogadishu\",\n \"Africa/Monrovia\",\n \"Africa/Nairobi\",\n \"Africa/Ndjamena\",\n \"Africa/Niamey\",\n \"Africa/Nouakchott\",\n \"Africa/Ouagadougou\",\n \"Africa/Porto-Novo\",\n \"Africa/Sao_Tome\",\n \"Africa/Timbuktu\",\n \"Africa/Tripoli\",\n \"Africa/Tunis\",\n \"Africa/Windhoek\",\n \"America/Adak\",\n \"America/Anchorage\",\n \"America/Anguilla\",\n \"America/Antigua\",\n \"America/Araguaina\",\n \"America/Argentina/Buenos_Aires\",\n \"America/Argentina/Catamarca\",\n \"America/Argentina/ComodRivadavia\",\n \"America/Argentina/Cordoba\",\n \"America/Argentina/Jujuy\",\n \"America/Argentina/La_Rioja\",\n \"America/Argentina/Mendoza\",\n \"America/Argentina/Rio_Gallegos\",\n \"America/Argentina/Salta\",\n \"America/Argentina/San_Juan\",\n \"America/Argentina/San_Luis\",\n \"America/Argentina/Tucuman\",\n \"America/Argentina/Ushuaia\",\n \"America/Aruba\",\n \"America/Asuncion\",\n \"America/Atikokan\",\n \"America/Atka\",\n \"America/Bahia\",\n \"America/Bahia_Banderas\",\n \"America/Barbados\",\n \"America/Belem\",\n \"America/Belize\",\n \"America/Blanc-Sablon\",\n \"America/Boa_Vista\",\n \"America/Bogota\",\n \"America/Boise\",\n \"America/Buenos_Aires\",\n \"America/Cambridge_Bay\",\n \"America/Campo_Grande\",\n \"America/Cancun\",\n \"America/Caracas\",\n 
\"America/Catamarca\",\n \"America/Cayenne\",\n \"America/Cayman\",\n \"America/Chicago\",\n \"America/Chihuahua\",\n \"America/Ciudad_Juarez\",\n \"America/Coral_Harbour\",\n \"America/Cordoba\",\n \"America/Costa_Rica\",\n \"America/Creston\",\n \"America/Cuiaba\",\n \"America/Curacao\",\n \"America/Danmarkshavn\",\n \"America/Dawson\",\n \"America/Dawson_Creek\",\n \"America/Denver\",\n \"America/Detroit\",\n \"America/Dominica\",\n \"America/Edmonton\",\n \"America/Eirunepe\",\n \"America/El_Salvador\",\n \"America/Ensenada\",\n \"America/Fort_Nelson\",\n \"America/Fort_Wayne\",\n \"America/Fortaleza\",\n \"America/Glace_Bay\",\n \"America/Godthab\",\n \"America/Goose_Bay\",\n \"America/Grand_Turk\",\n \"America/Grenada\",\n \"America/Guadeloupe\",\n \"America/Guatemala\",\n \"America/Guayaquil\",\n \"America/Guyana\",\n \"America/Halifax\",\n \"America/Havana\",\n \"America/Hermosillo\",\n \"America/Indiana/Indianapolis\",\n \"America/Indiana/Knox\",\n \"America/Indiana/Marengo\",\n \"America/Indiana/Petersburg\",\n \"America/Indiana/Tell_City\",\n \"America/Indiana/Vevay\",\n \"America/Indiana/Vincennes\",\n \"America/Indiana/Winamac\",\n \"America/Indianapolis\",\n \"America/Inuvik\",\n \"America/Iqaluit\",\n \"America/Jamaica\",\n \"America/Jujuy\",\n \"America/Juneau\",\n \"America/Kentucky/Louisville\",\n \"America/Kentucky/Monticello\",\n \"America/Knox_IN\",\n \"America/Kralendijk\",\n \"America/La_Paz\",\n \"America/Lima\",\n \"America/Los_Angeles\",\n \"America/Louisville\",\n \"America/Lower_Princes\",\n \"America/Maceio\",\n \"America/Managua\",\n \"America/Manaus\",\n \"America/Marigot\",\n \"America/Martinique\",\n \"America/Matamoros\",\n \"America/Mazatlan\",\n \"America/Mendoza\",\n \"America/Menominee\",\n \"America/Merida\",\n \"America/Metlakatla\",\n \"America/Mexico_City\",\n \"America/Miquelon\",\n \"America/Moncton\",\n \"America/Monterrey\",\n \"America/Montevideo\",\n \"America/Montreal\",\n \"America/Montserrat\",\n \"America/Nassau\",\n \"America/New_York\",\n \"America/Nipigon\",\n \"America/Nome\",\n \"America/Noronha\",\n \"America/North_Dakota/Beulah\",\n \"America/North_Dakota/Center\",\n \"America/North_Dakota/New_Salem\",\n \"America/Nuuk\",\n \"America/Ojinaga\",\n \"America/Panama\",\n \"America/Pangnirtung\",\n \"America/Paramaribo\",\n \"America/Phoenix\",\n \"America/Port-au-Prince\",\n \"America/Port_of_Spain\",\n \"America/Porto_Acre\",\n \"America/Porto_Velho\",\n \"America/Puerto_Rico\",\n \"America/Punta_Arenas\",\n \"America/Rainy_River\",\n \"America/Rankin_Inlet\",\n \"America/Recife\",\n \"America/Regina\",\n \"America/Resolute\",\n \"America/Rio_Branco\",\n \"America/Rosario\",\n \"America/Santa_Isabel\",\n \"America/Santarem\",\n \"America/Santiago\",\n \"America/Santo_Domingo\",\n \"America/Sao_Paulo\",\n \"America/Scoresbysund\",\n \"America/Shiprock\",\n \"America/Sitka\",\n \"America/St_Barthelemy\",\n \"America/St_Johns\",\n \"America/St_Kitts\",\n \"America/St_Lucia\",\n \"America/St_Thomas\",\n \"America/St_Vincent\",\n \"America/Swift_Current\",\n \"America/Tegucigalpa\",\n \"America/Thule\",\n \"America/Thunder_Bay\",\n \"America/Tijuana\",\n \"America/Toronto\",\n \"America/Tortola\",\n \"America/Vancouver\",\n \"America/Virgin\",\n \"America/Whitehorse\",\n \"America/Winnipeg\",\n \"America/Yakutat\",\n \"America/Yellowknife\",\n \"Antarctica/Casey\",\n \"Antarctica/Davis\",\n \"Antarctica/DumontDUrville\",\n \"Antarctica/Macquarie\",\n \"Antarctica/Mawson\",\n \"Antarctica/McMurdo\",\n \"Antarctica/Palmer\",\n 
\"Antarctica/Rothera\",\n \"Antarctica/South_Pole\",\n \"Antarctica/Syowa\",\n \"Antarctica/Troll\",\n \"Antarctica/Vostok\",\n \"Arctic/Longyearbyen\",\n \"Asia/Aden\",\n \"Asia/Almaty\",\n \"Asia/Amman\",\n \"Asia/Anadyr\",\n \"Asia/Aqtau\",\n \"Asia/Aqtobe\",\n \"Asia/Ashgabat\",\n \"Asia/Ashkhabad\",\n \"Asia/Atyrau\",\n \"Asia/Baghdad\",\n \"Asia/Bahrain\",\n \"Asia/Baku\",\n \"Asia/Bangkok\",\n \"Asia/Barnaul\",\n \"Asia/Beirut\",\n \"Asia/Bishkek\",\n \"Asia/Brunei\",\n \"Asia/Calcutta\",\n \"Asia/Chita\",\n \"Asia/Choibalsan\",\n \"Asia/Chongqing\",\n \"Asia/Chungking\",\n \"Asia/Colombo\",\n \"Asia/Dacca\",\n \"Asia/Damascus\",\n \"Asia/Dhaka\",\n \"Asia/Dili\",\n \"Asia/Dubai\",\n \"Asia/Dushanbe\",\n \"Asia/Famagusta\",\n \"Asia/Gaza\",\n \"Asia/Harbin\",\n \"Asia/Hebron\",\n \"Asia/Ho_Chi_Minh\",\n \"Asia/Hong_Kong\",\n \"Asia/Hovd\",\n \"Asia/Irkutsk\",\n \"Asia/Istanbul\",\n \"Asia/Jakarta\",\n \"Asia/Jayapura\",\n \"Asia/Jerusalem\",\n \"Asia/Kabul\",\n \"Asia/Kamchatka\",\n \"Asia/Karachi\",\n \"Asia/Kashgar\",\n \"Asia/Kathmandu\",\n \"Asia/Katmandu\",\n \"Asia/Khandyga\",\n \"Asia/Kolkata\",\n \"Asia/Krasnoyarsk\",\n \"Asia/Kuala_Lumpur\",\n \"Asia/Kuching\",\n \"Asia/Kuwait\",\n \"Asia/Macao\",\n \"Asia/Macau\",\n \"Asia/Magadan\",\n \"Asia/Makassar\",\n \"Asia/Manila\",\n \"Asia/Muscat\",\n \"Asia/Nicosia\",\n \"Asia/Novokuznetsk\",\n \"Asia/Novosibirsk\",\n \"Asia/Omsk\",\n \"Asia/Oral\",\n \"Asia/Phnom_Penh\",\n \"Asia/Pontianak\",\n \"Asia/Pyongyang\",\n \"Asia/Qatar\",\n \"Asia/Qostanay\",\n \"Asia/Qyzylorda\",\n \"Asia/Rangoon\",\n \"Asia/Riyadh\",\n \"Asia/Saigon\",\n \"Asia/Sakhalin\",\n \"Asia/Samarkand\",\n \"Asia/Seoul\",\n \"Asia/Shanghai\",\n \"Asia/Singapore\",\n \"Asia/Srednekolymsk\",\n \"Asia/Taipei\",\n \"Asia/Tashkent\",\n \"Asia/Tbilisi\",\n \"Asia/Tehran\",\n \"Asia/Tel_Aviv\",\n \"Asia/Thimbu\",\n \"Asia/Thimphu\",\n \"Asia/Tokyo\",\n \"Asia/Tomsk\",\n \"Asia/Ujung_Pandang\",\n \"Asia/Ulaanbaatar\",\n \"Asia/Ulan_Bator\",\n \"Asia/Urumqi\",\n \"Asia/Ust-Nera\",\n \"Asia/Vientiane\",\n \"Asia/Vladivostok\",\n \"Asia/Yakutsk\",\n \"Asia/Yangon\",\n \"Asia/Yekaterinburg\",\n \"Asia/Yerevan\",\n \"Atlantic/Azores\",\n \"Atlantic/Bermuda\",\n \"Atlantic/Canary\",\n \"Atlantic/Cape_Verde\",\n \"Atlantic/Faeroe\",\n \"Atlantic/Faroe\",\n \"Atlantic/Jan_Mayen\",\n \"Atlantic/Madeira\",\n \"Atlantic/Reykjavik\",\n \"Atlantic/South_Georgia\",\n \"Atlantic/St_Helena\",\n \"Atlantic/Stanley\",\n \"Australia/ACT\",\n \"Australia/Adelaide\",\n \"Australia/Brisbane\",\n \"Australia/Broken_Hill\",\n \"Australia/Canberra\",\n \"Australia/Currie\",\n \"Australia/Darwin\",\n \"Australia/Eucla\",\n \"Australia/Hobart\",\n \"Australia/LHI\",\n \"Australia/Lindeman\",\n \"Australia/Lord_Howe\",\n \"Australia/Melbourne\",\n \"Australia/NSW\",\n \"Australia/North\",\n \"Australia/Perth\",\n \"Australia/Queensland\",\n \"Australia/South\",\n \"Australia/Sydney\",\n \"Australia/Tasmania\",\n \"Australia/Victoria\",\n \"Australia/West\",\n \"Australia/Yancowinna\",\n \"Brazil/Acre\",\n \"Brazil/DeNoronha\",\n \"Brazil/East\",\n \"Brazil/West\",\n \"CET\",\n \"CST6CDT\",\n \"Canada/Atlantic\",\n \"Canada/Central\",\n \"Canada/Eastern\",\n \"Canada/Mountain\",\n \"Canada/Newfoundland\",\n \"Canada/Pacific\",\n \"Canada/Saskatchewan\",\n \"Canada/Yukon\",\n \"Chile/Continental\",\n \"Chile/EasterIsland\",\n \"Cuba\",\n \"EET\",\n \"EST\",\n \"EST5EDT\",\n \"Egypt\",\n \"Eire\",\n \"Etc/GMT\",\n \"Etc/GMT+0\",\n \"Etc/GMT+1\",\n \"Etc/GMT+10\",\n \"Etc/GMT+11\",\n 
\"Etc/GMT+12\",\n \"Etc/GMT+2\",\n \"Etc/GMT+3\",\n \"Etc/GMT+4\",\n \"Etc/GMT+5\",\n \"Etc/GMT+6\",\n \"Etc/GMT+7\",\n \"Etc/GMT+8\",\n \"Etc/GMT+9\",\n \"Etc/GMT-0\",\n \"Etc/GMT-1\",\n \"Etc/GMT-10\",\n \"Etc/GMT-11\",\n \"Etc/GMT-12\",\n \"Etc/GMT-13\",\n \"Etc/GMT-14\",\n \"Etc/GMT-2\",\n \"Etc/GMT-3\",\n \"Etc/GMT-4\",\n \"Etc/GMT-5\",\n \"Etc/GMT-6\",\n \"Etc/GMT-7\",\n \"Etc/GMT-8\",\n \"Etc/GMT-9\",\n \"Etc/GMT0\",\n \"Etc/Greenwich\",\n \"Etc/UCT\",\n \"Etc/UTC\",\n \"Etc/Universal\",\n \"Etc/Zulu\",\n \"Europe/Amsterdam\",\n \"Europe/Andorra\",\n \"Europe/Astrakhan\",\n \"Europe/Athens\",\n \"Europe/Belfast\",\n \"Europe/Belgrade\",\n \"Europe/Berlin\",\n \"Europe/Bratislava\",\n \"Europe/Brussels\",\n \"Europe/Bucharest\",\n \"Europe/Budapest\",\n \"Europe/Busingen\",\n \"Europe/Chisinau\",\n \"Europe/Copenhagen\",\n \"Europe/Dublin\",\n \"Europe/Gibraltar\",\n \"Europe/Guernsey\",\n \"Europe/Helsinki\",\n \"Europe/Isle_of_Man\",\n \"Europe/Istanbul\",\n \"Europe/Jersey\",\n \"Europe/Kaliningrad\",\n \"Europe/Kiev\",\n \"Europe/Kirov\",\n \"Europe/Kyiv\",\n \"Europe/Lisbon\",\n \"Europe/Ljubljana\",\n \"Europe/London\",\n \"Europe/Luxembourg\",\n \"Europe/Madrid\",\n \"Europe/Malta\",\n \"Europe/Mariehamn\",\n \"Europe/Minsk\",\n \"Europe/Monaco\",\n \"Europe/Moscow\",\n \"Europe/Nicosia\",\n \"Europe/Oslo\",\n \"Europe/Paris\",\n \"Europe/Podgorica\",\n \"Europe/Prague\",\n \"Europe/Riga\",\n \"Europe/Rome\",\n \"Europe/Samara\",\n \"Europe/San_Marino\",\n \"Europe/Sarajevo\",\n \"Europe/Saratov\",\n \"Europe/Simferopol\",\n \"Europe/Skopje\",\n \"Europe/Sofia\",\n \"Europe/Stockholm\",\n \"Europe/Tallinn\",\n \"Europe/Tirane\",\n \"Europe/Tiraspol\",\n \"Europe/Ulyanovsk\",\n \"Europe/Uzhgorod\",\n \"Europe/Vaduz\",\n \"Europe/Vatican\",\n \"Europe/Vienna\",\n \"Europe/Vilnius\",\n \"Europe/Volgograd\",\n \"Europe/Warsaw\",\n \"Europe/Zagreb\",\n \"Europe/Zaporozhye\",\n \"Europe/Zurich\",\n \"GB\",\n \"GB-Eire\",\n \"GMT\",\n \"GMT+0\",\n \"GMT-0\",\n \"GMT0\",\n \"Greenwich\",\n \"HST\",\n \"Hongkong\",\n \"Iceland\",\n \"Indian/Antananarivo\",\n \"Indian/Chagos\",\n \"Indian/Christmas\",\n \"Indian/Cocos\",\n \"Indian/Comoro\",\n \"Indian/Kerguelen\",\n \"Indian/Mahe\",\n \"Indian/Maldives\",\n \"Indian/Mauritius\",\n \"Indian/Mayotte\",\n \"Indian/Reunion\",\n \"Iran\",\n \"Israel\",\n \"Jamaica\",\n \"Japan\",\n \"Kwajalein\",\n \"Libya\",\n \"MET\",\n \"MST\",\n \"MST7MDT\",\n \"Mexico/BajaNorte\",\n \"Mexico/BajaSur\",\n \"Mexico/General\",\n \"NZ\",\n \"NZ-CHAT\",\n \"Navajo\",\n \"PRC\",\n \"PST8PDT\",\n \"Pacific/Apia\",\n \"Pacific/Auckland\",\n \"Pacific/Bougainville\",\n \"Pacific/Chatham\",\n \"Pacific/Chuuk\",\n \"Pacific/Easter\",\n \"Pacific/Efate\",\n \"Pacific/Enderbury\",\n \"Pacific/Fakaofo\",\n \"Pacific/Fiji\",\n \"Pacific/Funafuti\",\n \"Pacific/Galapagos\",\n \"Pacific/Gambier\",\n \"Pacific/Guadalcanal\",\n \"Pacific/Guam\",\n \"Pacific/Honolulu\",\n \"Pacific/Johnston\",\n \"Pacific/Kanton\",\n \"Pacific/Kiritimati\",\n \"Pacific/Kosrae\",\n \"Pacific/Kwajalein\",\n \"Pacific/Majuro\",\n \"Pacific/Marquesas\",\n \"Pacific/Midway\",\n \"Pacific/Nauru\",\n \"Pacific/Niue\",\n \"Pacific/Norfolk\",\n \"Pacific/Noumea\",\n \"Pacific/Pago_Pago\",\n \"Pacific/Palau\",\n \"Pacific/Pitcairn\",\n \"Pacific/Pohnpei\",\n \"Pacific/Ponape\",\n \"Pacific/Port_Moresby\",\n \"Pacific/Rarotonga\",\n \"Pacific/Saipan\",\n \"Pacific/Samoa\",\n \"Pacific/Tahiti\",\n \"Pacific/Tarawa\",\n \"Pacific/Tongatapu\",\n \"Pacific/Truk\",\n \"Pacific/Wake\",\n 
\"Pacific/Wallis\",\n \"Pacific/Yap\",\n \"Poland\",\n \"Portugal\",\n \"ROC\",\n \"ROK\",\n \"Singapore\",\n \"Turkey\",\n \"UCT\",\n \"US/Alaska\",\n \"US/Aleutian\",\n \"US/Arizona\",\n \"US/Central\",\n \"US/East-Indiana\",\n \"US/Eastern\",\n \"US/Hawaii\",\n \"US/Indiana-Starke\",\n \"US/Michigan\",\n \"US/Mountain\",\n \"US/Pacific\",\n \"US/Samoa\",\n \"UTC\",\n \"Universal\",\n \"W-SU\",\n \"WET\",\n \"Zulu\",\n )\n}\n", "path": "sqlglot/time.py" } ]
diff --git a/sqlglot/time.py b/sqlglot/time.py index c286ec1e8c..50ec2ec3f0 100644 --- a/sqlglot/time.py +++ b/sqlglot/time.py @@ -42,6 +42,10 @@ def format_time( end -= 1 chars = sym sym = None + else: + chars = chars[0] + end = start + 1 + start += len(chars) chunks.append(chars) current = trie diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py index 45bb763b8a..ab246a3d6a 100644 --- a/tests/dialects/test_mysql.py +++ b/tests/dialects/test_mysql.py @@ -123,6 +123,7 @@ def test_ddl(self): self.validate_identity("ALTER TABLE test_table ALTER COLUMN test_column SET DEFAULT 1") def test_identity(self): + self.validate_identity("SELECT DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:00.0000')") self.validate_identity("SELECT @var1 := 1, @var2") self.validate_identity("UNLOCK TABLES") self.validate_identity("LOCK TABLES `app_fields` WRITE")
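For illustration only (not part of the dataset record above): a minimal sketch of what the `format_time` patch changes, assuming the patched `sqlglot/time.py` shown earlier is importable as `sqlglot.time`. The one-entry mapping and the input string are made up for the example.

```python
from sqlglot.time import format_time

mapping = {"%Y": "yyyy"}  # made-up mapping for illustration

# "%%Y" begins with a partial trie match ("%") that then fails. Before the
# patch, the whole failed chunk "%%" was emitted at once and scanning resumed
# after it, so the "%Y" token was never recognised (result: "%%Y"). With the
# patch, only the first character of the failed chunk is emitted as a literal
# and scanning restarts one position later, so the token still matches.
print(format_time("%%Y", mapping))  # expected with the patched code: "%yyyy"
```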
OpenEnergyPlatform__oeplatform-483
Aliases are not resolved in column matching. Aliases in from-clauses cause an error when parsing column elements; the parser needs a structure that keeps track of aliases so that they can be resolved appropriately.
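As a hedged sketch of the failure mode (names are hypothetical; the dictionary structure follows `parse_select`, `parse_from_item`, and `parse_column` in the files below): the column element refers to the from-clause alias rather than to a real table, so the metadata lookup in `parse_column` cannot find it.

```python
# Hypothetical query payload illustrating the reported problem.
query = {
    "fields": [
        # "t" is the alias, not a table name known to the metadata,
        # so parse_column's table lookup fails for this column element.
        {"type": "column", "table": "t", "column": "id"},
    ],
    "from": {
        "type": "table",
        "schema": "example_schema",
        "table": "example_table",
        "alias": "t",
    },
}
```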
[ { "content": "###########\n# Parsers #\n###########\nimport decimal\nimport re\nfrom datetime import datetime, date\n\nimport geoalchemy2 # Although this import seems unused is has to be here\nimport sqlalchemy as sa\nfrom sqlalchemy import (\n Column,\n MetaData,\n Table,\n and_,\n not_,\n column,\n func,\n literal_column,\n or_,\n select,\n util,\n cast,\n)\nimport dateutil\nfrom sqlalchemy.dialects.postgresql.base import INTERVAL\nfrom sqlalchemy.schema import Sequence\nfrom sqlalchemy.sql import functions as fun\nfrom sqlalchemy.sql.annotation import Annotated\nfrom sqlalchemy.sql.elements import Slice\nfrom sqlalchemy.sql.expression import ColumnClause, CompoundSelect\nfrom sqlalchemy.sql.sqltypes import Interval, _AbstractInterval\n\nfrom api.connection import _get_engine\nfrom api.error import APIError, APIKeyError\nfrom api.connection import _get_engine\nfrom sqlalchemy.sql.sqltypes import Interval, _AbstractInterval\nfrom sqlalchemy.dialects.postgresql.base import INTERVAL\nfrom sqlalchemy import types as sqltypes\n\nfrom . import DEFAULT_SCHEMA\n\n__KNOWN_TABLES = {}\n\npgsql_qualifier = re.compile(r\"^[\\w\\d_\\.]+$\")\n\n\ndef get_or_403(dictionary, key):\n try:\n return dictionary[key]\n except KeyError:\n raise APIKeyError(dictionary, key)\n\n\ndef parse_single(x, caster):\n try:\n return caster(x)\n except ValueError:\n raise APIError(\"Could not parse %s as %s\" % (x, caster))\n\n\ndef is_pg_qual(x):\n if not isinstance(x, str):\n return False\n return pgsql_qualifier.search(x)\n\n\ndef read_pgvalue(x):\n # TODO: Implement check for valid values\n if x is None:\n return \"null\"\n return x\n\n\nclass ValidationError(Exception):\n def __init__(self, message, value):\n self.message = message\n self.value = value\n\n\ndef read_bool(s):\n if isinstance(s, bool):\n return s\n if s.lower() in [\"true\", \"false\"]:\n return s.lower() == \"true\"\n elif s.lower() in [\"yes\", \"no\"]:\n return s.lower() == \"true\"\n else:\n raise APIError(\"Invalid value in binary field\", s)\n\n\ndef read_pgid(s):\n if is_pg_qual(s):\n return s\n raise APIError(\"Invalid identifier: '%s'\" % s)\n\n\ndef set_meta_info(method, user, message=None):\n val_dict = {}\n val_dict[\"_user\"] = user # TODO: Add user handling\n val_dict[\"_message\"] = message\n return val_dict\n\n\ndef parse_insert(d, context, message=None, mapper=None):\n table = Table(\n read_pgid(get_or_403(d, \"table\")),\n MetaData(bind=_get_engine()),\n autoload=True,\n schema=read_pgid(get_or_403(d, \"schema\")),\n )\n field_strings = []\n for field in d.get(\"fields\", []):\n if not (\n (isinstance(field, dict) and \"type\" in field and field[\"type\"] == \"column\")\n or isinstance(field, str)\n ):\n raise APIError(\"Only pure column expressions are allowed in insert\")\n field_strings.append(parse_expression(field))\n\n query = table.insert()\n\n if not \"method\" in d:\n d[\"method\"] = \"values\"\n if d[\"method\"] == \"values\":\n if field_strings:\n raw_values = get_or_403(d, \"values\")\n if not isinstance(raw_values, list):\n raise APIError(\"{} is not a list\".format(raw_values))\n values = (\n zip(\n field_strings,\n parse_expression(x, allow_untyped_dicts=True, escape_quotes=False),\n )\n for x in raw_values\n )\n else:\n values = get_or_403(d, \"values\")\n\n def clear_meta(vals):\n val_dict = dict(vals)\n # make sure meta fields are not compromised\n if context[\"user\"].is_anonymous:\n username = \"Anonymous\"\n else:\n username = context[\"user\"].name\n val_dict.update(set_meta_info(\"insert\", username, 
message))\n return val_dict\n\n values = list(map(clear_meta, values))\n\n query = query.values(values)\n elif d[\"method\"] == \"select\":\n values = parse_select(d[\"values\"])\n query = query.from_select(field_strings, values)\n else:\n raise APIError(\"Unknown insert method: \" + str(d[\"method\"]))\n\n if \"returning\" in d:\n return_clauses = [parse_expression(x, mapper) for x in d[\"returning\"]]\n query = query.returning(*return_clauses)\n\n return query, values\n\n\ndef parse_select(d):\n \"\"\"\n Defintion of a select query according to\n http://www.postgresql.org/docs/9.3/static/sql-select.html\n\n not implemented:\n [ WITH [ RECURSIVE ] with_query [, ...] ]\n [ WINDOW window_name AS ( window_definition ) [, ...] ]\n [ FOR { UPDATE | NO KEY UPDATE | SHARE | KEY SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]\n \"\"\"\n distinct = d.get(\"distinct\", False)\n\n L = None\n\n keyword = d.get(\"keyword\")\n\n if keyword and keyword.lower() in [\"union\", \"except\", \"intersect\"]:\n partials = []\n for part_sel in d.get(\"selects\", []):\n t = part_sel.get(\"type\")\n if t == \"grouping\":\n grouping = get_or_403(part_sel, \"grouping\")\n if isinstance(grouping, dict):\n partials.append(parse_select(grouping))\n elif isinstance(grouping, list):\n partials = map(parse_select, grouping)\n else:\n APIError(\n \"Cannot handle grouping type. Dictionary or list expected.\"\n )\n elif t == \"select\":\n partials.append(parse_select(part_sel))\n else:\n raise APIError(\"Unknown select type: \" + t)\n query = CompoundSelect(util.symbol(keyword), *partials)\n else:\n kwargs = dict(distinct=distinct)\n if \"fields\" in d and d[\"fields\"]:\n L = []\n for field in d[\"fields\"]:\n col = parse_expression(field)\n if \"as\" in field:\n col.label(read_pgid(field[\"as\"]))\n L.append(col)\n if \"from\" in d:\n kwargs[\"from_obj\"] = parse_from_item(get_or_403(d, \"from\"))\n else:\n kwargs[\"from_obj\"] = []\n if not L:\n L = \"*\"\n kwargs[\"columns\"] = L\n query = select(**kwargs)\n\n # [ WHERE condition ]\n if d.get(\"where\", False):\n query = query.where(parse_condition(d[\"where\"]))\n\n if \"group_by\" in d:\n query = query.group_by(*[parse_expression(f) for f in d[\"group_by\"]])\n\n if \"having\" in d:\n query.having([parse_condition(f) for f in d[\"having\"]])\n\n if \"select\" in d:\n for constraint in d[\"select\"]:\n type = get_or_403(constraint, \"type\")\n subquery = parse_select(get_or_403(constraint, \"query\"))\n if type.lower() == \"union\":\n query.union(subquery)\n elif type.lower() == \"intersect\":\n query.intersect(subquery)\n elif type.lower() == \"except\":\n query.except_(subquery)\n if \"order_by\" in d:\n for ob in d[\"order_by\"]:\n expr = parse_expression(ob)\n if isinstance(ob, dict):\n desc = ob.get(\"ordering\", \"asc\").lower() == \"desc\"\n if desc:\n expr = expr.desc()\n query = query.order_by(expr)\n\n if \"limit\" in d:\n if isinstance(d[\"limit\"], int) or d[\"limit\"].isdigit():\n query = query.limit(int(d[\"limit\"]))\n else:\n raise APIError(\"Invalid LIMIT: Expected a digit\")\n\n if \"offset\" in d:\n if isinstance(d[\"offset\"], int) or d[\"offset\"].isdigit():\n query = query.offset(int(d[\"offset\"]))\n else:\n raise APIError(\"Invalid LIMIT: Expected a digit\")\n return query\n\n\ndef parse_from_item(d):\n \"\"\"\n Defintion of a from_item according to \n http://www.postgresql.org/docs/9.3/static/sql-select.html\n \n return: A from_item string with checked psql qualifiers.\n \n Not implemented:\n with_query_name [ [ AS ] alias [ ( column_alias 
[, ...] ) ] ]\n [ LATERAL ] function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]\n [ LATERAL ] function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )\n \"\"\"\n # TODO: If 'type' is not set assume just a table name is present\n if isinstance(d, str):\n d = {\"type\": \"table\", \"table\": d}\n if isinstance(d, list):\n return [parse_from_item(f) for f in d]\n dtype = get_or_403(d, \"type\")\n if dtype == \"table\":\n schema_name = read_pgid(d[\"schema\"]) if \"schema\" in d else None\n only = d.get(\"only\", False)\n ext_name = table_name = read_pgid(get_or_403(d, \"table\"))\n tkwargs = dict(autoload=True)\n if schema_name:\n ext_name = schema_name + \".\" + ext_name\n tkwargs[\"schema\"] = d[\"schema\"]\n if ext_name in __PARSER_META.tables:\n item = __PARSER_META.tables[ext_name]\n else:\n try:\n item = Table(d[\"table\"], __PARSER_META, **tkwargs)\n except sa.exc.NoSuchTableError as e:\n raise APIError(\"Table {table} not found\".format(table=ext_name))\n\n engine = _get_engine()\n conn = engine.connect()\n exists = engine.dialect.has_table(conn, item.name, item.schema)\n conn.close()\n if not exists:\n raise APIError(\"Table not found: \" + str(item), status=400)\n elif dtype == \"select\":\n item = parse_select(d)\n elif dtype == \"join\":\n left = parse_from_item(get_or_403(d, \"left\"))\n right = parse_from_item(get_or_403(d, \"right\"))\n is_outer = d.get(\"is_outer\", False)\n full = d.get(\"is_full\", False)\n on_clause = None\n if \"on\" in d:\n on_clause = parse_condition(d[\"on\"])\n item = left.join(right, onclause=on_clause, isouter=is_outer, full=full)\n else:\n raise APIError(\"Unknown from-item: \" + dtype)\n\n if \"alias\" in d:\n item = item.alias(read_pgid(d[\"alias\"]))\n return item\n\n\n__PARSER_META = MetaData(bind=_get_engine())\n\n\ndef load_table_from_metadata(table_name, schema_name=None):\n ext_name = table_name\n if schema_name:\n ext_name = schema_name + \".\" + ext_name\n if ext_name and ext_name in __PARSER_META.tables:\n return __PARSER_META.tables[ext_name]\n else:\n if _get_engine().dialect.has_table(\n _get_engine().connect(), table_name, schema=schema_name\n ):\n return Table(table_name, __PARSER_META, autoload=True, schema=schema_name)\n\n\ndef parse_column(d, mapper):\n name = get_or_403(d, \"column\")\n is_literal = parse_single(d.get(\"is_literal\", False), bool)\n table_name = d.get(\"table\")\n table = None\n if table_name:\n table_name = read_pgid(table_name)\n if mapper is None:\n mapper = dict()\n do_map = lambda x: mapper.get(x, x)\n if \"schema\" in d:\n schema_name = read_pgid(do_map(d[\"schema\"]))\n else:\n schema_name = None\n table = load_table_from_metadata(table_name, schema_name=schema_name)\n if table is not None and name in table.c:\n col = table.c[name]\n if isinstance(col.type, INTERVAL):\n col.type = Interval(col.type)\n return col\n else:\n if is_literal:\n return literal_column(name)\n else:\n return column(name)\n\n\ndef parse_type(dt_string, **kwargs):\n\n if isinstance(dt_string, dict):\n dt = parse_type(\n get_or_403(dt_string, \"datatype\"), **dt_string.get(\"kwargs\", {})\n )\n return dt\n else:\n # Are you an array?\n dtarr_expression = r\"(?P<dtname>[A-z_]+)\\s*\\[\\]\"\n arr_match = re.match(dtarr_expression, dt_string)\n if arr_match:\n is_array = True\n dt_string = arr_match.groups()[0]\n dt, autoincrement = parse_type(dt_string)\n return sa.ARRAY(dt), autoincrement\n\n # Is the datatypestring of form NAME(NUMBER)?\n dt_expression = 
r\"(?P<dtname>[A-z_]+)\\s*\\((?P<cardinality>.*(,.*)?)\\)\"\n match = re.match(dt_expression, dt_string)\n if match:\n dt_string = match.groups()[0]\n if dt_string.lower() == \"geometry\":\n return geoalchemy2.Geometry(geometry_type=match.groups()[1]), False\n else:\n dt_cardinality = map(int, match.groups()[1].replace(\" \", \"\").split(\",\"))\n dt, autoincrement = parse_type(dt_string)\n return dt(*dt_cardinality, **kwargs), autoincrement\n\n # So it's a plain type\n autoincrement = False\n\n dt_string = dt_string.lower()\n\n if dt_string in (\"int\", \"integer\"):\n dt = sa.types.INTEGER\n elif dt_string in (\"bigint\", \"biginteger\"):\n dt = sa.types.BigInteger\n elif dt_string in (\"bit\",):\n dt = sa.types.Binary\n elif dt_string in (\"boolean\", \"bool\"):\n dt = sa.types.Boolean\n elif dt_string in (\"char\",):\n dt = sqltypes.CHAR\n elif dt_string in (\"date\",):\n dt = sqltypes.Date\n elif dt_string in (\"datetime\",):\n dt = sqltypes.DateTime\n elif dt_string in (\"timestamp\", \"timestamp without time zone\"):\n dt = sqltypes.TIMESTAMP\n elif dt_string in (\"time\", \"time without time zone\"):\n dt = sqltypes.TIME\n elif dt_string in (\"float\"):\n dt = sqltypes.FLOAT\n elif dt_string in (\"decimal\"):\n dt = sqltypes.DECIMAL\n elif dt_string in (\"interval\",):\n dt = sqltypes.Interval\n elif dt_string in (\"json\",):\n dt = sqltypes.JSON\n elif dt_string in (\"nchar\",):\n dt = sqltypes.NCHAR\n elif dt_string in (\"numerical\", \"numeric\"):\n dt = sa.types.Numeric\n elif dt_string in [\"varchar\", \"character varying\"]:\n dt = sqltypes.VARCHAR\n elif dt_string in (\"real\",):\n dt = sqltypes.REAL\n elif dt_string in (\"smallint\",):\n dt = sqltypes.SMALLINT\n elif hasattr(geoalchemy2, dt_string):\n dt = getattr(geoalchemy2, dt_string)\n elif hasattr(sqltypes, dt_string.upper()):\n dt = getattr(sqltypes, dt_string.upper())\n elif dt_string == \"bigserial\":\n dt = sa.types.BigInteger\n autoincrement = True\n else:\n raise APIError(\"Unknown type (%s).\" % dt_string)\n return dt, autoincrement\n\n\ndef parse_expression(d, mapper=None, allow_untyped_dicts=False, escape_quotes=True):\n # TODO: Implement\n if isinstance(d, dict):\n if allow_untyped_dicts and \"type\" not in d:\n return d\n dtype = get_or_403(d, \"type\")\n if dtype == \"column\":\n return parse_column(d, mapper)\n if dtype == \"grouping\":\n grouping = get_or_403(d, \"grouping\")\n if isinstance(grouping, list):\n return [parse_expression(e) for e in grouping]\n else:\n return parse_expression(grouping)\n if dtype == \"operator\":\n return parse_operator(d)\n if dtype == \"modifier\":\n return parse_modifier(d)\n if dtype == \"function\":\n return parse_function(d)\n if dtype == \"slice\":\n return parse_slice(d)\n if dtype == \"star\":\n return \"*\"\n if dtype == \"value\":\n if \"value\" in d:\n if \"datatype\" in d:\n dt = d[\"datatype\"]\n if dt == \"Decimal\":\n return decimal.Decimal(get_or_403(d, \"value\"))\n elif dt == \"date\":\n return dateutil.parser.parse(get_or_403(d, \"value\")).date()\n elif dt == \"datetime\":\n return dateutil.parser.parse(get_or_403(d, \"value\"))\n elif dt == \"time\":\n return dateutil.parser.parse(get_or_403(d, \"value\")).time()\n return read_pgvalue(get_or_403(d, \"value\"))\n else:\n return None\n if dtype == \"label\":\n return parse_label(d)\n if dtype == \"sequence\":\n schema = read_pgid(d[\"schema\"]) if \"schema\" in d else DEFAULT_SCHEMA\n s = '\"%s\".\"%s\"' % (schema, get_or_403(d, \"sequence\"))\n return Sequence(get_or_403(d, \"sequence\"), 
schema=schema)\n if dtype == \"select\":\n return parse_select(d)\n if dtype == \"cast\":\n expr = parse_expression(get_or_403(d, \"source\"))\n t, _ = parse_type(get_or_403(d, \"as\"))\n return cast(expr, t)\n else:\n raise APIError(\"Unknown expression type: \" + dtype)\n if isinstance(d, list):\n return [\n parse_expression(\n x, allow_untyped_dicts=allow_untyped_dicts, escape_quotes=escape_quotes\n )\n for x in d\n ]\n if isinstance(d, str):\n if escape_quotes:\n return d.replace('\"', \"\")\n else:\n return d\n return d\n\n\ndef parse_label(d):\n element = parse_expression(get_or_403(d, \"element\"))\n if not isinstance(element, sa.sql.expression.ClauseElement):\n element = sa.literal(element)\n return element.label(get_or_403(d, \"label\"))\n\n\ndef parse_slice(d):\n kwargs = {\"step\": 1}\n if \"start\" in d:\n kwargs[\"start\"] = d[\"start\"]\n if \"stop\" in d:\n kwargs[\"stop\"] = d[\"stop\"]\n return Slice(**kwargs)\n\n\ndef _unpack_clauses(clauses):\n if isinstance(clauses, list):\n clean_clauses = []\n for clause in clauses:\n if isinstance(clause, list):\n clean_clauses += list(map(_unpack_clauses, clause))\n else:\n clean_clauses.append(clause)\n clauses = {\n \"type\": \"operator\",\n \"operator\": \"AND\",\n \"operands\": list(map(parse_expression, clean_clauses)),\n }\n return clauses\n\n\ndef parse_condition(dl):\n clean_dl = _unpack_clauses(dl)\n return parse_expression(clean_dl)\n\n\ndef parse_operator(d):\n query = parse_sqla_operator(\n get_or_403(d, \"operator\"),\n *list(map(parse_expression, get_or_403(d, \"operands\")))\n )\n return query\n\n\ndef parse_modifier(d):\n query = parse_sqla_modifier(\n get_or_403(d, \"operator\"),\n *list(map(parse_expression, get_or_403(d, \"operands\")))\n )\n return query\n\n\ndef parse_function(d):\n fname = get_or_403(d, \"function\")\n\n operand_struc = get_or_403(d, \"operands\")\n if isinstance(operand_struc, list):\n operands = list(map(parse_expression, operand_struc))\n else:\n if (\n isinstance(operand_struc, dict)\n and operand_struc.get(\"type\", None) == \"grouping\"\n ):\n operands = parse_expression(operand_struc)\n else:\n operands = [parse_expression(operand_struc)]\n\n if fname == \"+\":\n if len(operands) != 2:\n raise APIError(\n \"Wrong number of arguments for function %s. Expected 2. 
Got %d\"\n % (fname, len(operands))\n )\n x, y = operands\n return x + y\n else:\n if fname == \"nextval\":\n return func.next_value(*operands)\n else:\n function = getattr(func, fname)\n return function(*operands)\n\n\ndef parse_scolumnd_from_columnd(schema, table, name, column_description):\n # Migrate Postgres to Python Structures\n data_type = column_description.get(\"data_type\")\n size = column_description.get(\"character_maximum_length\")\n if size is not None and data_type is not None:\n data_type += \"(\" + str(size) + \")\"\n\n notnull = column_description.get(\"is_nullable\", False)\n\n return {\n \"column_name\": name,\n \"not_null\": notnull,\n \"data_type\": data_type,\n \"new_name\": column_description.get(\"new_name\"),\n \"c_schema\": schema,\n \"c_table\": table,\n }\n\n\ndef parse_sconstd_from_constd(schema, table, name_const, constraint_description):\n defi = constraint_description.get(\"definition\")\n return {\n \"action\": None, # {ADD, DROP}\n \"constraint_type\": constraint_description.get(\n \"constraint_typ\"\n ), # {FOREIGN KEY, PRIMARY KEY, UNIQUE, CHECK}\n \"constraint_name\": name_const,\n \"constraint_parameter\": constraint_description.get(\"definition\")\n .split(\"(\")[1]\n .split(\")\")[0],\n # Things in Brackets, e.g. name of column\n \"reference_table\": defi.split(\"REFERENCES \")[1].split(\"(\")[2]\n if \"REFERENCES\" in defi\n else None,\n \"reference_column\": defi.split(\"(\")[2].split(\")\")[1]\n if \"REFERENCES\" in defi\n else None,\n \"c_schema\": schema,\n \"c_table\": table,\n }\n\n\ndef replace_None_with_NULL(dictonary):\n # Replacing None with null for Database\n for key, value in dictonary.items():\n if value is None:\n dictonary[key] = \"NULL\"\n\n return dictonary\n\n\ndef split(string, seperator):\n if string is None:\n return None\n else:\n return str(string).split(seperator)\n\n\ndef replace(string, occuring_symb, replace_symb):\n if string is None:\n return None\n else:\n return str(string).replace(occuring_symb, replace_symb)\n\n\ndef alchemyencoder(obj):\n \"\"\"JSON encoder function for SQLAlchemy special classes.\"\"\"\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n\n\nsql_operators = {\n \"EQUALS\": \"=\",\n \"GREATER\": \">\",\n \"LOWER\": \"<\",\n \"NOTEQUAL\": \"!=\",\n \"NOTGREATER\": \"<=\",\n \"NOTLOWER\": \">=\",\n \"=\": \"=\",\n \">\": \">\",\n \"<\": \"<\",\n \"!=\": \"!=\",\n \"<>\": \"!=\",\n \"<=\": \"<=\",\n \">=\": \">=\",\n}\n\n\ndef parse_sql_operator(key: str) -> str:\n return sql_operators.get(key)\n\n\ndef parse_sqla_operator(raw_key, *operands):\n key = raw_key.lower().strip()\n if not operands:\n raise APIError(\"Missing arguments for '%s'.\" % (key))\n if key in [\"and\"]:\n query = and_(*operands)\n return query\n elif key in [\"or\"]:\n query = or_(*operands)\n return query\n elif key in [\"not\"]:\n x = operands[0]\n return not_(parse_condition(x))\n else:\n if len(operands) != 2:\n raise APIError(\n \"Wrong number of arguments for '%s'. 
Expected: 2 Got: %s\"\n % (key, len(operands))\n )\n x, y = operands\n if key in [\"equals\", \"=\"]:\n return x == y\n if key in [\"greater\", \">\"]:\n return x > y\n if key in [\"lower\", \"<\"]:\n return x < y\n if key in [\"notequal\", \"<>\", \"!=\"]:\n return x != y\n if key in [\"notgreater\", \"<=\"]:\n return x <= y\n if key in [\"notlower\", \">=\"]:\n return x >= y\n if key in [\"add\", \"+\"]:\n return x + y\n if key in [\"substract\", \"-\"]:\n return x - y\n if key in [\"multiply\", \"*\"]:\n return x * y\n if key in [\"divide\", \"/\"]:\n return x / y\n if key in [\"concatenate\", \"||\"]:\n return fun.concat(x, y)\n if key in [\"is not\"]:\n return x.isnot(y)\n if key in [\"<->\"]:\n return x.distance_centroid(y)\n if key in [\"getitem\"]:\n if isinstance(y, Slice):\n return x[parse_single(y.start, int) : parse_single(y.stop, int)]\n else:\n return x[read_pgid(y)]\n if key in [\"in\"]:\n return x.in_(y)\n\n raise APIError(\"Operator '%s' not supported\" % key)\n\n\ndef parse_sqla_modifier(raw_key, *operands):\n key = raw_key.lower().strip()\n if not operands:\n raise APIError(\"Missing arguments for '%s'.\" % key)\n\n if len(operands) != 1:\n raise APIError(\n \"Wrong number of arguments for '%s'. Expected: 1 Got: %s\"\n % (key, len(operands))\n )\n x = operands[0]\n if key in [\"asc\"]:\n return x.asc()\n if key in [\"desc\"]:\n return x.desc()\n raise APIError(\"Operator %s not supported\" % key)\n", "path": "api/parser.py" } ]
[ { "content": "###########\n# Parsers #\n###########\nimport decimal\nimport re\nfrom datetime import datetime, date\n\nimport geoalchemy2 # Although this import seems unused is has to be here\nimport sqlalchemy as sa\nfrom sqlalchemy import (\n Column,\n MetaData,\n Table,\n and_,\n not_,\n column,\n func,\n literal_column,\n or_,\n select,\n util,\n cast,\n)\nimport dateutil\nfrom sqlalchemy.dialects.postgresql.base import INTERVAL\nfrom sqlalchemy.schema import Sequence\nfrom sqlalchemy.sql import functions as fun\nfrom sqlalchemy.sql.annotation import Annotated\nfrom sqlalchemy.sql.elements import Slice\nfrom sqlalchemy.sql.expression import ColumnClause, CompoundSelect\nfrom sqlalchemy.sql.sqltypes import Interval, _AbstractInterval\n\nfrom api.connection import _get_engine\nfrom api.error import APIError, APIKeyError\nfrom api.connection import _get_engine\nfrom sqlalchemy.sql.sqltypes import Interval, _AbstractInterval\nfrom sqlalchemy.dialects.postgresql.base import INTERVAL\nfrom sqlalchemy import types as sqltypes\n\nfrom . import DEFAULT_SCHEMA\n\n__KNOWN_TABLES = {}\n\npgsql_qualifier = re.compile(r\"^[\\w\\d_\\.]+$\")\n\n\ndef get_or_403(dictionary, key):\n try:\n return dictionary[key]\n except KeyError:\n raise APIKeyError(dictionary, key)\n\n\ndef parse_single(x, caster):\n try:\n return caster(x)\n except ValueError:\n raise APIError(\"Could not parse %s as %s\" % (x, caster))\n\n\ndef is_pg_qual(x):\n if not isinstance(x, str):\n return False\n return pgsql_qualifier.search(x)\n\n\ndef read_pgvalue(x):\n # TODO: Implement check for valid values\n if x is None:\n return \"null\"\n return x\n\n\nclass ValidationError(Exception):\n def __init__(self, message, value):\n self.message = message\n self.value = value\n\n\ndef read_bool(s):\n if isinstance(s, bool):\n return s\n if s.lower() in [\"true\", \"false\"]:\n return s.lower() == \"true\"\n elif s.lower() in [\"yes\", \"no\"]:\n return s.lower() == \"true\"\n else:\n raise APIError(\"Invalid value in binary field\", s)\n\n\ndef read_pgid(s):\n if is_pg_qual(s):\n return s\n raise APIError(\"Invalid identifier: '%s'\" % s)\n\n\ndef set_meta_info(method, user, message=None):\n val_dict = {}\n val_dict[\"_user\"] = user # TODO: Add user handling\n val_dict[\"_message\"] = message\n return val_dict\n\n\ndef parse_insert(d, context, message=None, mapper=None):\n table = Table(\n read_pgid(get_or_403(d, \"table\")),\n MetaData(bind=_get_engine()),\n autoload=True,\n schema=read_pgid(get_or_403(d, \"schema\")),\n )\n field_strings = []\n for field in d.get(\"fields\", []):\n if not (\n (isinstance(field, dict) and \"type\" in field and field[\"type\"] == \"column\")\n or isinstance(field, str)\n ):\n raise APIError(\"Only pure column expressions are allowed in insert\")\n field_strings.append(parse_expression(field))\n\n query = table.insert()\n\n if not \"method\" in d:\n d[\"method\"] = \"values\"\n if d[\"method\"] == \"values\":\n if field_strings:\n raw_values = get_or_403(d, \"values\")\n if not isinstance(raw_values, list):\n raise APIError(\"{} is not a list\".format(raw_values))\n values = (\n zip(\n field_strings,\n parse_expression(x, allow_untyped_dicts=True, escape_quotes=False),\n )\n for x in raw_values\n )\n else:\n values = get_or_403(d, \"values\")\n\n def clear_meta(vals):\n val_dict = dict(vals)\n # make sure meta fields are not compromised\n if context[\"user\"].is_anonymous:\n username = \"Anonymous\"\n else:\n username = context[\"user\"].name\n val_dict.update(set_meta_info(\"insert\", username, 
message))\n return val_dict\n\n values = list(map(clear_meta, values))\n\n query = query.values(values)\n elif d[\"method\"] == \"select\":\n values = parse_select(d[\"values\"])\n query = query.from_select(field_strings, values)\n else:\n raise APIError(\"Unknown insert method: \" + str(d[\"method\"]))\n\n if \"returning\" in d:\n return_clauses = [parse_expression(x, mapper) for x in d[\"returning\"]]\n query = query.returning(*return_clauses)\n\n return query, values\n\n\ndef parse_select(d):\n \"\"\"\n Defintion of a select query according to\n http://www.postgresql.org/docs/9.3/static/sql-select.html\n\n not implemented:\n [ WITH [ RECURSIVE ] with_query [, ...] ]\n [ WINDOW window_name AS ( window_definition ) [, ...] ]\n [ FOR { UPDATE | NO KEY UPDATE | SHARE | KEY SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]\n \"\"\"\n distinct = d.get(\"distinct\", False)\n\n L = None\n\n keyword = d.get(\"keyword\")\n\n if keyword and keyword.lower() in [\"union\", \"except\", \"intersect\"]:\n partials = []\n for part_sel in d.get(\"selects\", []):\n t = part_sel.get(\"type\")\n if t == \"grouping\":\n grouping = get_or_403(part_sel, \"grouping\")\n if isinstance(grouping, dict):\n partials.append(parse_select(grouping))\n elif isinstance(grouping, list):\n partials = map(parse_select, grouping)\n else:\n APIError(\n \"Cannot handle grouping type. Dictionary or list expected.\"\n )\n elif t == \"select\":\n partials.append(parse_select(part_sel))\n else:\n raise APIError(\"Unknown select type: \" + t)\n query = CompoundSelect(util.symbol(keyword), *partials)\n else:\n kwargs = dict(distinct=distinct)\n if \"fields\" in d and d[\"fields\"]:\n L = []\n for field in d[\"fields\"]:\n col = parse_expression(field)\n if \"as\" in field:\n col.label(read_pgid(field[\"as\"]))\n L.append(col)\n if \"from\" in d:\n kwargs[\"from_obj\"] = parse_from_item(get_or_403(d, \"from\"))\n else:\n kwargs[\"from_obj\"] = []\n if not L:\n L = \"*\"\n kwargs[\"columns\"] = L\n query = select(**kwargs)\n\n # [ WHERE condition ]\n if d.get(\"where\", False):\n query = query.where(parse_condition(d[\"where\"]))\n\n if \"group_by\" in d:\n query = query.group_by(*[parse_expression(f) for f in d[\"group_by\"]])\n\n if \"having\" in d:\n query.having([parse_condition(f) for f in d[\"having\"]])\n\n if \"select\" in d:\n for constraint in d[\"select\"]:\n type = get_or_403(constraint, \"type\")\n subquery = parse_select(get_or_403(constraint, \"query\"))\n if type.lower() == \"union\":\n query.union(subquery)\n elif type.lower() == \"intersect\":\n query.intersect(subquery)\n elif type.lower() == \"except\":\n query.except_(subquery)\n if \"order_by\" in d:\n for ob in d[\"order_by\"]:\n expr = parse_expression(ob)\n if isinstance(ob, dict):\n desc = ob.get(\"ordering\", \"asc\").lower() == \"desc\"\n if desc:\n expr = expr.desc()\n query = query.order_by(expr)\n\n if \"limit\" in d:\n if isinstance(d[\"limit\"], int) or d[\"limit\"].isdigit():\n query = query.limit(int(d[\"limit\"]))\n else:\n raise APIError(\"Invalid LIMIT: Expected a digit\")\n\n if \"offset\" in d:\n if isinstance(d[\"offset\"], int) or d[\"offset\"].isdigit():\n query = query.offset(int(d[\"offset\"]))\n else:\n raise APIError(\"Invalid LIMIT: Expected a digit\")\n return query\n\n\ndef parse_from_item(d):\n \"\"\"\n Defintion of a from_item according to \n http://www.postgresql.org/docs/9.3/static/sql-select.html\n \n return: A from_item string with checked psql qualifiers.\n \n Not implemented:\n with_query_name [ [ AS ] alias [ ( column_alias 
[, ...] ) ] ]\n [ LATERAL ] function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]\n [ LATERAL ] function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )\n \"\"\"\n # TODO: If 'type' is not set assume just a table name is present\n if isinstance(d, str):\n d = {\"type\": \"table\", \"table\": d}\n if isinstance(d, list):\n return [parse_from_item(f) for f in d]\n dtype = get_or_403(d, \"type\")\n if dtype == \"table\":\n schema_name = read_pgid(d[\"schema\"]) if \"schema\" in d else None\n only = d.get(\"only\", False)\n ext_name = table_name = read_pgid(get_or_403(d, \"table\"))\n tkwargs = dict(autoload=True)\n if schema_name:\n ext_name = schema_name + \".\" + ext_name\n tkwargs[\"schema\"] = d[\"schema\"]\n if ext_name in __PARSER_META.tables:\n item = __PARSER_META.tables[ext_name]\n else:\n try:\n item = Table(d[\"table\"], __PARSER_META, **tkwargs)\n except sa.exc.NoSuchTableError as e:\n raise APIError(\"Table {table} not found\".format(table=ext_name))\n\n engine = _get_engine()\n conn = engine.connect()\n exists = engine.dialect.has_table(conn, item.name, item.schema)\n conn.close()\n if not exists:\n raise APIError(\"Table not found: \" + str(item), status=400)\n elif dtype == \"select\":\n item = parse_select(d)\n elif dtype == \"join\":\n left = parse_from_item(get_or_403(d, \"left\"))\n right = parse_from_item(get_or_403(d, \"right\"))\n is_outer = d.get(\"is_outer\", False)\n full = d.get(\"is_full\", False)\n on_clause = None\n if \"on\" in d:\n on_clause = parse_condition(d[\"on\"])\n item = left.join(right, onclause=on_clause, isouter=is_outer, full=full)\n else:\n raise APIError(\"Unknown from-item: \" + dtype)\n\n if \"alias\" in d:\n item = item.alias(read_pgid(d[\"alias\"]))\n return item\n\n\n__PARSER_META = MetaData(bind=_get_engine())\n\n\ndef load_table_from_metadata(table_name, schema_name=None):\n ext_name = table_name\n if schema_name:\n ext_name = schema_name + \".\" + ext_name\n if ext_name and ext_name in __PARSER_META.tables:\n return __PARSER_META.tables[ext_name]\n else:\n if _get_engine().dialect.has_table(\n _get_engine().connect(), table_name, schema=schema_name\n ):\n return Table(table_name, __PARSER_META, autoload=True, schema=schema_name)\n\n\ndef parse_column(d, mapper):\n name = get_or_403(d, \"column\")\n is_literal = parse_single(d.get(\"is_literal\", False), bool)\n table_name = d.get(\"table\")\n table = None\n if table_name:\n table_name = read_pgid(table_name)\n if mapper is None:\n mapper = dict()\n do_map = lambda x: mapper.get(x, x)\n if \"schema\" in d:\n schema_name = read_pgid(do_map(d[\"schema\"]))\n else:\n schema_name = None\n table = load_table_from_metadata(table_name, schema_name=schema_name)\n if table is not None and name in table.c:\n col = table.c[name]\n if isinstance(col.type, INTERVAL):\n col.type = Interval(col.type)\n return col\n else:\n if is_literal:\n return literal_column(name)\n else:\n if table_name is not None:\n return literal_column(table_name + \".\" + name)\n else:\n return column(name)\n\n\ndef parse_type(dt_string, **kwargs):\n\n if isinstance(dt_string, dict):\n dt = parse_type(\n get_or_403(dt_string, \"datatype\"), **dt_string.get(\"kwargs\", {})\n )\n return dt\n else:\n # Are you an array?\n dtarr_expression = r\"(?P<dtname>[A-z_]+)\\s*\\[\\]\"\n arr_match = re.match(dtarr_expression, dt_string)\n if arr_match:\n is_array = True\n dt_string = arr_match.groups()[0]\n dt, autoincrement = parse_type(dt_string)\n return sa.ARRAY(dt), 
autoincrement\n\n # Is the datatypestring of form NAME(NUMBER)?\n dt_expression = r\"(?P<dtname>[A-z_]+)\\s*\\((?P<cardinality>.*(,.*)?)\\)\"\n match = re.match(dt_expression, dt_string)\n if match:\n dt_string = match.groups()[0]\n if dt_string.lower() == \"geometry\":\n return geoalchemy2.Geometry(geometry_type=match.groups()[1]), False\n else:\n dt_cardinality = map(int, match.groups()[1].replace(\" \", \"\").split(\",\"))\n dt, autoincrement = parse_type(dt_string)\n return dt(*dt_cardinality, **kwargs), autoincrement\n\n # So it's a plain type\n autoincrement = False\n\n dt_string = dt_string.lower()\n\n if dt_string in (\"int\", \"integer\"):\n dt = sa.types.INTEGER\n elif dt_string in (\"bigint\", \"biginteger\"):\n dt = sa.types.BigInteger\n elif dt_string in (\"bit\",):\n dt = sa.types.Binary\n elif dt_string in (\"boolean\", \"bool\"):\n dt = sa.types.Boolean\n elif dt_string in (\"char\",):\n dt = sqltypes.CHAR\n elif dt_string in (\"date\",):\n dt = sqltypes.Date\n elif dt_string in (\"datetime\",):\n dt = sqltypes.DateTime\n elif dt_string in (\"timestamp\", \"timestamp without time zone\"):\n dt = sqltypes.TIMESTAMP\n elif dt_string in (\"time\", \"time without time zone\"):\n dt = sqltypes.TIME\n elif dt_string in (\"float\"):\n dt = sqltypes.FLOAT\n elif dt_string in (\"decimal\"):\n dt = sqltypes.DECIMAL\n elif dt_string in (\"interval\",):\n dt = sqltypes.Interval\n elif dt_string in (\"json\",):\n dt = sqltypes.JSON\n elif dt_string in (\"nchar\",):\n dt = sqltypes.NCHAR\n elif dt_string in (\"numerical\", \"numeric\"):\n dt = sa.types.Numeric\n elif dt_string in [\"varchar\", \"character varying\"]:\n dt = sqltypes.VARCHAR\n elif dt_string in (\"real\",):\n dt = sqltypes.REAL\n elif dt_string in (\"smallint\",):\n dt = sqltypes.SMALLINT\n elif hasattr(geoalchemy2, dt_string):\n dt = getattr(geoalchemy2, dt_string)\n elif hasattr(sqltypes, dt_string.upper()):\n dt = getattr(sqltypes, dt_string.upper())\n elif dt_string == \"bigserial\":\n dt = sa.types.BigInteger\n autoincrement = True\n else:\n raise APIError(\"Unknown type (%s).\" % dt_string)\n return dt, autoincrement\n\n\ndef parse_expression(d, mapper=None, allow_untyped_dicts=False, escape_quotes=True):\n # TODO: Implement\n if isinstance(d, dict):\n if allow_untyped_dicts and \"type\" not in d:\n return d\n dtype = get_or_403(d, \"type\")\n if dtype == \"column\":\n return parse_column(d, mapper)\n if dtype == \"grouping\":\n grouping = get_or_403(d, \"grouping\")\n if isinstance(grouping, list):\n return [parse_expression(e) for e in grouping]\n else:\n return parse_expression(grouping)\n if dtype == \"operator\":\n return parse_operator(d)\n if dtype == \"modifier\":\n return parse_modifier(d)\n if dtype == \"function\":\n return parse_function(d)\n if dtype == \"slice\":\n return parse_slice(d)\n if dtype == \"star\":\n return \"*\"\n if dtype == \"value\":\n if \"value\" in d:\n if \"datatype\" in d:\n dt = d[\"datatype\"]\n if dt == \"Decimal\":\n return decimal.Decimal(get_or_403(d, \"value\"))\n elif dt == \"date\":\n return dateutil.parser.parse(get_or_403(d, \"value\")).date()\n elif dt == \"datetime\":\n return dateutil.parser.parse(get_or_403(d, \"value\"))\n elif dt == \"time\":\n return dateutil.parser.parse(get_or_403(d, \"value\")).time()\n return read_pgvalue(get_or_403(d, \"value\"))\n else:\n return None\n if dtype == \"label\":\n return parse_label(d)\n if dtype == \"sequence\":\n schema = read_pgid(d[\"schema\"]) if \"schema\" in d else DEFAULT_SCHEMA\n s = '\"%s\".\"%s\"' % (schema, 
get_or_403(d, \"sequence\"))\n return Sequence(get_or_403(d, \"sequence\"), schema=schema)\n if dtype == \"select\":\n return parse_select(d)\n if dtype == \"cast\":\n expr = parse_expression(get_or_403(d, \"source\"))\n t, _ = parse_type(get_or_403(d, \"as\"))\n return cast(expr, t)\n else:\n raise APIError(\"Unknown expression type: \" + dtype)\n if isinstance(d, list):\n return [\n parse_expression(\n x, allow_untyped_dicts=allow_untyped_dicts, escape_quotes=escape_quotes\n )\n for x in d\n ]\n if isinstance(d, str):\n if escape_quotes:\n return d.replace('\"', \"\")\n else:\n return d\n return d\n\n\ndef parse_label(d):\n element = parse_expression(get_or_403(d, \"element\"))\n if not isinstance(element, sa.sql.expression.ClauseElement):\n element = sa.literal(element)\n return element.label(get_or_403(d, \"label\"))\n\n\ndef parse_slice(d):\n kwargs = {\"step\": 1}\n if \"start\" in d:\n kwargs[\"start\"] = d[\"start\"]\n if \"stop\" in d:\n kwargs[\"stop\"] = d[\"stop\"]\n return Slice(**kwargs)\n\n\ndef _unpack_clauses(clauses):\n if isinstance(clauses, list):\n clean_clauses = []\n for clause in clauses:\n if isinstance(clause, list):\n clean_clauses += list(map(_unpack_clauses, clause))\n else:\n clean_clauses.append(clause)\n clauses = {\n \"type\": \"operator\",\n \"operator\": \"AND\",\n \"operands\": list(map(parse_expression, clean_clauses)),\n }\n return clauses\n\n\ndef parse_condition(dl):\n clean_dl = _unpack_clauses(dl)\n return parse_expression(clean_dl)\n\n\ndef parse_operator(d):\n query = parse_sqla_operator(\n get_or_403(d, \"operator\"),\n *list(map(parse_expression, get_or_403(d, \"operands\")))\n )\n return query\n\n\ndef parse_modifier(d):\n query = parse_sqla_modifier(\n get_or_403(d, \"operator\"),\n *list(map(parse_expression, get_or_403(d, \"operands\")))\n )\n return query\n\n\ndef parse_function(d):\n fname = get_or_403(d, \"function\")\n\n operand_struc = get_or_403(d, \"operands\")\n if isinstance(operand_struc, list):\n operands = list(map(parse_expression, operand_struc))\n else:\n if (\n isinstance(operand_struc, dict)\n and operand_struc.get(\"type\", None) == \"grouping\"\n ):\n operands = parse_expression(operand_struc)\n else:\n operands = [parse_expression(operand_struc)]\n\n if fname == \"+\":\n if len(operands) != 2:\n raise APIError(\n \"Wrong number of arguments for function %s. Expected 2. 
Got %d\"\n % (fname, len(operands))\n )\n x, y = operands\n return x + y\n else:\n if fname == \"nextval\":\n return func.next_value(*operands)\n else:\n function = getattr(func, fname)\n return function(*operands)\n\n\ndef parse_scolumnd_from_columnd(schema, table, name, column_description):\n # Migrate Postgres to Python Structures\n data_type = column_description.get(\"data_type\")\n size = column_description.get(\"character_maximum_length\")\n if size is not None and data_type is not None:\n data_type += \"(\" + str(size) + \")\"\n\n notnull = column_description.get(\"is_nullable\", False)\n\n return {\n \"column_name\": name,\n \"not_null\": notnull,\n \"data_type\": data_type,\n \"new_name\": column_description.get(\"new_name\"),\n \"c_schema\": schema,\n \"c_table\": table,\n }\n\n\ndef parse_sconstd_from_constd(schema, table, name_const, constraint_description):\n defi = constraint_description.get(\"definition\")\n return {\n \"action\": None, # {ADD, DROP}\n \"constraint_type\": constraint_description.get(\n \"constraint_typ\"\n ), # {FOREIGN KEY, PRIMARY KEY, UNIQUE, CHECK}\n \"constraint_name\": name_const,\n \"constraint_parameter\": constraint_description.get(\"definition\")\n .split(\"(\")[1]\n .split(\")\")[0],\n # Things in Brackets, e.g. name of column\n \"reference_table\": defi.split(\"REFERENCES \")[1].split(\"(\")[2]\n if \"REFERENCES\" in defi\n else None,\n \"reference_column\": defi.split(\"(\")[2].split(\")\")[1]\n if \"REFERENCES\" in defi\n else None,\n \"c_schema\": schema,\n \"c_table\": table,\n }\n\n\ndef replace_None_with_NULL(dictonary):\n # Replacing None with null for Database\n for key, value in dictonary.items():\n if value is None:\n dictonary[key] = \"NULL\"\n\n return dictonary\n\n\ndef split(string, seperator):\n if string is None:\n return None\n else:\n return str(string).split(seperator)\n\n\ndef replace(string, occuring_symb, replace_symb):\n if string is None:\n return None\n else:\n return str(string).replace(occuring_symb, replace_symb)\n\n\ndef alchemyencoder(obj):\n \"\"\"JSON encoder function for SQLAlchemy special classes.\"\"\"\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n\n\nsql_operators = {\n \"EQUALS\": \"=\",\n \"GREATER\": \">\",\n \"LOWER\": \"<\",\n \"NOTEQUAL\": \"!=\",\n \"NOTGREATER\": \"<=\",\n \"NOTLOWER\": \">=\",\n \"=\": \"=\",\n \">\": \">\",\n \"<\": \"<\",\n \"!=\": \"!=\",\n \"<>\": \"!=\",\n \"<=\": \"<=\",\n \">=\": \">=\",\n}\n\n\ndef parse_sql_operator(key: str) -> str:\n return sql_operators.get(key)\n\n\ndef parse_sqla_operator(raw_key, *operands):\n key = raw_key.lower().strip()\n if not operands:\n raise APIError(\"Missing arguments for '%s'.\" % (key))\n if key in [\"and\"]:\n query = and_(*operands)\n return query\n elif key in [\"or\"]:\n query = or_(*operands)\n return query\n elif key in [\"not\"]:\n x = operands[0]\n return not_(parse_condition(x))\n else:\n if len(operands) != 2:\n raise APIError(\n \"Wrong number of arguments for '%s'. 
Expected: 2 Got: %s\"\n % (key, len(operands))\n )\n x, y = operands\n if key in [\"equals\", \"=\"]:\n return x == y\n if key in [\"greater\", \">\"]:\n return x > y\n if key in [\"lower\", \"<\"]:\n return x < y\n if key in [\"notequal\", \"<>\", \"!=\"]:\n return x != y\n if key in [\"notgreater\", \"<=\"]:\n return x <= y\n if key in [\"notlower\", \">=\"]:\n return x >= y\n if key in [\"add\", \"+\"]:\n return x + y\n if key in [\"substract\", \"-\"]:\n return x - y\n if key in [\"multiply\", \"*\"]:\n return x * y\n if key in [\"divide\", \"/\"]:\n return x / y\n if key in [\"concatenate\", \"||\"]:\n return fun.concat(x, y)\n if key in [\"is not\"]:\n return x.isnot(y)\n if key in [\"<->\"]:\n return x.distance_centroid(y)\n if key in [\"getitem\"]:\n if isinstance(y, Slice):\n return x[parse_single(y.start, int) : parse_single(y.stop, int)]\n else:\n return x[read_pgid(y)]\n if key in [\"in\"]:\n return x.in_(y)\n\n raise APIError(\"Operator '%s' not supported\" % key)\n\n\ndef parse_sqla_modifier(raw_key, *operands):\n key = raw_key.lower().strip()\n if not operands:\n raise APIError(\"Missing arguments for '%s'.\" % key)\n\n if len(operands) != 1:\n raise APIError(\n \"Wrong number of arguments for '%s'. Expected: 1 Got: %s\"\n % (key, len(operands))\n )\n x = operands[0]\n if key in [\"asc\"]:\n return x.asc()\n if key in [\"desc\"]:\n return x.desc()\n raise APIError(\"Operator %s not supported\" % key)\n", "path": "api/parser.py" } ]
diff --git a/api/parser.py b/api/parser.py index 3c76c36ef..37cb06463 100644 --- a/api/parser.py +++ b/api/parser.py @@ -357,7 +357,10 @@ def parse_column(d, mapper): if is_literal: return literal_column(name) else: - return column(name) + if table_name is not None: + return literal_column(table_name + "." + name) + else: + return column(name) def parse_type(dt_string, **kwargs): diff --git a/api/tests/test_regression/test_issue_482.py b/api/tests/test_regression/test_issue_482.py new file mode 100644 index 000000000..57e361b0a --- /dev/null +++ b/api/tests/test_regression/test_issue_482.py @@ -0,0 +1,132 @@ +import json + +import requests + +from api.tests import APITestCase + +from ..util import load_content_as_json + + +class TestAliasesTracking(APITestCase): + def setUp(self): + self._structure_data = { + "constraints": [ + { + "constraint_type": "PRIMARY KEY", + "constraint_parameter": "id", + "reference_table": None, + "reference_column": None, + } + ], + "columns": [ + { + "name": "id", + "data_type": "bigserial", + "is_nullable": False, + "character_maximum_length": None, + }, + { + "name": "name", + "data_type": "character varying", + "is_nullable": True, + "character_maximum_length": 123, + }, + ], + } + + resp = self.__class__.client.put( + "/api/v0/schema/{schema}/tables/{table}/".format( + schema=self.test_schema, table=self.test_table + ), + data=json.dumps({"query": self._structure_data}), + HTTP_AUTHORIZATION="Token %s" % self.__class__.token, + content_type="application/json", + ) + + # Check HTTP-response (201 = Successful create) + self.assertEqual( + resp.status_code, 201, resp.json().get("reason", "No reason returned") + ) + + resp = self.__class__.client.post( + "/api/v0/schema/{schema}/tables/{table}/rows/new".format( + schema=self.test_schema, table=self.test_table + ), + data=json.dumps({"query": [{"name": "Hans"}, {"name": "Petra"}]}), + HTTP_AUTHORIZATION="Token %s" % self.__class__.token, + content_type="application/json", + ) + + # Check HTTP-response (201 = Successful create) + self.assertEqual( + resp.status_code, + 201, + load_content_as_json(resp).get("reason", "No reason returned"), + ) + + def test_aliases_in_form_clauses(self): + data = { + "query": { + "fields": [dict(type="column", column="id", table="a")], + "where": [ + { + "type": "operator", + "operator": "=", + "operands": [ + {"type": "column", "column": "name", "table": "a"}, + {"type": "value", "value": "Hans"}, + ], + } + ], + "from": { + "type": "join", + "left": { + "type": "table", + "table": self.test_table, + "schema": self.test_schema, + "alias": "a" + }, + "right": { + "type": "table", + "table": self.test_table, + "schema": self.test_schema, + "alias": "b" + }, + "on":[ + { + "type": "operator", + "operator": "=", + "operands": [ + {"type": "column", "column": "id", "table": "a"}, + {"type": "column", "column": "id", "table": "b"}, + ], + } + ] + } + } + } + + resp = self.__class__.client.post( + "/api/v0/advanced/search", + data=json.dumps(data), + HTTP_AUTHORIZATION="Token %s" % self.__class__.token, + content_type="application/json", + ) + + self.check_api_post( + "/api/v0/advanced/search", data=data, expected_result=[[1]] + ) + + def tearDown(self): + resp = self.__class__.client.delete( + "/api/v0/schema/{schema}/tables/{table}/".format( + schema=self.test_schema, table=self.test_table + ), + HTTP_AUTHORIZATION="Token %s" % self.__class__.token, + content_type="application/json", + ) + + # Check HTTP-response (200 = Successful request) + self.assertEqual( + resp.status_code, 200, 
resp.json().get("reason", "No reason returned") + ) diff --git a/versions/changelogs/current.md b/versions/changelogs/current.md index a601a060f..830f0834e 100644 --- a/versions/changelogs/current.md +++ b/versions/changelogs/current.md @@ -6,4 +6,5 @@ ### Bugs * API: Fixed negation in where clauses -* API: Fixed metadata tooltips \ No newline at end of file +* API: Fixed metadata tooltips +* API: Fixed alias handling (#482) \ No newline at end of file
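The patch above changes `parse_column` to return a `literal_column` qualified with the table or alias name when one is given, instead of always returning a bare `column`. A minimal standalone sketch (illustrative only, with a made-up table and aliases, not project code) of why that distinction matters in SQLAlchemy:

```python
# Shows how a bare column() loses its table qualifier while literal_column()
# keeps the "alias.column" text, which is what the fix above relies on when the
# same table is joined to itself under two aliases.
from sqlalchemy.sql import column, literal_column, table

people = table("people", column("id"), column("name"))  # hypothetical table
a = people.alias("a")
b = people.alias("b")

print(column("id"))            # renders as: id    (ambiguous in a self-join)
print(literal_column("a.id"))  # renders as: a.id  (the alias survives compilation)

on_clause = literal_column("a.id") == literal_column("b.id")
print(a.join(b, on_clause))    # people AS a JOIN people AS b ON a.id = b.id
```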
googleapis__google-api-python-client-1221
Published package is missing discovery files in discovery_cache
Many thanks to @wyk9787 for noticing this and reaching out.

All calls to `discovery.build()` using `2.0.0` fail with "unknown api name or version".

```python
from googleapiclient import discovery

client = discovery.build("cloudprofiler", "v2")
```

This is because the published package has no `discovery_cache/documents` directory.

1. `python3 -m venv env`
2. `source env/bin/activate`
3. `python3 -m pip install google-api-python-client`
4. `ls env/lib/python*/site-packages/googleapiclient/discovery_cache`

```
busunkim@busunkim:~/github$ ls env/lib/python*/site-packages/googleapiclient/discovery_cache
appengine_memcache.py  base.py  file_cache.py  __init__.py  __pycache__
```
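The one-line fix recorded below adds a `package_data` mapping so the cached discovery documents ship with the package. As a hedged illustration of that setuptools mechanism (hypothetical project and package names, not the library's real `setup.py`):

```python
# Sketch of a setup.py that ships JSON data files alongside the package.
# "exampleclient" is a stand-in name; the glob mirrors the fix applied below.
from setuptools import setup

setup(
    name="example-client",
    version="0.0.1",
    packages=["exampleclient", "exampleclient/discovery_cache"],
    # Without this mapping, *.json files under discovery_cache/documents/ are
    # omitted from the wheel/sdist and runtime lookups of cached documents fail.
    package_data={"exampleclient": ["discovery_cache/documents/*.json"]},
)
```

Setting `include_package_data=True` together with a `MANIFEST.in` entry is an alternative way to achieve the same result.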
[ { "content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0,<2dev\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"2.0.1\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0,<2dev\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"2.0.1\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 894018a1b1e..d3ef571a9a3 100644 --- a/setup.py +++ b/setup.py @@ -60,7 +60,7 @@ install_requires=install_requires, python_requires=">=3.6", packages=packages, - package_data={}, + package_data={"googleapiclient": ["discovery_cache/documents/*.json"]}, license="Apache 2.0", keywords="google api client", classifiers=[
buildbot__buildbot-3490
UnboundLocalError in mq/base.py on master shutdown Hello, We're using buildbot in multi-master mode and got this stacktrace on one of the master when shutting it down: ``` 2017-07-17 12:33:29+0000 [-] Waiting for 1 build(s) to finish 2017-07-17 12:33:29+0000 [-] Builder <Builder 'u'sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0'' at 140555339856784> has 1 builds running 2017-07-17 12:33:29+0000 [-] Not shutting down, there are 1 builds running 2017-07-17 12:33:29+0000 [-] Trying shutdown sequence again 2017-07-17 12:33:30+0000 [-] <Build sql-monitor-bitbucket_scality_ring-monitor_ring_frequent-prod-frontend-0 number:32108L results:exception>: stopping build: Master Shutdown 5 2017-07-17 12:33:30+0000 [-] Unhandled error in Deferred: 2017-07-17 12:33:30+0000 [-] Unhandled Error Traceback (most recent call last): File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks result = g.send(result) File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/botmaster.py", line 105, in cleanShutdown l.append(build.waitUntilFinished()) File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/process/build.py", line 687, in waitUntilFinished lambda: self.finished) File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator return _inlineCallbacks(None, gen, Deferred()) — <exception caught here> — File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks result = g.send(result) File "/root/bitbucket/scality/ring/venv/local/lib/python2.7/site-packages/buildbot/mq/base.py", line 40, in waitUntilEvent defer.returnValue(res) exceptions.UnboundLocalError: local variable 'res' referenced before assignment ``` Looking at the code at the end of `waitUntilEvent()`: ``` if not check: res = yield d yield buildCompleteConsumer.stopConsuming defer.returnValue(res) ``` If the check returned false, we try to return a value (`res`) that was never defined.
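To pin down the path that leaves `res` unbound: only the `if not check` branch assigns it, so the error fires when the check callback returns a truthy value and the wait is skipped. A dependency-free sketch of the shape of the fix (no Twisted required; the patch below also calls `stopConsuming()` instead of merely referencing the method):

```python
# Plain-Python reproduction of the control-flow bug and the default the fix adds.
def wait_until_event(check_passed, wait=lambda: "event-payload"):
    if not check_passed:
        res = wait()   # only assigned when we actually wait for the event
    else:
        res = None     # the default introduced by the fix
    return res

print(wait_until_event(check_passed=False))  # -> event-payload
print(wait_until_event(check_passed=True))   # -> None (was UnboundLocalError)
```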
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.python import failure\nfrom twisted.python import log\n\nfrom buildbot.util import service\n\n\nclass MQBase(service.AsyncService):\n name = 'mq-implementation'\n\n @defer.inlineCallbacks\n def waitUntilEvent(self, filter, check_callback):\n d = defer.Deferred()\n buildCompleteConsumer = yield self.startConsuming(\n lambda key, value: d.callback((key, value)),\n filter)\n check = yield check_callback()\n # we only wait if the check callback return true\n if not check:\n res = yield d\n yield buildCompleteConsumer.stopConsuming\n defer.returnValue(res)\n\n\nclass QueueRef(object):\n\n __slots__ = ['callback']\n\n def __init__(self, callback):\n self.callback = callback\n\n def invoke(self, routing_key, data):\n if not self.callback:\n return\n\n try:\n x = self.callback(routing_key, data)\n except Exception:\n log.err(failure.Failure(), 'while invoking %r' % (self.callback,))\n return\n if isinstance(x, defer.Deferred):\n x.addErrback(log.err, 'while invoking %r' % (self.callback,))\n\n def stopConsuming(self):\n # subclasses should set self.callback to None in this method\n raise NotImplementedError\n", "path": "master/buildbot/mq/base.py" } ]
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\nfrom twisted.python import failure\nfrom twisted.python import log\n\nfrom buildbot.util import service\n\n\nclass MQBase(service.AsyncService):\n name = 'mq-implementation'\n\n @defer.inlineCallbacks\n def waitUntilEvent(self, filter, check_callback):\n d = defer.Deferred()\n buildCompleteConsumer = yield self.startConsuming(\n lambda key, value: d.callback((key, value)),\n filter)\n check = yield check_callback()\n # we only wait if the check callback return true\n if not check:\n res = yield d\n else:\n res = None\n yield buildCompleteConsumer.stopConsuming()\n defer.returnValue(res)\n\n\nclass QueueRef(object):\n\n __slots__ = ['callback']\n\n def __init__(self, callback):\n self.callback = callback\n\n def invoke(self, routing_key, data):\n if not self.callback:\n return\n\n try:\n x = self.callback(routing_key, data)\n except Exception:\n log.err(failure.Failure(), 'while invoking %r' % (self.callback,))\n return\n if isinstance(x, defer.Deferred):\n x.addErrback(log.err, 'while invoking %r' % (self.callback,))\n\n def stopConsuming(self):\n # subclasses should set self.callback to None in this method\n raise NotImplementedError\n", "path": "master/buildbot/mq/base.py" } ]
diff --git a/master/buildbot/mq/base.py b/master/buildbot/mq/base.py index 2379dead364c..b27f462492a4 100644 --- a/master/buildbot/mq/base.py +++ b/master/buildbot/mq/base.py @@ -36,7 +36,9 @@ def waitUntilEvent(self, filter, check_callback): # we only wait if the check callback return true if not check: res = yield d - yield buildCompleteConsumer.stopConsuming + else: + res = None + yield buildCompleteConsumer.stopConsuming() defer.returnValue(res) diff --git a/master/buildbot/newsfragments/3478.bugfix b/master/buildbot/newsfragments/3478.bugfix new file mode 100644 index 000000000000..5b79c0e6da39 --- /dev/null +++ b/master/buildbot/newsfragments/3478.bugfix @@ -0,0 +1 @@ +Fix exception when shutting down a master (:issue:`3478`)
liberapay__liberapay.com-195
Twitter API chokes on at-sign https://liberapay.com/on/twitter/@korben/ returns a 500. sentry#35, public link: https://sentry.changaco.oy.lc/share/issue/322e3335/.
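The fix recorded below normalises the URL slug by dropping a leading `@` before the user-name lookup. A standalone sketch of that normalisation (the function name and return shape are illustrative, not Liberapay's actual API):

```python
# A leading "~" selects lookup by user id; a leading "@" (as typed in handles
# like /on/twitter/@korben/) is simply stripped before the name lookup.
def parse_elsewhere_slug(uid):
    if uid[:1] == '~':
        return 'user_id', uid[1:]
    if uid[:1] == '@':
        uid = uid[1:]
    return 'user_name', uid

print(parse_elsewhere_slug('@korben'))  # -> ('user_name', 'korben')
print(parse_elsewhere_slug('korben'))   # -> ('user_name', 'korben')
print(parse_elsewhere_slug('~1234'))    # -> ('user_id', '1234')
```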
[ { "content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom datetime import timedelta\nimport json\nimport uuid\nimport xml.etree.ElementTree as ET\n\nfrom six.moves.urllib.parse import urlsplit, urlunsplit\n\nfrom aspen import Response\nfrom aspen.utils import utcnow\nfrom postgres.orm import Model\nfrom psycopg2 import IntegrityError\nimport xmltodict\n\nfrom liberapay.constants import AVATAR_QUERY\nfrom liberapay.security.crypto import constant_time_compare\nfrom liberapay.website import website\n\n\nCONNECT_TOKEN_TIMEOUT = timedelta(hours=24)\n\n\nclass UnknownAccountElsewhere(Exception): pass\n\n\nclass AccountElsewhere(Model):\n\n typname = \"elsewhere_with_participant\"\n\n def __init__(self, *args, **kwargs):\n super(AccountElsewhere, self).__init__(*args, **kwargs)\n self.platform_data = getattr(website.platforms, self.platform)\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing AccountElsewhere based on id.\n \"\"\"\n return cls.db.one(\"\"\"\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE id = %s\n \"\"\", (id,))\n\n @classmethod\n def from_user_id(cls, platform, user_id):\n \"\"\"Return an existing AccountElsewhere based on platform and user_id.\n \"\"\"\n return cls._from_thing('user_id', platform, user_id)\n\n @classmethod\n def from_user_name(cls, platform, user_name):\n \"\"\"Return an existing AccountElsewhere based on platform and user_name.\n \"\"\"\n return cls._from_thing('user_name', platform, user_name)\n\n @classmethod\n def _from_thing(cls, thing, platform, value):\n assert thing in ('user_id', 'user_name')\n if thing == 'user_name':\n thing = 'lower(user_name)'\n value = value.lower()\n exception = UnknownAccountElsewhere(thing, platform, value)\n return cls.db.one(\"\"\"\n\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE platform = %s\n AND {} = %s\n\n \"\"\".format(thing), (platform, value), default=exception)\n\n @classmethod\n def get_many(cls, platform, user_infos):\n accounts = []\n found = cls.db.all(\"\"\"\\\n\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE platform = %s\n AND user_id = any(%s)\n\n \"\"\", (platform, [i.user_id for i in user_infos]))\n found = {a.user_id: a for a in found}\n for i in user_infos:\n if i.user_id in found:\n accounts.append(found[i.user_id])\n else:\n accounts.append(cls.upsert(i))\n return accounts\n\n @classmethod\n def upsert(cls, i):\n \"\"\"Insert or update a user's info.\n \"\"\"\n\n # Clean up avatar_url\n if i.avatar_url:\n scheme, netloc, path, query, fragment = urlsplit(i.avatar_url)\n fragment = ''\n if netloc.endswith('githubusercontent.com') or \\\n netloc.endswith('gravatar.com'):\n query = AVATAR_QUERY\n i.avatar_url = urlunsplit((scheme, netloc, path, query, fragment))\n\n # Serialize extra_info\n if isinstance(i.extra_info, ET.Element):\n i.extra_info = xmltodict.parse(ET.tostring(i.extra_info))\n i.extra_info = json.dumps(i.extra_info)\n\n cols, vals = zip(*i.__dict__.items())\n cols = ', '.join(cols)\n placeholders = ', '.join(['%s']*len(vals))\n\n try:\n # Try to insert the account\n # We do this with a transaction so that if the insert fails, the\n # participant we reserved for them is rolled back as well.\n with cls.db.get_cursor() as cursor:\n id = cursor.one(\"\"\"\n INSERT INTO participants DEFAULT VALUES RETURNING id\n \"\"\")\n account = cursor.one(\"\"\"\n INSERT INTO elsewhere\n (participant, {0})\n VALUES (%s, {1})\n 
RETURNING elsewhere.*::elsewhere_with_participant\n \"\"\".format(cols, placeholders), (id,)+vals)\n except IntegrityError:\n # The account is already in the DB, update it instead\n account = cls.db.one(\"\"\"\n UPDATE elsewhere\n SET ({0}) = ({1})\n WHERE platform=%s AND user_id=%s\n RETURNING elsewhere.*::elsewhere_with_participant\n \"\"\".format(cols, placeholders), vals+(i.platform, i.user_id))\n if not account:\n raise\n\n # Return account after propagating avatar_url to participant\n account.participant.update_avatar()\n return account\n\n\n # Connect tokens\n # ==============\n\n def check_connect_token(self, token):\n return (\n self.connect_token and\n constant_time_compare(self.connect_token, token) and\n self.connect_expires > utcnow()\n )\n\n def make_connect_token(self):\n token = uuid.uuid4().hex\n expires = utcnow() + CONNECT_TOKEN_TIMEOUT\n return self.save_connect_token(token, expires)\n\n def save_connect_token(self, token, expires):\n return self.db.one(\"\"\"\n UPDATE elsewhere\n SET connect_token = %s\n , connect_expires = %s\n WHERE id = %s\n RETURNING connect_token, connect_expires\n \"\"\", (token, expires, self.id))\n\n\n # Random Stuff\n # ============\n\n def get_auth_session(self):\n if not self.token:\n return\n params = dict(token=self.token)\n if 'refresh_token' in self.token:\n params['token_updater'] = self.save_token\n return self.platform_data.get_auth_session(**params)\n\n @property\n def liberapay_slug(self):\n return self.user_name or ('~' + self.user_id)\n\n @property\n def liberapay_url(self):\n scheme = website.canonical_scheme\n host = website.canonical_host\n platform = self.platform\n slug = self.liberapay_slug\n return \"{scheme}://{host}/on/{platform}/{slug}/\".format(**locals())\n\n @property\n def html_url(self):\n return self.platform_data.account_url.format(\n user_id=self.user_id,\n user_name=self.user_name,\n platform_data=self.platform_data\n )\n\n @property\n def friendly_name(self):\n if getattr(self.platform, 'optional_user_name', False):\n return self.display_name or self.user_name or self.user_id\n else:\n return self.user_name or self.display_name or self.user_id\n\n @property\n def friendly_name_long(self):\n r = self.friendly_name\n display_name = self.display_name\n if display_name and display_name != r:\n return '%s (%s)' % (r, display_name)\n user_name = self.user_name\n if user_name and user_name != r:\n return '%s (%s)' % (r, user_name)\n return r\n\n def save_token(self, token):\n \"\"\"Saves the given access token in the database.\n \"\"\"\n self.db.run(\"\"\"\n UPDATE elsewhere\n SET token = %s\n WHERE id=%s\n \"\"\", (token, self.id))\n self.set_attributes(token=token)\n\n\ndef get_account_elsewhere(website, state, api_lookup=True):\n path = state['request'].line.uri.path\n platform = getattr(website.platforms, path['platform'], None)\n if platform is None:\n raise Response(404)\n uid = path['user_name']\n if uid[:1] == '~':\n key = 'user_id'\n uid = uid[1:]\n else:\n key = 'user_name'\n try:\n account = AccountElsewhere._from_thing(key, platform.name, uid)\n except UnknownAccountElsewhere:\n account = None\n if not account:\n if not api_lookup:\n raise Response(404)\n try:\n user_info = platform.get_user_info(key, uid)\n except Response as r:\n if r.code == 404:\n _ = state['_']\n err = _(\"There doesn't seem to be a user named {0} on {1}.\",\n uid, platform.display_name)\n raise Response(404, err)\n raise\n account = AccountElsewhere.upsert(user_info)\n return platform, account\n", "path": 
"liberapay/models/account_elsewhere.py" } ]
[ { "content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom datetime import timedelta\nimport json\nimport uuid\nimport xml.etree.ElementTree as ET\n\nfrom six.moves.urllib.parse import urlsplit, urlunsplit\n\nfrom aspen import Response\nfrom aspen.utils import utcnow\nfrom postgres.orm import Model\nfrom psycopg2 import IntegrityError\nimport xmltodict\n\nfrom liberapay.constants import AVATAR_QUERY\nfrom liberapay.security.crypto import constant_time_compare\nfrom liberapay.website import website\n\n\nCONNECT_TOKEN_TIMEOUT = timedelta(hours=24)\n\n\nclass UnknownAccountElsewhere(Exception): pass\n\n\nclass AccountElsewhere(Model):\n\n typname = \"elsewhere_with_participant\"\n\n def __init__(self, *args, **kwargs):\n super(AccountElsewhere, self).__init__(*args, **kwargs)\n self.platform_data = getattr(website.platforms, self.platform)\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing AccountElsewhere based on id.\n \"\"\"\n return cls.db.one(\"\"\"\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE id = %s\n \"\"\", (id,))\n\n @classmethod\n def from_user_id(cls, platform, user_id):\n \"\"\"Return an existing AccountElsewhere based on platform and user_id.\n \"\"\"\n return cls._from_thing('user_id', platform, user_id)\n\n @classmethod\n def from_user_name(cls, platform, user_name):\n \"\"\"Return an existing AccountElsewhere based on platform and user_name.\n \"\"\"\n return cls._from_thing('user_name', platform, user_name)\n\n @classmethod\n def _from_thing(cls, thing, platform, value):\n assert thing in ('user_id', 'user_name')\n if thing == 'user_name':\n thing = 'lower(user_name)'\n value = value.lower()\n exception = UnknownAccountElsewhere(thing, platform, value)\n return cls.db.one(\"\"\"\n\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE platform = %s\n AND {} = %s\n\n \"\"\".format(thing), (platform, value), default=exception)\n\n @classmethod\n def get_many(cls, platform, user_infos):\n accounts = []\n found = cls.db.all(\"\"\"\\\n\n SELECT elsewhere.*::elsewhere_with_participant\n FROM elsewhere\n WHERE platform = %s\n AND user_id = any(%s)\n\n \"\"\", (platform, [i.user_id for i in user_infos]))\n found = {a.user_id: a for a in found}\n for i in user_infos:\n if i.user_id in found:\n accounts.append(found[i.user_id])\n else:\n accounts.append(cls.upsert(i))\n return accounts\n\n @classmethod\n def upsert(cls, i):\n \"\"\"Insert or update a user's info.\n \"\"\"\n\n # Clean up avatar_url\n if i.avatar_url:\n scheme, netloc, path, query, fragment = urlsplit(i.avatar_url)\n fragment = ''\n if netloc.endswith('githubusercontent.com') or \\\n netloc.endswith('gravatar.com'):\n query = AVATAR_QUERY\n i.avatar_url = urlunsplit((scheme, netloc, path, query, fragment))\n\n # Serialize extra_info\n if isinstance(i.extra_info, ET.Element):\n i.extra_info = xmltodict.parse(ET.tostring(i.extra_info))\n i.extra_info = json.dumps(i.extra_info)\n\n cols, vals = zip(*i.__dict__.items())\n cols = ', '.join(cols)\n placeholders = ', '.join(['%s']*len(vals))\n\n try:\n # Try to insert the account\n # We do this with a transaction so that if the insert fails, the\n # participant we reserved for them is rolled back as well.\n with cls.db.get_cursor() as cursor:\n id = cursor.one(\"\"\"\n INSERT INTO participants DEFAULT VALUES RETURNING id\n \"\"\")\n account = cursor.one(\"\"\"\n INSERT INTO elsewhere\n (participant, {0})\n VALUES (%s, {1})\n 
RETURNING elsewhere.*::elsewhere_with_participant\n \"\"\".format(cols, placeholders), (id,)+vals)\n except IntegrityError:\n # The account is already in the DB, update it instead\n account = cls.db.one(\"\"\"\n UPDATE elsewhere\n SET ({0}) = ({1})\n WHERE platform=%s AND user_id=%s\n RETURNING elsewhere.*::elsewhere_with_participant\n \"\"\".format(cols, placeholders), vals+(i.platform, i.user_id))\n if not account:\n raise\n\n # Return account after propagating avatar_url to participant\n account.participant.update_avatar()\n return account\n\n\n # Connect tokens\n # ==============\n\n def check_connect_token(self, token):\n return (\n self.connect_token and\n constant_time_compare(self.connect_token, token) and\n self.connect_expires > utcnow()\n )\n\n def make_connect_token(self):\n token = uuid.uuid4().hex\n expires = utcnow() + CONNECT_TOKEN_TIMEOUT\n return self.save_connect_token(token, expires)\n\n def save_connect_token(self, token, expires):\n return self.db.one(\"\"\"\n UPDATE elsewhere\n SET connect_token = %s\n , connect_expires = %s\n WHERE id = %s\n RETURNING connect_token, connect_expires\n \"\"\", (token, expires, self.id))\n\n\n # Random Stuff\n # ============\n\n def get_auth_session(self):\n if not self.token:\n return\n params = dict(token=self.token)\n if 'refresh_token' in self.token:\n params['token_updater'] = self.save_token\n return self.platform_data.get_auth_session(**params)\n\n @property\n def liberapay_slug(self):\n return self.user_name or ('~' + self.user_id)\n\n @property\n def liberapay_url(self):\n scheme = website.canonical_scheme\n host = website.canonical_host\n platform = self.platform\n slug = self.liberapay_slug\n return \"{scheme}://{host}/on/{platform}/{slug}/\".format(**locals())\n\n @property\n def html_url(self):\n return self.platform_data.account_url.format(\n user_id=self.user_id,\n user_name=self.user_name,\n platform_data=self.platform_data\n )\n\n @property\n def friendly_name(self):\n if getattr(self.platform, 'optional_user_name', False):\n return self.display_name or self.user_name or self.user_id\n else:\n return self.user_name or self.display_name or self.user_id\n\n @property\n def friendly_name_long(self):\n r = self.friendly_name\n display_name = self.display_name\n if display_name and display_name != r:\n return '%s (%s)' % (r, display_name)\n user_name = self.user_name\n if user_name and user_name != r:\n return '%s (%s)' % (r, user_name)\n return r\n\n def save_token(self, token):\n \"\"\"Saves the given access token in the database.\n \"\"\"\n self.db.run(\"\"\"\n UPDATE elsewhere\n SET token = %s\n WHERE id=%s\n \"\"\", (token, self.id))\n self.set_attributes(token=token)\n\n\ndef get_account_elsewhere(website, state, api_lookup=True):\n path = state['request'].line.uri.path\n platform = getattr(website.platforms, path['platform'], None)\n if platform is None:\n raise Response(404)\n uid = path['user_name']\n if uid[:1] == '~':\n key = 'user_id'\n uid = uid[1:]\n else:\n key = 'user_name'\n if uid[:1] == '@':\n uid = uid[1:]\n try:\n account = AccountElsewhere._from_thing(key, platform.name, uid)\n except UnknownAccountElsewhere:\n account = None\n if not account:\n if not api_lookup:\n raise Response(404)\n try:\n user_info = platform.get_user_info(key, uid)\n except Response as r:\n if r.code == 404:\n _ = state['_']\n err = _(\"There doesn't seem to be a user named {0} on {1}.\",\n uid, platform.display_name)\n raise Response(404, err)\n raise\n account = AccountElsewhere.upsert(user_info)\n return platform, account\n", 
"path": "liberapay/models/account_elsewhere.py" } ]
diff --git a/liberapay/models/account_elsewhere.py b/liberapay/models/account_elsewhere.py index cc3f674596..81361dfb4c 100644 --- a/liberapay/models/account_elsewhere.py +++ b/liberapay/models/account_elsewhere.py @@ -242,6 +242,8 @@ def get_account_elsewhere(website, state, api_lookup=True): uid = uid[1:] else: key = 'user_name' + if uid[:1] == '@': + uid = uid[1:] try: account = AccountElsewhere._from_thing(key, platform.name, uid) except UnknownAccountElsewhere: diff --git a/tests/py/test_elsewhere.py b/tests/py/test_elsewhere.py index c156dcc191..dfc79c1c63 100644 --- a/tests/py/test_elsewhere.py +++ b/tests/py/test_elsewhere.py @@ -116,6 +116,19 @@ def test_user_page_shows_pledges(self, get_user_info): r = self.client.GET('/on/github/alice/') assert str(amount) in r.body, r.body.decode('utf8') + @mock.patch('liberapay.elsewhere._base.Platform.get_user_info') + def test_user_page_doesnt_fail_on_at_sign(self, get_user_info): + def f(k, v, *a): + if (k, v) == ('user_name', 'alice'): + return UserInfo( + platform='twitter', user_id='0', user_name='alice', + is_team=False + ) + raise Exception + get_user_info.side_effect = f + response = self.client.GET('/on/twitter/@alice/') + assert response.code == 200 + def test_user_pages_not_found(self): user_name = 'adhsjakdjsdkjsajdhksda' error = "There doesn't seem to be a user named %s on %s."
tornadoweb__tornado-3167
Tornado 6.2 release readiness I'm creating this issue to collect feedback on the 6.2 betas. For the folks who have tried them, do you think the release is ready to go or are there still more changes to be made? Tagging @minrk and @graingert as authors of relevant PRs, although I'd welcome feedback from anyone interested in this release.
[ { "content": "#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"The Tornado web server and tools.\"\"\"\n\n# version is a human-readable version number.\n\n# version_info is a four-tuple for programmatic comparison. The first\n# three numbers are the components of the version number. The fourth\n# is zero for an official release, positive for a development branch,\n# or negative for a release candidate or beta (after the base version\n# number has been incremented)\nversion = \"6.2b2\"\nversion_info = (6, 2, 0, -98)\n", "path": "tornado/__init__.py" } ]
[ { "content": "#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"The Tornado web server and tools.\"\"\"\n\n# version is a human-readable version number.\n\n# version_info is a four-tuple for programmatic comparison. The first\n# three numbers are the components of the version number. The fourth\n# is zero for an official release, positive for a development branch,\n# or negative for a release candidate or beta (after the base version\n# number has been incremented)\nversion = \"6.2\"\nversion_info = (6, 2, 0, 0)\n", "path": "tornado/__init__.py" } ]
diff --git a/docs/releases/v6.2.0.rst b/docs/releases/v6.2.0.rst index a2277b9267..57b76ecb14 100644 --- a/docs/releases/v6.2.0.rst +++ b/docs/releases/v6.2.0.rst @@ -1,8 +1,8 @@ What's new in Tornado 6.2.0 =========================== -Jun XX, 2022 ------------- +Jul 3, 2022 +----------- Deprecation notice ~~~~~~~~~~~~~~~~~~ @@ -75,6 +75,8 @@ General changes has been unnecessary since Python 3.2 added a logger of last resort. - The `.IOLoop` constructor now accepts an ``asyncio_loop`` keyword argument to initialize with a specfied asyncio event loop. +- It is now possible to construct an `.IOLoop` on one thread (with + ``make_current=False``) and start it on a different thread. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ diff --git a/tornado/__init__.py b/tornado/__init__.py index 43fe83cb3d..39d7c44bf4 100644 --- a/tornado/__init__.py +++ b/tornado/__init__.py @@ -22,5 +22,5 @@ # is zero for an official release, positive for a development branch, # or negative for a release candidate or beta (after the base version # number has been incremented) -version = "6.2b2" -version_info = (6, 2, 0, -98) +version = "6.2" +version_info = (6, 2, 0, 0)
mkdocs__mkdocs-708
Remove unicode_literals import in the CLI code
Looks like this can cause some issues with Click. I've not seen any, but we should probably remove it anyway, or we will start to get warnings from Click 5.0.

https://github.com/mitsuhiko/click/commit/5f337705f68bdfa66d7c7a9fe7fc5d6bfd48db94
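The change recorded below keeps the `unicode_literals` import and instead sets the module-level flag Click provides for silencing that warning on Python 2. A minimal sketch of the pattern (a toy command, not MkDocs' CLI; the flag belongs to the Click 5.x-7.x era and is unnecessary once Python 2 support is dropped):

```python
from __future__ import unicode_literals

import click

# Set once at import time, before any command runs, so Click 5.0+ does not
# warn about unicode_literals being active under Python 2.
click.disable_unicode_literals_warning = True


@click.command()
def cli():
    click.echo('hello')


if __name__ == '__main__':
    cli()
```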
[ { "content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport click\nimport socket\n\nfrom mkdocs import __version__\nfrom mkdocs import utils\nfrom mkdocs import exceptions\nfrom mkdocs.config import load_config\nfrom mkdocs.commands import build, gh_deploy, new, serve\n\nlog = logging.getLogger(__name__)\n\n\nclass State(object):\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ndef common_options(f):\n f = verbose_option(f)\n f = quiet_option(f)\n return f\n\n\nclean_help = \"Remove old files from the site_dir before building\"\nconfig_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally (default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nreload_help = \"Enable and disable the live reloading in the development server.\"\ncommit_message_help = (\"A commit message to use when commiting to the \"\n \"Github Pages remote branch\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. 
This \"\n \"overrides the value specified in config\")\n\n\[email protected](context_settings={'help_option_names': ['-h', '--help']})\[email protected]_option(__version__, '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n\n\[email protected](name=\"serve\")\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('--livereload/--no-livereload', default=True, help=reload_help)\n@common_options\ndef serve_command(dev_addr, config_file, strict, theme, livereload):\n \"\"\"Run the builtin development server\"\"\"\n\n logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n livereload=livereload,\n )\n except (exceptions.ConfigurationError, socket.error) as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"build\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, config_file, strict, theme, site_dir):\n \"\"\"Build the MkDocs documentation\"\"\"\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n theme=theme,\n site_dir=site_dir\n ), clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"json\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef json_command(clean, config_file, strict, site_dir):\n \"\"\"Build the MkDocs documentation to JSON files\n\n Rather than building your documentation to HTML pages, this\n outputs each page in a simple JSON format. This command is\n useful if you want to index your documentation in an external\n search engine.\n \"\"\"\n\n log.warning(\"The json command is deprecated and will be removed in a \"\n \"future MkDocs release. 
For details on updating: \"\n \"http://www.mkdocs.org/about/release-notes/\")\n\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n site_dir=site_dir\n ), dump_json=True, clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"gh-deploy\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-m', '--message', help=commit_message_help)\[email protected]('-b', '--remote-branch', help=remote_branch_help)\[email protected]('-r', '--remote-name', help=remote_branch_help)\n@common_options\ndef gh_deploy_command(config_file, clean, message, remote_branch, remote_name):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n config = load_config(\n config_file=config_file,\n remote_branch=remote_branch,\n remote_name=remote_name\n )\n build.build(config, clean_site_dir=clean)\n gh_deploy.gh_deploy(config, message=message)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"new\")\[email protected](\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n\nif __name__ == '__main__':\n cli()\n", "path": "mkdocs/__main__.py" } ]
[ { "content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport click\nimport socket\n\nfrom mkdocs import __version__\nfrom mkdocs import utils\nfrom mkdocs import exceptions\nfrom mkdocs.config import load_config\nfrom mkdocs.commands import build, gh_deploy, new, serve\n\nlog = logging.getLogger(__name__)\n\n# Disable the warning that Click displays (as of Click version 5.0) when users\n# use unicode_literals in Python 2.\n# See http://click.pocoo.org/dev/python3/#unicode-literals for more details.\nclick.disable_unicode_literals_warning = True\n\n\nclass State(object):\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ndef common_options(f):\n f = verbose_option(f)\n f = quiet_option(f)\n return f\n\n\nclean_help = \"Remove old files from the site_dir before building\"\nconfig_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally (default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nreload_help = \"Enable and disable the live reloading in the development server.\"\ncommit_message_help = (\"A commit message to use when commiting to the \"\n \"Github Pages remote branch\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. 
This \"\n \"overrides the value specified in config\")\n\n\[email protected](context_settings={'help_option_names': ['-h', '--help']})\[email protected]_option(__version__, '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n\n\[email protected](name=\"serve\")\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('--livereload/--no-livereload', default=True, help=reload_help)\n@common_options\ndef serve_command(dev_addr, config_file, strict, theme, livereload):\n \"\"\"Run the builtin development server\"\"\"\n\n logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n livereload=livereload,\n )\n except (exceptions.ConfigurationError, socket.error) as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"build\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, config_file, strict, theme, site_dir):\n \"\"\"Build the MkDocs documentation\"\"\"\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n theme=theme,\n site_dir=site_dir\n ), clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"json\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef json_command(clean, config_file, strict, site_dir):\n \"\"\"Build the MkDocs documentation to JSON files\n\n Rather than building your documentation to HTML pages, this\n outputs each page in a simple JSON format. This command is\n useful if you want to index your documentation in an external\n search engine.\n \"\"\"\n\n log.warning(\"The json command is deprecated and will be removed in a \"\n \"future MkDocs release. 
For details on updating: \"\n \"http://www.mkdocs.org/about/release-notes/\")\n\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n site_dir=site_dir\n ), dump_json=True, clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"gh-deploy\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-m', '--message', help=commit_message_help)\[email protected]('-b', '--remote-branch', help=remote_branch_help)\[email protected]('-r', '--remote-name', help=remote_branch_help)\n@common_options\ndef gh_deploy_command(config_file, clean, message, remote_branch, remote_name):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n config = load_config(\n config_file=config_file,\n remote_branch=remote_branch,\n remote_name=remote_name\n )\n build.build(config, clean_site_dir=clean)\n gh_deploy.gh_deploy(config, message=message)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"new\")\[email protected](\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n\nif __name__ == '__main__':\n cli()\n", "path": "mkdocs/__main__.py" } ]
diff --git a/mkdocs/__main__.py b/mkdocs/__main__.py index 6d76f97235..7f788141d2 100644 --- a/mkdocs/__main__.py +++ b/mkdocs/__main__.py @@ -14,6 +14,11 @@ log = logging.getLogger(__name__) +# Disable the warning that Click displays (as of Click version 5.0) when users +# use unicode_literals in Python 2. +# See http://click.pocoo.org/dev/python3/#unicode-literals for more details. +click.disable_unicode_literals_warning = True + class State(object): ''' Maintain logging level.'''
alltheplaces__alltheplaces-2638
Spider costco is broken During the global build at 2021-08-18-14-42-26, spider **costco** failed with **0 features** and **2 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/costco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/costco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/costco.geojson))
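The fix captured in the record below amounts to replacing the bare `Mozilla/5.0` user agent with a full desktop Chrome string, a common response when a site starts rejecting obviously non-browser clients. A hedged Scrapy sketch of a spider-level `USER_AGENT` override (toy spider name and parse method; the UA string matches the one in the updated file):

```python
import scrapy


class ExampleSpider(scrapy.Spider):
    name = "example"
    start_urls = ["https://www.costco.com/warehouse-locations"]
    # custom_settings overrides project-wide settings for this spider only.
    custom_settings = {
        "USER_AGENT": (
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/92.0.4515.159 Safari/537.36"
        ),
    }

    def parse(self, response):
        self.logger.info("fetched %s with status %s", response.url, response.status)
```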
[ { "content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\nfrom urllib.parse import urlencode\n\nfrom locations.items import GeojsonPointItem\n\nDAYS_NAME = {\n 'm': 'Mo',\n 'mon': 'Mo',\n 't': 'Tu',\n 'w': 'We',\n 's': 'Th',\n 'f': 'Fr',\n 'f ': 'Fr',\n 'sun': 'Su',\n 'sat': 'Sa',\n 'daily': '',\n}\n\n\nclass CostcoSpider(scrapy.Spider):\n name = \"costco\"\n item_attributes = {'brand': 'Costco', 'brand_wikidata': 'Q715583'}\n allowed_domains = ['www.costco.com']\n start_urls = (\n 'https://www.costco.com/warehouse-locations',\n )\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0',\n }\n\n download_delay = 0.5\n\n def parse(self, response):\n url = 'https://www.costco.com/AjaxWarehouseBrowseLookupView?'\n\n params = {\n \"langId\": \"-1\",\n # \"storeId\": \"10301\",\n \"numOfWarehouses\": \"50\", # max allowed\n \"hasGas\": \"false\",\n \"hasTires\": \"false\",\n \"hasFood\": \"false\",\n \"hasHearing\": \"false\",\n \"hasPharmacy\": \"false\",\n \"hasOptical\": \"false\",\n \"hasBusiness\": \"false\",\n \"hasPhotoCenter\": \"false\",\n \"tiresCheckout\": \"0\",\n \"isTransferWarehouse\": \"false\",\n \"populateWarehouseDetails\": \"true\",\n \"warehousePickupCheckout\": \"false\",\n \"countryCode\": \"US\",\n }\n\n with open('./locations/searchable_points/us_centroids_100mile_radius.csv') as points:\n next(points)\n for point in points:\n _, lat, lon = point.strip().split(',')\n params.update({\"latitude\": lat, \"longitude\": lon})\n yield scrapy.Request(url=url + urlencode(params), callback=self.parse_ajax)\n\n def store_hours(self, store_hours):\n opening_hours = []\n\n if not store_hours:\n return None\n\n for day_info in store_hours:\n if day_info.lower().find('close') > -1:\n continue\n\n match = re.match(\n r'^(\\w+)-?[\\.:]?([A-Za-z]*)\\.? 
*(\\d{1,2}):(\\d{2}) ?(am|pm|) *- +(\\d{1,2}):(\\d{2}) ?(am|pm|hrs\\.)$', day_info)\n if not match:\n self.logger.warn(\"Couldn't match hours: %s\", day_info)\n\n try:\n day_from, day_to, fr_hr, fr_min, fr_ampm, to_hr, to_min, to_ampm = match.groups()\n except ValueError:\n self.logger.warn(\"Couldn't match hours: %s\", day_info)\n raise\n\n day_from = DAYS_NAME[day_from.lower()]\n day_to = DAYS_NAME[day_to.lower()] if day_to else day_from\n\n if day_from != day_to:\n day_str = '{}-{}'.format(day_from, day_to)\n else:\n day_str = '{}'.format(day_from)\n\n day_hours = '%s %02d:%02d-%02d:%02d' % (\n day_str,\n int(fr_hr) + 12 if fr_ampm == 'pm' else int(fr_hr),\n int(fr_min),\n int(to_hr) + 12 if to_ampm == 'pm' else int(to_hr),\n int(to_min),\n )\n\n opening_hours.append(day_hours.strip())\n\n return '; '.join(opening_hours)\n\n def _clean_text(self, text):\n return re.sub(\"[\\r\\n\\t]\", \"\", text).strip()\n\n def parse_ajax(self, response):\n body = json.loads(response.body_as_unicode())\n\n for store in body[1:]:\n if store[\"distance\"] < 110:\n # only process stores that are within 110 miles of query point\n # (to reduce processing a ton of duplicates)\n ref = store['identifier']\n department = store['specialtyDepartments']\n\n fuels = {}\n if 'gasPrices' in store:\n fuels = {\n 'fuel:diesel': 'diesel' in store['gasPrices'],\n 'fuel:octane_87': 'regular' in store['gasPrices'],\n 'fuel:octane_91': 'premium' in store['gasPrices']\n }\n\n properties = {\n 'lat': store.get('latitude'),\n 'lon': store.get('longitude'),\n 'ref': ref,\n 'phone': self._clean_text(store.get('phone')),\n 'name': f\"Costco {store['locationName']}\",\n 'addr_full': store['address1'],\n 'city': store['city'],\n 'state': store['state'],\n 'postcode': store.get('zipCode'),\n 'country': store.get('country'),\n 'website': 'https://www.costco.com/warehouse-locations/store-{}.html'.format(ref),\n 'extras': {\n 'shop': 'supermarket',\n 'number': store[\"displayName\"],\n 'amenity:fuel': store['hasGasDepartment'],\n 'amenity:pharmacy': store['hasPharmacyDepartment'],\n 'atm': any('ATM' == d['name'] for d in department) or None,\n 'fuel:propane': any('Propane' == d['name'] for d in department) or None,\n **fuels\n }\n }\n\n hours = store.get('warehouseHours')\n if hours:\n try:\n properties[\"opening_hours\"] = self.store_hours(hours)\n except:\n pass\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/costco.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\nfrom urllib.parse import urlencode\n\nfrom locations.items import GeojsonPointItem\n\nDAYS_NAME = {\n 'm': 'Mo',\n 'mon': 'Mo',\n 't': 'Tu',\n 'w': 'We',\n 's': 'Th',\n 'f': 'Fr',\n 'f ': 'Fr',\n 'sun': 'Su',\n 'sat': 'Sa',\n 'daily': '',\n}\n\n\nclass CostcoSpider(scrapy.Spider):\n name = \"costco\"\n item_attributes = {'brand': 'Costco', 'brand_wikidata': 'Q715583'}\n allowed_domains = ['www.costco.com']\n start_urls = (\n 'https://www.costco.com/warehouse-locations',\n )\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',\n }\n\n download_delay = 0.5\n\n def parse(self, response):\n url = 'https://www.costco.com/AjaxWarehouseBrowseLookupView?'\n\n params = {\n \"langId\": \"-1\",\n # \"storeId\": \"10301\",\n \"numOfWarehouses\": \"50\", # max allowed\n \"hasGas\": \"false\",\n \"hasTires\": \"false\",\n \"hasFood\": \"false\",\n \"hasHearing\": \"false\",\n \"hasPharmacy\": \"false\",\n \"hasOptical\": \"false\",\n \"hasBusiness\": \"false\",\n \"hasPhotoCenter\": \"false\",\n \"tiresCheckout\": \"0\",\n \"isTransferWarehouse\": \"false\",\n \"populateWarehouseDetails\": \"true\",\n \"warehousePickupCheckout\": \"false\",\n \"countryCode\": \"US\",\n }\n\n with open('./locations/searchable_points/us_centroids_100mile_radius.csv') as points:\n next(points)\n for point in points:\n _, lat, lon = point.strip().split(',')\n params.update({\"latitude\": lat, \"longitude\": lon})\n yield scrapy.Request(url=url + urlencode(params), callback=self.parse_ajax)\n\n def store_hours(self, store_hours):\n opening_hours = []\n\n if not store_hours:\n return None\n\n for day_info in store_hours:\n if day_info.lower().find('close') > -1:\n continue\n\n match = re.match(\n r'^(\\w+)-?[\\.:]?([A-Za-z]*)\\.? 
*(\\d{1,2}):(\\d{2}) ?(am|pm|) *- +(\\d{1,2}):(\\d{2}) ?(am|pm|hrs\\.)$', day_info)\n if not match:\n self.logger.warn(\"Couldn't match hours: %s\", day_info)\n\n try:\n day_from, day_to, fr_hr, fr_min, fr_ampm, to_hr, to_min, to_ampm = match.groups()\n except ValueError:\n self.logger.warn(\"Couldn't match hours: %s\", day_info)\n raise\n\n day_from = DAYS_NAME[day_from.lower()]\n day_to = DAYS_NAME[day_to.lower()] if day_to else day_from\n\n if day_from != day_to:\n day_str = '{}-{}'.format(day_from, day_to)\n else:\n day_str = '{}'.format(day_from)\n\n day_hours = '%s %02d:%02d-%02d:%02d' % (\n day_str,\n int(fr_hr) + 12 if fr_ampm == 'pm' else int(fr_hr),\n int(fr_min),\n int(to_hr) + 12 if to_ampm == 'pm' else int(to_hr),\n int(to_min),\n )\n\n opening_hours.append(day_hours.strip())\n\n return '; '.join(opening_hours)\n\n def _clean_text(self, text):\n return re.sub(\"[\\r\\n\\t]\", \"\", text).strip()\n\n def parse_ajax(self, response):\n body = json.loads(response.body_as_unicode())\n\n for store in body[1:]:\n if store[\"distance\"] < 110:\n # only process stores that are within 110 miles of query point\n # (to reduce processing a ton of duplicates)\n ref = store['identifier']\n department = store['specialtyDepartments']\n\n fuels = {}\n if 'gasPrices' in store:\n fuels = {\n 'fuel:diesel': 'diesel' in store['gasPrices'],\n 'fuel:octane_87': 'regular' in store['gasPrices'],\n 'fuel:octane_91': 'premium' in store['gasPrices']\n }\n\n properties = {\n 'lat': store.get('latitude'),\n 'lon': store.get('longitude'),\n 'ref': ref,\n 'phone': self._clean_text(store.get('phone')),\n 'name': f\"Costco {store['locationName']}\",\n 'addr_full': store['address1'],\n 'city': store['city'],\n 'state': store['state'],\n 'postcode': store.get('zipCode'),\n 'country': store.get('country'),\n 'website': 'https://www.costco.com/warehouse-locations/store-{}.html'.format(ref),\n 'extras': {\n 'shop': 'supermarket',\n 'number': store[\"displayName\"],\n 'amenity:fuel': store['hasGasDepartment'],\n 'amenity:pharmacy': store['hasPharmacyDepartment'],\n 'atm': any('ATM' == d['name'] for d in department) or None,\n 'fuel:propane': any('Propane' == d['name'] for d in department) or None,\n **fuels\n }\n }\n\n hours = store.get('warehouseHours')\n if hours:\n try:\n properties[\"opening_hours\"] = self.store_hours(hours)\n except:\n pass\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/costco.py" } ]
diff --git a/locations/spiders/costco.py b/locations/spiders/costco.py index 355fb6314e6..28162637fee 100644 --- a/locations/spiders/costco.py +++ b/locations/spiders/costco.py @@ -28,7 +28,7 @@ class CostcoSpider(scrapy.Spider): 'https://www.costco.com/warehouse-locations', ) custom_settings = { - 'USER_AGENT': 'Mozilla/5.0', + 'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36', } download_delay = 0.5
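The fix in the diff above is purely a settings change: the bare `Mozilla/5.0` user agent was presumably being rejected by the site, and a full desktop-browser UA string lets the spider fetch pages again. A minimal sketch of overriding the user agent for a single Scrapy spider via `custom_settings` (the spider name and URL are illustrative, not part of the record):

```python
import scrapy


class ExampleSpider(scrapy.Spider):
    name = 'example'
    start_urls = ['https://example.com/']

    # Per-spider override of project settings; a realistic browser UA string
    # is less likely to be blocked than Scrapy's default or a bare token.
    custom_settings = {
        'USER_AGENT': (
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/92.0.4515.159 Safari/537.36'
        ),
    }

    def parse(self, response):
        # Yield something simple to show the spider runs end to end.
        yield {'title': response.css('title::text').get()}
```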
cloud-custodian__cloud-custodian-5545
core - source get_resources should early exit on empty set

Some of the service APIs will return all resources if we issue an API call for an empty set of ids. When using get_resources we should explicitly check for an empty set and return early, in the base describe source or the query resource manager's get_resources method.
[ { "content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nQuery capability built on skew metamodel\n\ntags_spec -> s3, elb, rds\n\"\"\"\nimport functools\nimport itertools\nimport json\n\nimport jmespath\nimport six\nimport os\n\nfrom c7n.actions import ActionRegistry\nfrom c7n.exceptions import ClientError, ResourceLimitExceeded, PolicyExecutionError\nfrom c7n.filters import FilterRegistry, MetricsFilter\nfrom c7n.manager import ResourceManager\nfrom c7n.registry import PluginRegistry\nfrom c7n.tags import register_ec2_tags, register_universal_tags\nfrom c7n.utils import (\n local_session, generate_arn, get_retry, chunks, camelResource)\n\n\ntry:\n from botocore.paginate import PageIterator, Paginator\nexcept ImportError:\n # Likely using another provider in a serverless environment\n class PageIterator:\n pass\n\n class Paginator:\n pass\n\n\nclass ResourceQuery:\n\n def __init__(self, session_factory):\n self.session_factory = session_factory\n\n @staticmethod\n def resolve(resource_type):\n if not isinstance(resource_type, type):\n raise ValueError(resource_type)\n else:\n m = resource_type\n return m\n\n def _invoke_client_enum(self, client, enum_op, params, path, retry=None):\n if client.can_paginate(enum_op):\n p = client.get_paginator(enum_op)\n if retry:\n p.PAGE_ITERATOR_CLS = RetryPageIterator\n results = p.paginate(**params)\n data = results.build_full_result()\n else:\n op = getattr(client, enum_op)\n data = op(**params)\n\n if path:\n path = jmespath.compile(path)\n data = path.search(data)\n\n return data\n\n def filter(self, resource_manager, **params):\n \"\"\"Query a set of resources.\"\"\"\n m = self.resolve(resource_manager.resource_type)\n client = local_session(self.session_factory).client(\n m.service, resource_manager.config.region)\n enum_op, path, extra_args = m.enum_spec\n if extra_args:\n params.update(extra_args)\n return self._invoke_client_enum(\n client, enum_op, params, path,\n getattr(resource_manager, 'retry', None)) or []\n\n def get(self, resource_manager, identities):\n \"\"\"Get resources by identities\n \"\"\"\n m = self.resolve(resource_manager.resource_type)\n params = {}\n client_filter = False\n\n # Try to formulate server side query\n if m.filter_name:\n if m.filter_type == 'list':\n params[m.filter_name] = identities\n elif m.filter_type == 'scalar':\n assert len(identities) == 1, \"Scalar server side filter\"\n params[m.filter_name] = identities[0]\n else:\n client_filter = True\n\n resources = self.filter(resource_manager, **params)\n if client_filter:\n # This logic was added to prevent the issue from:\n # https://github.com/cloud-custodian/cloud-custodian/issues/1398\n if all(map(lambda r: isinstance(r, six.string_types), resources)):\n resources = [r for r in resources if r in identities]\n else:\n resources = [r for r in resources if r[m.id] in identities]\n\n return resources\n\n\nclass ChildResourceQuery(ResourceQuery):\n \"\"\"A resource query for resources that must be 
queried with parent information.\n\n Several resource types can only be queried in the context of their\n parents identifiers. ie. efs mount targets (parent efs), route53 resource\n records (parent hosted zone), ecs services (ecs cluster).\n \"\"\"\n\n capture_parent_id = False\n parent_key = 'c7n:parent-id'\n\n def __init__(self, session_factory, manager):\n self.session_factory = session_factory\n self.manager = manager\n\n def filter(self, resource_manager, **params):\n \"\"\"Query a set of resources.\"\"\"\n m = self.resolve(resource_manager.resource_type)\n client = local_session(self.session_factory).client(m.service)\n\n enum_op, path, extra_args = m.enum_spec\n if extra_args:\n params.update(extra_args)\n\n parent_type, parent_key, annotate_parent = m.parent_spec\n parents = self.manager.get_resource_manager(parent_type)\n parent_ids = [p[parents.resource_type.id] for p in parents.resources()]\n\n # Bail out with no parent ids...\n existing_param = parent_key in params\n if not existing_param and len(parent_ids) == 0:\n return []\n\n # Handle a query with parent id\n if existing_param:\n return self._invoke_client_enum(client, enum_op, params, path)\n\n # Have to query separately for each parent's children.\n results = []\n for parent_id in parent_ids:\n merged_params = self.get_parent_parameters(params, parent_id, parent_key)\n subset = self._invoke_client_enum(\n client, enum_op, merged_params, path, retry=self.manager.retry)\n if annotate_parent:\n for r in subset:\n r[self.parent_key] = parent_id\n if subset and self.capture_parent_id:\n results.extend([(parent_id, s) for s in subset])\n elif subset:\n results.extend(subset)\n return results\n\n def get_parent_parameters(self, params, parent_id, parent_key):\n return dict(params, **{parent_key: parent_id})\n\n\nclass QueryMeta(type):\n\n def __new__(cls, name, parents, attrs):\n if 'resource_type' not in attrs:\n return super(QueryMeta, cls).__new__(cls, name, parents, attrs)\n\n if 'filter_registry' not in attrs:\n attrs['filter_registry'] = FilterRegistry(\n '%s.filters' % name.lower())\n if 'action_registry' not in attrs:\n attrs['action_registry'] = ActionRegistry(\n '%s.actions' % name.lower())\n\n if attrs['resource_type']:\n m = ResourceQuery.resolve(attrs['resource_type'])\n # Generic cloud watch metrics support\n if m.dimension:\n attrs['filter_registry'].register('metrics', MetricsFilter)\n # EC2 Service boilerplate ...\n if m.service == 'ec2':\n # Generic ec2 resource tag support\n if getattr(m, 'taggable', True):\n register_ec2_tags(\n attrs['filter_registry'], attrs['action_registry'])\n if getattr(m, 'universal_taggable', False):\n compatibility = isinstance(m.universal_taggable, bool) and True or False\n register_universal_tags(\n attrs['filter_registry'], attrs['action_registry'],\n compatibility=compatibility)\n\n return super(QueryMeta, cls).__new__(cls, name, parents, attrs)\n\n\ndef _napi(op_name):\n return op_name.title().replace('_', '')\n\n\nsources = PluginRegistry('sources')\n\n\[email protected]('describe')\nclass DescribeSource:\n\n resource_query_factory = ResourceQuery\n\n def __init__(self, manager):\n self.manager = manager\n self.query = self.get_query()\n\n def get_resources(self, ids, cache=True):\n return self.query.get(self.manager, ids)\n\n def resources(self, query):\n return self.query.filter(self.manager, **query)\n\n def get_query(self):\n return self.resource_query_factory(self.manager.session_factory)\n\n def get_query_params(self, query_params):\n return query_params\n\n def 
get_permissions(self):\n m = self.manager.get_model()\n prefix = m.permission_prefix or m.service\n if m.permissions_enum:\n perms = list(m.permissions_enum)\n else:\n perms = ['%s:%s' % (prefix, _napi(m.enum_spec[0]))]\n if m.permissions_augment:\n perms.extend(m.permissions_augment)\n else:\n if getattr(m, 'detail_spec', None):\n perms.append(\"%s:%s\" % (prefix, _napi(m.detail_spec[0])))\n if getattr(m, 'batch_detail_spec', None):\n perms.append(\"%s:%s\" % (prefix, _napi(m.batch_detail_spec[0])))\n return perms\n\n def augment(self, resources):\n model = self.manager.get_model()\n if getattr(model, 'detail_spec', None):\n detail_spec = getattr(model, 'detail_spec', None)\n _augment = _scalar_augment\n elif getattr(model, 'batch_detail_spec', None):\n detail_spec = getattr(model, 'batch_detail_spec', None)\n _augment = _batch_augment\n else:\n return resources\n _augment = functools.partial(\n _augment, self.manager, model, detail_spec)\n with self.manager.executor_factory(\n max_workers=self.manager.max_workers) as w:\n results = list(w.map(\n _augment, chunks(resources, self.manager.chunk_size)))\n return list(itertools.chain(*results))\n\n\[email protected]('describe-child')\nclass ChildDescribeSource(DescribeSource):\n\n resource_query_factory = ChildResourceQuery\n\n def get_query(self):\n return self.resource_query_factory(\n self.manager.session_factory, self.manager)\n\n\[email protected]('config')\nclass ConfigSource:\n\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def __init__(self, manager):\n self.manager = manager\n\n def get_permissions(self):\n return [\"config:GetResourceConfigHistory\",\n \"config:ListDiscoveredResources\"]\n\n def get_resources(self, ids, cache=True):\n client = local_session(self.manager.session_factory).client('config')\n results = []\n m = self.manager.get_model()\n for i in ids:\n revisions = self.retry(\n client.get_resource_config_history,\n resourceId=i,\n resourceType=m.config_type,\n limit=1).get('configurationItems')\n if not revisions:\n continue\n results.append(self.load_resource(revisions[0]))\n return list(filter(None, results))\n\n def get_query_params(self, query):\n \"\"\"Parse config select expression from policy and parameter.\n\n On policy config supports a full statement being given, or\n a clause that will be added to the where expression.\n\n If no query is specified, a default query is utilized.\n\n A valid query should at minimum select fields\n for configuration, supplementaryConfiguration and\n must have resourceType qualifier.\n \"\"\"\n if query and not isinstance(query, dict):\n raise PolicyExecutionError(\"invalid config source query %s\" % (query,))\n\n if query is None and 'query' in self.manager.data:\n _q = [q for q in self.manager.data['query'] if 'expr' in q]\n if _q:\n query = _q.pop()\n\n if query is None and 'query' in self.manager.data:\n _c = [q['clause'] for q in self.manager.data['query'] if 'clause' in q]\n if _c:\n _c = _c.pop()\n elif query:\n return query\n else:\n _c = None\n\n s = \"select configuration, supplementaryConfiguration where resourceType = '{}'\".format(\n self.manager.resource_type.config_type)\n\n if _c:\n s += \"AND {}\".format(_c)\n\n return {'expr': s}\n\n def load_resource(self, item):\n if isinstance(item['configuration'], six.string_types):\n item_config = json.loads(item['configuration'])\n else:\n item_config = item['configuration']\n return camelResource(item_config)\n\n def resources(self, query=None):\n client = 
local_session(self.manager.session_factory).client('config')\n query = self.get_query_params(query)\n pager = Paginator(\n client.select_resource_config,\n {'input_token': 'NextToken', 'output_token': 'NextToken',\n 'result_key': 'Results'},\n client.meta.service_model.operation_model('SelectResourceConfig'))\n pager.PAGE_ITERATOR_CLS = RetryPageIterator\n\n results = []\n for page in pager.paginate(Expression=query['expr']):\n results.extend([\n self.load_resource(json.loads(r)) for r in page['Results']])\n return results\n\n def augment(self, resources):\n return resources\n\n\[email protected]_metaclass(QueryMeta)\nclass QueryResourceManager(ResourceManager):\n\n resource_type = \"\"\n\n # TODO Check if we can move to describe source\n max_workers = 3\n chunk_size = 20\n\n permissions = ()\n\n _generate_arn = None\n\n retry = staticmethod(\n get_retry((\n 'ThrottlingException',\n 'RequestLimitExceeded',\n 'Throttled',\n 'Throttling',\n 'Client.RequestLimitExceeded')))\n\n def __init__(self, data, options):\n super(QueryResourceManager, self).__init__(data, options)\n self.source = self.get_source(self.source_type)\n\n @property\n def source_type(self):\n return self.data.get('source', 'describe')\n\n def get_source(self, source_type):\n return sources.get(source_type)(self)\n\n @classmethod\n def has_arn(cls):\n if cls.resource_type.arn is not None:\n return bool(cls.resource_type.arn)\n elif getattr(cls.resource_type, 'arn_type', None) is not None:\n return True\n elif cls.__dict__.get('get_arns'):\n return True\n return False\n\n @classmethod\n def get_model(cls):\n return ResourceQuery.resolve(cls.resource_type)\n\n @classmethod\n def match_ids(cls, ids):\n \"\"\"return ids that match this resource type's id format.\"\"\"\n id_prefix = getattr(cls.get_model(), 'id_prefix', None)\n if id_prefix is not None:\n return [i for i in ids if i.startswith(id_prefix)]\n return ids\n\n def get_permissions(self):\n perms = self.source.get_permissions()\n if getattr(self, 'permissions', None):\n perms.extend(self.permissions)\n return perms\n\n def get_cache_key(self, query):\n return {\n 'account': self.account_id,\n 'region': self.config.region,\n 'resource': str(self.__class__.__name__),\n 'source': self.source_type,\n 'q': query\n }\n\n def resources(self, query=None):\n query = self.source.get_query_params(query)\n cache_key = self.get_cache_key(query)\n resources = None\n\n if self._cache.load():\n resources = self._cache.get(cache_key)\n if resources is not None:\n self.log.debug(\"Using cached %s: %d\" % (\n \"%s.%s\" % (self.__class__.__module__,\n self.__class__.__name__),\n len(resources)))\n\n if resources is None:\n if query is None:\n query = {}\n with self.ctx.tracer.subsegment('resource-fetch'):\n resources = self.source.resources(query)\n with self.ctx.tracer.subsegment('resource-augment'):\n resources = self.augment(resources)\n self._cache.save(cache_key, resources)\n\n resource_count = len(resources)\n with self.ctx.tracer.subsegment('filter'):\n resources = self.filter_resources(resources)\n\n # Check if we're out of a policies execution limits.\n if self.data == self.ctx.policy.data:\n self.check_resource_limit(len(resources), resource_count)\n return resources\n\n def check_resource_limit(self, selection_count, population_count):\n \"\"\"Check if policy's execution affects more resources then its limit.\n\n Ideally this would be at a higher level but we've hidden\n filtering behind the resource manager facade for default usage.\n \"\"\"\n p = self.ctx.policy\n 
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)\n return max_resource_limits.check_resource_limits()\n\n def _get_cached_resources(self, ids):\n key = self.get_cache_key(None)\n if self._cache.load():\n resources = self._cache.get(key)\n if resources is not None:\n self.log.debug(\"Using cached results for get_resources\")\n m = self.get_model()\n id_set = set(ids)\n return [r for r in resources if r[m.id] in id_set]\n return None\n\n def get_resources(self, ids, cache=True, augment=True):\n if cache:\n resources = self._get_cached_resources(ids)\n if resources is not None:\n return resources\n try:\n resources = self.source.get_resources(ids)\n if augment:\n resources = self.augment(resources)\n return resources\n except ClientError as e:\n self.log.warning(\"event ids not resolved: %s error:%s\" % (ids, e))\n return []\n\n def augment(self, resources):\n \"\"\"subclasses may want to augment resources with additional information.\n\n ie. we want tags by default (rds, elb), and policy, location, acl for\n s3 buckets.\n \"\"\"\n return self.source.augment(resources)\n\n @property\n def account_id(self):\n \"\"\" Return the current account ID.\n\n This should now be passed in using the --account-id flag, but for a\n period of time we will support the old behavior of inferring this from\n IAM.\n \"\"\"\n return self.config.account_id\n\n @property\n def region(self):\n \"\"\" Return the current region.\n \"\"\"\n return self.config.region\n\n def get_arns(self, resources):\n arns = []\n\n m = self.get_model()\n arn_key = getattr(m, 'arn', None)\n if arn_key is False:\n raise ValueError(\"%s do not have arns\" % self.type)\n\n id_key = m.id\n\n for r in resources:\n _id = r[id_key]\n if arn_key:\n arns.append(r[arn_key])\n elif 'arn' in _id[:3]:\n arns.append(_id)\n else:\n arns.append(self.generate_arn(_id))\n return arns\n\n @property\n def generate_arn(self):\n \"\"\" Generates generic arn if ID is not already arn format.\n \"\"\"\n if self._generate_arn is None:\n self._generate_arn = functools.partial(\n generate_arn,\n self.resource_type.arn_service or self.resource_type.service,\n region=not self.resource_type.global_resource and self.config.region or \"\",\n account_id=self.account_id,\n resource_type=self.resource_type.arn_type,\n separator=self.resource_type.arn_separator)\n return self._generate_arn\n\n\nclass MaxResourceLimit:\n\n C7N_MAXRES_OP = os.environ.get(\"C7N_MAXRES_OP\", 'or')\n\n def __init__(self, policy, selection_count, population_count):\n self.p = policy\n self.op = MaxResourceLimit.C7N_MAXRES_OP\n self.selection_count = selection_count\n self.population_count = population_count\n self.amount = None\n self.percentage_amount = None\n self.percent = None\n self._parse_policy()\n\n def _parse_policy(self,):\n if isinstance(self.p.max_resources, dict):\n self.op = self.p.max_resources.get(\"op\", MaxResourceLimit.C7N_MAXRES_OP).lower()\n self.percent = self.p.max_resources.get(\"percent\")\n self.amount = self.p.max_resources.get(\"amount\")\n\n if isinstance(self.p.max_resources, int):\n self.amount = self.p.max_resources\n\n if isinstance(self.p.max_resources_percent, (int, float)):\n self.percent = self.p.max_resources_percent\n\n if self.percent:\n self.percentage_amount = self.population_count * (self.percent / 100.0)\n\n def check_resource_limits(self):\n if self.percentage_amount and self.amount:\n if (self.selection_count > self.amount and\n self.selection_count > self.percentage_amount and self.op == \"and\"):\n raise 
ResourceLimitExceeded(\n (\"policy:%s exceeded resource-limit:{limit} and percentage-limit:%s%% \"\n \"found:{selection_count} total:{population_count}\")\n % (self.p.name, self.percent), \"max-resource and max-percent\",\n self.amount, self.selection_count, self.population_count)\n\n if self.amount:\n if self.selection_count > self.amount and self.op != \"and\":\n raise ResourceLimitExceeded(\n (\"policy:%s exceeded resource-limit:{limit} \"\n \"found:{selection_count} total: {population_count}\") % self.p.name,\n \"max-resource\", self.amount, self.selection_count, self.population_count)\n\n if self.percentage_amount:\n if self.selection_count > self.percentage_amount and self.op != \"and\":\n raise ResourceLimitExceeded(\n (\"policy:%s exceeded resource-limit:{limit}%% \"\n \"found:{selection_count} total:{population_count}\") % self.p.name,\n \"max-percent\", self.percent, self.selection_count, self.population_count)\n\n\nclass ChildResourceManager(QueryResourceManager):\n\n child_source = 'describe-child'\n\n @property\n def source_type(self):\n source = self.data.get('source', self.child_source)\n if source == 'describe':\n source = self.child_source\n return source\n\n def get_parent_manager(self):\n return self.get_resource_manager(self.resource_type.parent_spec[0])\n\n\ndef _batch_augment(manager, model, detail_spec, resource_set):\n detail_op, param_name, param_key, detail_path, detail_args = detail_spec\n client = local_session(manager.session_factory).client(\n model.service, region_name=manager.config.region)\n op = getattr(client, detail_op)\n if manager.retry:\n args = (op,)\n op = manager.retry\n else:\n args = ()\n kw = {param_name: [param_key and r[param_key] or r for r in resource_set]}\n if detail_args:\n kw.update(detail_args)\n response = op(*args, **kw)\n return response[detail_path]\n\n\ndef _scalar_augment(manager, model, detail_spec, resource_set):\n detail_op, param_name, param_key, detail_path = detail_spec\n client = local_session(manager.session_factory).client(\n model.service, region_name=manager.config.region)\n op = getattr(client, detail_op)\n if manager.retry:\n args = (op,)\n op = manager.retry\n else:\n args = ()\n results = []\n for r in resource_set:\n kw = {param_name: param_key and r[param_key] or r}\n response = op(*args, **kw)\n if detail_path:\n response = response[detail_path]\n else:\n response.pop('ResponseMetadata')\n if param_key is None:\n response[model.id] = r\n r = response\n else:\n r.update(response)\n results.append(r)\n return results\n\n\nclass RetryPageIterator(PageIterator):\n\n retry = staticmethod(QueryResourceManager.retry)\n\n def _make_request(self, current_kwargs):\n return self.retry(self._method, **current_kwargs)\n\n\nclass TypeMeta(type):\n\n def __repr__(cls):\n identifier = None\n if cls.config_type:\n identifier = cls.config_type\n elif cls.arn_type:\n identifier = \"AWS::%s::%s\" % (cls.service.title(), cls.arn_type.title())\n elif cls.enum_spec:\n identifier = \"AWS::%s::%s\" % (cls.service.title(), cls.enum_spec[1])\n else:\n identifier = \"AWS::%s::%s\" % (cls.service.title(), cls.id)\n return \"<TypeInfo %s>\" % identifier\n\n\[email protected]_metaclass(TypeMeta)\nclass TypeInfo:\n \"\"\"Resource Type Metadata\"\"\"\n\n ###########\n # Required\n\n # id field, should be the identifier used for apis\n id = None\n\n # name field, used for display\n name = None\n\n # which aws service (per sdk) has the api for this resource.\n service = None\n\n # used to query the resource by describe-sources\n enum_spec = 
None\n\n ###########\n # Optional\n\n ############\n # Permissions\n\n # Permission string prefix if not service\n permission_prefix = None\n\n # Permissions for resource enumeration/get. Normally we autogen\n # but in some cases we need to specify statically\n permissions_enum = None\n\n # Permissions for resourcee augment\n permissions_augment = None\n\n ###########\n # Arn handling / generation metadata\n\n # arn resource attribute, when describe format has arn\n arn = None\n\n # type, used for arn construction, also required for universal tag augment\n arn_type = None\n\n # how arn type is separated from rest of arn\n arn_separator = \"/\"\n\n # for services that need custom labeling for arns\n arn_service = None\n\n ##########\n # Resource retrieval\n\n # filter_name, when fetching a single resource via enum_spec\n # technically optional, but effectively required for serverless\n # event policies else we have to enumerate the population.\n filter_name = None\n\n # filter_type, scalar or list\n filter_type = None\n\n # used to enrich the resource descriptions returned by enum_spec\n detail_spec = None\n\n # used when the api supports getting resource details enmasse\n batch_detail_spec = None\n\n ##########\n # Misc\n\n # used for reporting, array of fields\n default_report_fields = ()\n\n # date, latest date associated to resource, generally references\n # either create date or modified date.\n date = None\n\n # dimension, defines that resource has cloud watch metrics and the\n # resource id can be passed as this value. further customizations\n # of dimensions require subclass metrics filter.\n dimension = None\n\n # AWS Config Service resource type name\n config_type = None\n\n # Whether or not resource group tagging api can be used, in which\n # case we'll automatically register tag actions/filters.\n #\n # Note values of True will register legacy tag filters/actions, values\n # of object() will just register current standard tag/filters/actions.\n universal_taggable = False\n\n # Denotes if this resource exists across all regions (iam, cloudfront, r53)\n global_resource = False\n\n # Generally we utilize a service to namespace mapping in the metrics filter\n # however some resources have a type specific namespace (ig. ebs)\n metrics_namespace = None\n\n # specific to ec2 service resources used to disambiguate a resource by its id\n id_prefix = None\n", "path": "c7n/query.py" } ]
[ { "content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nQuery capability built on skew metamodel\n\ntags_spec -> s3, elb, rds\n\"\"\"\nimport functools\nimport itertools\nimport json\n\nimport jmespath\nimport six\nimport os\n\nfrom c7n.actions import ActionRegistry\nfrom c7n.exceptions import ClientError, ResourceLimitExceeded, PolicyExecutionError\nfrom c7n.filters import FilterRegistry, MetricsFilter\nfrom c7n.manager import ResourceManager\nfrom c7n.registry import PluginRegistry\nfrom c7n.tags import register_ec2_tags, register_universal_tags\nfrom c7n.utils import (\n local_session, generate_arn, get_retry, chunks, camelResource)\n\n\ntry:\n from botocore.paginate import PageIterator, Paginator\nexcept ImportError:\n # Likely using another provider in a serverless environment\n class PageIterator:\n pass\n\n class Paginator:\n pass\n\n\nclass ResourceQuery:\n\n def __init__(self, session_factory):\n self.session_factory = session_factory\n\n @staticmethod\n def resolve(resource_type):\n if not isinstance(resource_type, type):\n raise ValueError(resource_type)\n else:\n m = resource_type\n return m\n\n def _invoke_client_enum(self, client, enum_op, params, path, retry=None):\n if client.can_paginate(enum_op):\n p = client.get_paginator(enum_op)\n if retry:\n p.PAGE_ITERATOR_CLS = RetryPageIterator\n results = p.paginate(**params)\n data = results.build_full_result()\n else:\n op = getattr(client, enum_op)\n data = op(**params)\n\n if path:\n path = jmespath.compile(path)\n data = path.search(data)\n\n return data\n\n def filter(self, resource_manager, **params):\n \"\"\"Query a set of resources.\"\"\"\n m = self.resolve(resource_manager.resource_type)\n client = local_session(self.session_factory).client(\n m.service, resource_manager.config.region)\n enum_op, path, extra_args = m.enum_spec\n if extra_args:\n params.update(extra_args)\n return self._invoke_client_enum(\n client, enum_op, params, path,\n getattr(resource_manager, 'retry', None)) or []\n\n def get(self, resource_manager, identities):\n \"\"\"Get resources by identities\n \"\"\"\n m = self.resolve(resource_manager.resource_type)\n params = {}\n client_filter = False\n\n # Try to formulate server side query\n if m.filter_name:\n if m.filter_type == 'list':\n params[m.filter_name] = identities\n elif m.filter_type == 'scalar':\n assert len(identities) == 1, \"Scalar server side filter\"\n params[m.filter_name] = identities[0]\n else:\n client_filter = True\n\n resources = self.filter(resource_manager, **params)\n if client_filter:\n # This logic was added to prevent the issue from:\n # https://github.com/cloud-custodian/cloud-custodian/issues/1398\n if all(map(lambda r: isinstance(r, six.string_types), resources)):\n resources = [r for r in resources if r in identities]\n else:\n resources = [r for r in resources if r[m.id] in identities]\n\n return resources\n\n\nclass ChildResourceQuery(ResourceQuery):\n \"\"\"A resource query for resources that must be 
queried with parent information.\n\n Several resource types can only be queried in the context of their\n parents identifiers. ie. efs mount targets (parent efs), route53 resource\n records (parent hosted zone), ecs services (ecs cluster).\n \"\"\"\n\n capture_parent_id = False\n parent_key = 'c7n:parent-id'\n\n def __init__(self, session_factory, manager):\n self.session_factory = session_factory\n self.manager = manager\n\n def filter(self, resource_manager, **params):\n \"\"\"Query a set of resources.\"\"\"\n m = self.resolve(resource_manager.resource_type)\n client = local_session(self.session_factory).client(m.service)\n\n enum_op, path, extra_args = m.enum_spec\n if extra_args:\n params.update(extra_args)\n\n parent_type, parent_key, annotate_parent = m.parent_spec\n parents = self.manager.get_resource_manager(parent_type)\n parent_ids = [p[parents.resource_type.id] for p in parents.resources()]\n\n # Bail out with no parent ids...\n existing_param = parent_key in params\n if not existing_param and len(parent_ids) == 0:\n return []\n\n # Handle a query with parent id\n if existing_param:\n return self._invoke_client_enum(client, enum_op, params, path)\n\n # Have to query separately for each parent's children.\n results = []\n for parent_id in parent_ids:\n merged_params = self.get_parent_parameters(params, parent_id, parent_key)\n subset = self._invoke_client_enum(\n client, enum_op, merged_params, path, retry=self.manager.retry)\n if annotate_parent:\n for r in subset:\n r[self.parent_key] = parent_id\n if subset and self.capture_parent_id:\n results.extend([(parent_id, s) for s in subset])\n elif subset:\n results.extend(subset)\n return results\n\n def get_parent_parameters(self, params, parent_id, parent_key):\n return dict(params, **{parent_key: parent_id})\n\n\nclass QueryMeta(type):\n\n def __new__(cls, name, parents, attrs):\n if 'resource_type' not in attrs:\n return super(QueryMeta, cls).__new__(cls, name, parents, attrs)\n\n if 'filter_registry' not in attrs:\n attrs['filter_registry'] = FilterRegistry(\n '%s.filters' % name.lower())\n if 'action_registry' not in attrs:\n attrs['action_registry'] = ActionRegistry(\n '%s.actions' % name.lower())\n\n if attrs['resource_type']:\n m = ResourceQuery.resolve(attrs['resource_type'])\n # Generic cloud watch metrics support\n if m.dimension:\n attrs['filter_registry'].register('metrics', MetricsFilter)\n # EC2 Service boilerplate ...\n if m.service == 'ec2':\n # Generic ec2 resource tag support\n if getattr(m, 'taggable', True):\n register_ec2_tags(\n attrs['filter_registry'], attrs['action_registry'])\n if getattr(m, 'universal_taggable', False):\n compatibility = isinstance(m.universal_taggable, bool) and True or False\n register_universal_tags(\n attrs['filter_registry'], attrs['action_registry'],\n compatibility=compatibility)\n\n return super(QueryMeta, cls).__new__(cls, name, parents, attrs)\n\n\ndef _napi(op_name):\n return op_name.title().replace('_', '')\n\n\nsources = PluginRegistry('sources')\n\n\[email protected]('describe')\nclass DescribeSource:\n\n resource_query_factory = ResourceQuery\n\n def __init__(self, manager):\n self.manager = manager\n self.query = self.get_query()\n\n def get_resources(self, ids, cache=True):\n return self.query.get(self.manager, ids)\n\n def resources(self, query):\n return self.query.filter(self.manager, **query)\n\n def get_query(self):\n return self.resource_query_factory(self.manager.session_factory)\n\n def get_query_params(self, query_params):\n return query_params\n\n def 
get_permissions(self):\n m = self.manager.get_model()\n prefix = m.permission_prefix or m.service\n if m.permissions_enum:\n perms = list(m.permissions_enum)\n else:\n perms = ['%s:%s' % (prefix, _napi(m.enum_spec[0]))]\n if m.permissions_augment:\n perms.extend(m.permissions_augment)\n else:\n if getattr(m, 'detail_spec', None):\n perms.append(\"%s:%s\" % (prefix, _napi(m.detail_spec[0])))\n if getattr(m, 'batch_detail_spec', None):\n perms.append(\"%s:%s\" % (prefix, _napi(m.batch_detail_spec[0])))\n return perms\n\n def augment(self, resources):\n model = self.manager.get_model()\n if getattr(model, 'detail_spec', None):\n detail_spec = getattr(model, 'detail_spec', None)\n _augment = _scalar_augment\n elif getattr(model, 'batch_detail_spec', None):\n detail_spec = getattr(model, 'batch_detail_spec', None)\n _augment = _batch_augment\n else:\n return resources\n _augment = functools.partial(\n _augment, self.manager, model, detail_spec)\n with self.manager.executor_factory(\n max_workers=self.manager.max_workers) as w:\n results = list(w.map(\n _augment, chunks(resources, self.manager.chunk_size)))\n return list(itertools.chain(*results))\n\n\[email protected]('describe-child')\nclass ChildDescribeSource(DescribeSource):\n\n resource_query_factory = ChildResourceQuery\n\n def get_query(self):\n return self.resource_query_factory(\n self.manager.session_factory, self.manager)\n\n\[email protected]('config')\nclass ConfigSource:\n\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def __init__(self, manager):\n self.manager = manager\n\n def get_permissions(self):\n return [\"config:GetResourceConfigHistory\",\n \"config:ListDiscoveredResources\"]\n\n def get_resources(self, ids, cache=True):\n client = local_session(self.manager.session_factory).client('config')\n results = []\n m = self.manager.get_model()\n for i in ids:\n revisions = self.retry(\n client.get_resource_config_history,\n resourceId=i,\n resourceType=m.config_type,\n limit=1).get('configurationItems')\n if not revisions:\n continue\n results.append(self.load_resource(revisions[0]))\n return list(filter(None, results))\n\n def get_query_params(self, query):\n \"\"\"Parse config select expression from policy and parameter.\n\n On policy config supports a full statement being given, or\n a clause that will be added to the where expression.\n\n If no query is specified, a default query is utilized.\n\n A valid query should at minimum select fields\n for configuration, supplementaryConfiguration and\n must have resourceType qualifier.\n \"\"\"\n if query and not isinstance(query, dict):\n raise PolicyExecutionError(\"invalid config source query %s\" % (query,))\n\n if query is None and 'query' in self.manager.data:\n _q = [q for q in self.manager.data['query'] if 'expr' in q]\n if _q:\n query = _q.pop()\n\n if query is None and 'query' in self.manager.data:\n _c = [q['clause'] for q in self.manager.data['query'] if 'clause' in q]\n if _c:\n _c = _c.pop()\n elif query:\n return query\n else:\n _c = None\n\n s = \"select configuration, supplementaryConfiguration where resourceType = '{}'\".format(\n self.manager.resource_type.config_type)\n\n if _c:\n s += \"AND {}\".format(_c)\n\n return {'expr': s}\n\n def load_resource(self, item):\n if isinstance(item['configuration'], six.string_types):\n item_config = json.loads(item['configuration'])\n else:\n item_config = item['configuration']\n return camelResource(item_config)\n\n def resources(self, query=None):\n client = 
local_session(self.manager.session_factory).client('config')\n query = self.get_query_params(query)\n pager = Paginator(\n client.select_resource_config,\n {'input_token': 'NextToken', 'output_token': 'NextToken',\n 'result_key': 'Results'},\n client.meta.service_model.operation_model('SelectResourceConfig'))\n pager.PAGE_ITERATOR_CLS = RetryPageIterator\n\n results = []\n for page in pager.paginate(Expression=query['expr']):\n results.extend([\n self.load_resource(json.loads(r)) for r in page['Results']])\n return results\n\n def augment(self, resources):\n return resources\n\n\[email protected]_metaclass(QueryMeta)\nclass QueryResourceManager(ResourceManager):\n\n resource_type = \"\"\n\n # TODO Check if we can move to describe source\n max_workers = 3\n chunk_size = 20\n\n permissions = ()\n\n _generate_arn = None\n\n retry = staticmethod(\n get_retry((\n 'ThrottlingException',\n 'RequestLimitExceeded',\n 'Throttled',\n 'Throttling',\n 'Client.RequestLimitExceeded')))\n\n def __init__(self, data, options):\n super(QueryResourceManager, self).__init__(data, options)\n self.source = self.get_source(self.source_type)\n\n @property\n def source_type(self):\n return self.data.get('source', 'describe')\n\n def get_source(self, source_type):\n return sources.get(source_type)(self)\n\n @classmethod\n def has_arn(cls):\n if cls.resource_type.arn is not None:\n return bool(cls.resource_type.arn)\n elif getattr(cls.resource_type, 'arn_type', None) is not None:\n return True\n elif cls.__dict__.get('get_arns'):\n return True\n return False\n\n @classmethod\n def get_model(cls):\n return ResourceQuery.resolve(cls.resource_type)\n\n @classmethod\n def match_ids(cls, ids):\n \"\"\"return ids that match this resource type's id format.\"\"\"\n id_prefix = getattr(cls.get_model(), 'id_prefix', None)\n if id_prefix is not None:\n return [i for i in ids if i.startswith(id_prefix)]\n return ids\n\n def get_permissions(self):\n perms = self.source.get_permissions()\n if getattr(self, 'permissions', None):\n perms.extend(self.permissions)\n return perms\n\n def get_cache_key(self, query):\n return {\n 'account': self.account_id,\n 'region': self.config.region,\n 'resource': str(self.__class__.__name__),\n 'source': self.source_type,\n 'q': query\n }\n\n def resources(self, query=None):\n query = self.source.get_query_params(query)\n cache_key = self.get_cache_key(query)\n resources = None\n\n if self._cache.load():\n resources = self._cache.get(cache_key)\n if resources is not None:\n self.log.debug(\"Using cached %s: %d\" % (\n \"%s.%s\" % (self.__class__.__module__,\n self.__class__.__name__),\n len(resources)))\n\n if resources is None:\n if query is None:\n query = {}\n with self.ctx.tracer.subsegment('resource-fetch'):\n resources = self.source.resources(query)\n with self.ctx.tracer.subsegment('resource-augment'):\n resources = self.augment(resources)\n self._cache.save(cache_key, resources)\n\n resource_count = len(resources)\n with self.ctx.tracer.subsegment('filter'):\n resources = self.filter_resources(resources)\n\n # Check if we're out of a policies execution limits.\n if self.data == self.ctx.policy.data:\n self.check_resource_limit(len(resources), resource_count)\n return resources\n\n def check_resource_limit(self, selection_count, population_count):\n \"\"\"Check if policy's execution affects more resources then its limit.\n\n Ideally this would be at a higher level but we've hidden\n filtering behind the resource manager facade for default usage.\n \"\"\"\n p = self.ctx.policy\n 
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)\n return max_resource_limits.check_resource_limits()\n\n def _get_cached_resources(self, ids):\n key = self.get_cache_key(None)\n if self._cache.load():\n resources = self._cache.get(key)\n if resources is not None:\n self.log.debug(\"Using cached results for get_resources\")\n m = self.get_model()\n id_set = set(ids)\n return [r for r in resources if r[m.id] in id_set]\n return None\n\n def get_resources(self, ids, cache=True, augment=True):\n if not ids:\n return []\n if cache:\n resources = self._get_cached_resources(ids)\n if resources is not None:\n return resources\n try:\n resources = self.source.get_resources(ids)\n if augment:\n resources = self.augment(resources)\n return resources\n except ClientError as e:\n self.log.warning(\"event ids not resolved: %s error:%s\" % (ids, e))\n return []\n\n def augment(self, resources):\n \"\"\"subclasses may want to augment resources with additional information.\n\n ie. we want tags by default (rds, elb), and policy, location, acl for\n s3 buckets.\n \"\"\"\n return self.source.augment(resources)\n\n @property\n def account_id(self):\n \"\"\" Return the current account ID.\n\n This should now be passed in using the --account-id flag, but for a\n period of time we will support the old behavior of inferring this from\n IAM.\n \"\"\"\n return self.config.account_id\n\n @property\n def region(self):\n \"\"\" Return the current region.\n \"\"\"\n return self.config.region\n\n def get_arns(self, resources):\n arns = []\n\n m = self.get_model()\n arn_key = getattr(m, 'arn', None)\n if arn_key is False:\n raise ValueError(\"%s do not have arns\" % self.type)\n\n id_key = m.id\n\n for r in resources:\n _id = r[id_key]\n if arn_key:\n arns.append(r[arn_key])\n elif 'arn' in _id[:3]:\n arns.append(_id)\n else:\n arns.append(self.generate_arn(_id))\n return arns\n\n @property\n def generate_arn(self):\n \"\"\" Generates generic arn if ID is not already arn format.\n \"\"\"\n if self._generate_arn is None:\n self._generate_arn = functools.partial(\n generate_arn,\n self.resource_type.arn_service or self.resource_type.service,\n region=not self.resource_type.global_resource and self.config.region or \"\",\n account_id=self.account_id,\n resource_type=self.resource_type.arn_type,\n separator=self.resource_type.arn_separator)\n return self._generate_arn\n\n\nclass MaxResourceLimit:\n\n C7N_MAXRES_OP = os.environ.get(\"C7N_MAXRES_OP\", 'or')\n\n def __init__(self, policy, selection_count, population_count):\n self.p = policy\n self.op = MaxResourceLimit.C7N_MAXRES_OP\n self.selection_count = selection_count\n self.population_count = population_count\n self.amount = None\n self.percentage_amount = None\n self.percent = None\n self._parse_policy()\n\n def _parse_policy(self,):\n if isinstance(self.p.max_resources, dict):\n self.op = self.p.max_resources.get(\"op\", MaxResourceLimit.C7N_MAXRES_OP).lower()\n self.percent = self.p.max_resources.get(\"percent\")\n self.amount = self.p.max_resources.get(\"amount\")\n\n if isinstance(self.p.max_resources, int):\n self.amount = self.p.max_resources\n\n if isinstance(self.p.max_resources_percent, (int, float)):\n self.percent = self.p.max_resources_percent\n\n if self.percent:\n self.percentage_amount = self.population_count * (self.percent / 100.0)\n\n def check_resource_limits(self):\n if self.percentage_amount and self.amount:\n if (self.selection_count > self.amount and\n self.selection_count > self.percentage_amount and self.op == 
\"and\"):\n raise ResourceLimitExceeded(\n (\"policy:%s exceeded resource-limit:{limit} and percentage-limit:%s%% \"\n \"found:{selection_count} total:{population_count}\")\n % (self.p.name, self.percent), \"max-resource and max-percent\",\n self.amount, self.selection_count, self.population_count)\n\n if self.amount:\n if self.selection_count > self.amount and self.op != \"and\":\n raise ResourceLimitExceeded(\n (\"policy:%s exceeded resource-limit:{limit} \"\n \"found:{selection_count} total: {population_count}\") % self.p.name,\n \"max-resource\", self.amount, self.selection_count, self.population_count)\n\n if self.percentage_amount:\n if self.selection_count > self.percentage_amount and self.op != \"and\":\n raise ResourceLimitExceeded(\n (\"policy:%s exceeded resource-limit:{limit}%% \"\n \"found:{selection_count} total:{population_count}\") % self.p.name,\n \"max-percent\", self.percent, self.selection_count, self.population_count)\n\n\nclass ChildResourceManager(QueryResourceManager):\n\n child_source = 'describe-child'\n\n @property\n def source_type(self):\n source = self.data.get('source', self.child_source)\n if source == 'describe':\n source = self.child_source\n return source\n\n def get_parent_manager(self):\n return self.get_resource_manager(self.resource_type.parent_spec[0])\n\n\ndef _batch_augment(manager, model, detail_spec, resource_set):\n detail_op, param_name, param_key, detail_path, detail_args = detail_spec\n client = local_session(manager.session_factory).client(\n model.service, region_name=manager.config.region)\n op = getattr(client, detail_op)\n if manager.retry:\n args = (op,)\n op = manager.retry\n else:\n args = ()\n kw = {param_name: [param_key and r[param_key] or r for r in resource_set]}\n if detail_args:\n kw.update(detail_args)\n response = op(*args, **kw)\n return response[detail_path]\n\n\ndef _scalar_augment(manager, model, detail_spec, resource_set):\n detail_op, param_name, param_key, detail_path = detail_spec\n client = local_session(manager.session_factory).client(\n model.service, region_name=manager.config.region)\n op = getattr(client, detail_op)\n if manager.retry:\n args = (op,)\n op = manager.retry\n else:\n args = ()\n results = []\n for r in resource_set:\n kw = {param_name: param_key and r[param_key] or r}\n response = op(*args, **kw)\n if detail_path:\n response = response[detail_path]\n else:\n response.pop('ResponseMetadata')\n if param_key is None:\n response[model.id] = r\n r = response\n else:\n r.update(response)\n results.append(r)\n return results\n\n\nclass RetryPageIterator(PageIterator):\n\n retry = staticmethod(QueryResourceManager.retry)\n\n def _make_request(self, current_kwargs):\n return self.retry(self._method, **current_kwargs)\n\n\nclass TypeMeta(type):\n\n def __repr__(cls):\n identifier = None\n if cls.config_type:\n identifier = cls.config_type\n elif cls.arn_type:\n identifier = \"AWS::%s::%s\" % (cls.service.title(), cls.arn_type.title())\n elif cls.enum_spec:\n identifier = \"AWS::%s::%s\" % (cls.service.title(), cls.enum_spec[1])\n else:\n identifier = \"AWS::%s::%s\" % (cls.service.title(), cls.id)\n return \"<TypeInfo %s>\" % identifier\n\n\[email protected]_metaclass(TypeMeta)\nclass TypeInfo:\n \"\"\"Resource Type Metadata\"\"\"\n\n ###########\n # Required\n\n # id field, should be the identifier used for apis\n id = None\n\n # name field, used for display\n name = None\n\n # which aws service (per sdk) has the api for this resource.\n service = None\n\n # used to query the resource by describe-sources\n 
enum_spec = None\n\n ###########\n # Optional\n\n ############\n # Permissions\n\n # Permission string prefix if not service\n permission_prefix = None\n\n # Permissions for resource enumeration/get. Normally we autogen\n # but in some cases we need to specify statically\n permissions_enum = None\n\n # Permissions for resourcee augment\n permissions_augment = None\n\n ###########\n # Arn handling / generation metadata\n\n # arn resource attribute, when describe format has arn\n arn = None\n\n # type, used for arn construction, also required for universal tag augment\n arn_type = None\n\n # how arn type is separated from rest of arn\n arn_separator = \"/\"\n\n # for services that need custom labeling for arns\n arn_service = None\n\n ##########\n # Resource retrieval\n\n # filter_name, when fetching a single resource via enum_spec\n # technically optional, but effectively required for serverless\n # event policies else we have to enumerate the population.\n filter_name = None\n\n # filter_type, scalar or list\n filter_type = None\n\n # used to enrich the resource descriptions returned by enum_spec\n detail_spec = None\n\n # used when the api supports getting resource details enmasse\n batch_detail_spec = None\n\n ##########\n # Misc\n\n # used for reporting, array of fields\n default_report_fields = ()\n\n # date, latest date associated to resource, generally references\n # either create date or modified date.\n date = None\n\n # dimension, defines that resource has cloud watch metrics and the\n # resource id can be passed as this value. further customizations\n # of dimensions require subclass metrics filter.\n dimension = None\n\n # AWS Config Service resource type name\n config_type = None\n\n # Whether or not resource group tagging api can be used, in which\n # case we'll automatically register tag actions/filters.\n #\n # Note values of True will register legacy tag filters/actions, values\n # of object() will just register current standard tag/filters/actions.\n universal_taggable = False\n\n # Denotes if this resource exists across all regions (iam, cloudfront, r53)\n global_resource = False\n\n # Generally we utilize a service to namespace mapping in the metrics filter\n # however some resources have a type specific namespace (ig. ebs)\n metrics_namespace = None\n\n # specific to ec2 service resources used to disambiguate a resource by its id\n id_prefix = None\n", "path": "c7n/query.py" } ]
diff --git a/c7n/query.py b/c7n/query.py index b0e6faa0997..3eff822c2b1 100644 --- a/c7n/query.py +++ b/c7n/query.py @@ -491,6 +491,8 @@ def _get_cached_resources(self, ids): return None def get_resources(self, ids, cache=True, augment=True): + if not ids: + return [] if cache: resources = self._get_cached_resources(ids) if resources is not None:
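The diff above is a textbook guard clause: if the caller passes an empty id list, return an empty result before touching the API, because some describe-style endpoints treat an empty filter as "no filter" and would return every resource in the account. A minimal standalone sketch of the same guard, using boto3's EC2 client as the example service rather than Cloud Custodian's own session machinery:

```python
import boto3


def get_instances_by_id(instance_ids):
    """Return descriptions for the given EC2 instance ids.

    Early-exit on an empty list: with no InstanceIds filter,
    describe_instances enumerates every instance instead of none.
    """
    if not instance_ids:
        return []
    client = boto3.client('ec2')
    resp = client.describe_instances(InstanceIds=list(instance_ids))
    return [
        instance
        for reservation in resp['Reservations']
        for instance in reservation['Instances']
    ]
```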
quantumlib__Cirq-3689
Add to heatmap visualization tests

In the `test_colorbar` test there is a comment about testing that the position, size, and pad arguments are respected.
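A rough sketch of the kind of assertion that comment asks for, written against the `Heatmap` API shown in the file below: plot with an explicit colorbar position/size/pad, force a draw so the `axes_grid1` divider computes final layouts, and check that the colorbar axes sits where it was asked to. This is an assumption about how such a test could look, not the test the project actually added:

```python
import matplotlib
matplotlib.use('Agg')  # headless backend so the test runs without a display
import matplotlib.pyplot as plt

import cirq
from cirq.vis import Heatmap


def test_colorbar_position_is_respected():
    values = {cirq.GridQubit(r, c): r + c for r in range(3) for c in range(3)}
    fig, ax = plt.subplots()
    Heatmap(values).set_colorbar(position='right', size='5%', pad='2%').plot(ax)
    fig.canvas.draw()  # the axes divider only finalizes positions at draw time
    assert len(fig.axes) == 2  # the heatmap axes plus the appended colorbar axes
    main_ax, cbar_ax = fig.axes
    # With position='right' and a non-zero pad, the colorbar axes must start
    # to the right of the (shrunk) heatmap axes.
    assert cbar_ax.get_position().x0 > main_ax.get_position().x1
    plt.close(fig)
```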
[ { "content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Heatmap class.\n\nSee examples/bristlecone_heatmap_example.py for an example usage in\nan interactive session.\n\"\"\"\n\nfrom typing import Any, Dict, List, Mapping, Optional, SupportsFloat, Tuple, Union\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib import collections as mpl_collections\nfrom mpl_toolkits import axes_grid1\n\nfrom cirq.devices import grid_qubit\n\nQubitCoordinate = Union[Tuple[int, int], grid_qubit.GridQubit]\n\n# The value map is qubit coordinate -> a type that supports float conversion.\nValueMap = Union[Dict[grid_qubit.GridQubit, SupportsFloat], Dict[Tuple[int, int], SupportsFloat]]\n\n\ndef _get_qubit_row_col(qubit: QubitCoordinate) -> Tuple[int, int]:\n if isinstance(qubit, grid_qubit.GridQubit):\n return qubit.row, qubit.col\n elif isinstance(qubit, tuple):\n return qubit[0], qubit[1]\n\n\ndef relative_luminance(color: np.ndarray) -> float:\n \"\"\"Returns the relative luminance according to W3C specification.\n\n Spec: https://www.w3.org/TR/WCAG21/#dfn-relative-luminance.\n\n Args:\n color: a numpy array with the first 3 elements red, green, and blue\n with values in [0, 1].\n Returns:\n relative luminance of color in [0, 1].\n \"\"\"\n rgb = color[:3]\n rgb = np.where(rgb <= 0.03928, rgb / 12.92, ((rgb + 0.055) / 1.055) ** 2.4)\n return rgb.dot([0.2126, 0.7152, 0.0722])\n\n\nclass Heatmap:\n \"\"\"Distribution of a value in 2D qubit lattice as a color map.\"\"\"\n\n def __init__(self, value_map: ValueMap) -> None:\n self.set_value_map(value_map)\n self.annot_map = { # Default annotation.\n _get_qubit_row_col(qubit): format(float(value), '.2g')\n for qubit, value in value_map.items()\n }\n self.annot_kwargs: Dict[str, Any] = {}\n self.unset_url_map()\n self.set_colorbar()\n self.set_colormap()\n\n def set_value_map(self, value_map: ValueMap) -> 'Heatmap':\n \"\"\"Sets the values for each qubit.\n\n Args:\n value_map: the values for determining color for each cell.\n \"\"\"\n # Fail fast if float() fails.\n # Keep the original value object for annotation.\n self.value_map = {qubit: (float(value), value) for qubit, value in value_map.items()}\n return self\n\n def set_annotation_map(\n self, annot_map: Mapping[QubitCoordinate, str], **text_options: str\n ) -> 'Heatmap':\n \"\"\"Sets the annotation text for each qubit.\n\n Note that set_annotation_map() and set_annotation_format()\n both sets the annotation map to be used. 
Whichever is called later wins.\n\n Args:\n annot_map: the texts to be drawn on each qubit cell.\n text_options: keyword arguments passed to matplotlib.text.Text()\n when drawing the annotation texts.\n \"\"\"\n self.annot_map = {_get_qubit_row_col(qubit): value for qubit, value in annot_map.items()}\n self.annot_kwargs = text_options\n return self\n\n def set_annotation_format(self, annot_format: str, **text_options: str) -> 'Heatmap':\n \"\"\"Sets a format string to format values for each qubit.\n\n Args:\n annot_format: the format string for formatting values.\n text_options: keyword arguments to matplotlib.text.Text().\n \"\"\"\n self.annot_map = {\n _get_qubit_row_col(qubit): format(value[1], annot_format)\n for qubit, value in self.value_map.items()\n }\n self.annot_kwargs = text_options\n return self\n\n def unset_annotation(self) -> 'Heatmap':\n \"\"\"Disables annotation. No texts are shown in cells.\"\"\"\n self.annot_map = {}\n return self\n\n def set_url_map(self, url_map: Mapping[QubitCoordinate, str]) -> 'Heatmap':\n \"\"\"Sets the URLs for each cell.\"\"\"\n self.url_map = {_get_qubit_row_col(qubit): value for qubit, value in url_map.items()}\n return self\n\n def unset_url_map(self) -> 'Heatmap':\n \"\"\"Disables URL. No URLs are associated with cells.\"\"\"\n self.url_map = {}\n return self\n\n def set_colorbar(\n self, position: str = 'right', size: str = '5%', pad: str = '2%', **colorbar_options: Any\n ) -> 'Heatmap':\n \"\"\"Sets location and style of colorbar.\n\n Args:\n position: colorbar position, one of 'left'|'right'|'top'|'bottom'.\n size: a string ending in '%' to specify the width of the colorbar.\n Nominally, '100%' means the same width as the heatmap.\n pad: a string ending in '%' to specify the space between the\n colorbar and the heatmap.\n colorbar_options: keyword arguments passed to\n matplotlib.Figure.colorbar().\n \"\"\"\n self.plot_colorbar = True\n self.colorbar_location_options = {'position': position, 'size': size, 'pad': pad}\n self.colorbar_options = colorbar_options\n return self\n\n def unset_colorbar(self) -> 'Heatmap':\n \"\"\"Disables colorbar. No colorbar is drawn.\"\"\"\n self.plot_colorbar = False\n return self\n\n def set_colormap(\n self,\n colormap: Union[str, mpl.colors.Colormap] = 'viridis',\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ) -> 'Heatmap':\n \"\"\"Sets the colormap.\n\n Args:\n colormap: either a colormap name or a Colormap instance.\n vmin: the minimum value to map to the minimum color. Default is\n the minimum value in value_map.\n vmax: the maximum value to map to the maximum color. Default is\n the maximum value in value_map.\n \"\"\"\n self.colormap = colormap\n self.vmin = vmin\n self.vmax = vmax\n return self\n\n def plot(\n self, ax: Optional[plt.Axes] = None, **pcolor_options: Any\n ) -> Tuple[plt.Axes, mpl_collections.Collection, pd.DataFrame]:\n \"\"\"Plots the heatmap on the given Axes.\n\n Args:\n ax: the Axes to plot on. If not given, a new figure is created,\n plotted on, and shown.\n pcolor_options: keyword arguments passed to ax.pcolor().\n\n Returns:\n A 3-tuple ``(ax, mesh, value_table)``. ``ax`` is the `plt.Axes` that\n is plotted on. 
``mesh`` is the collection of paths drawn and filled.\n ``value_table`` is the 2-D pandas DataFrame of values constructed\n from the value_map.\n \"\"\"\n show_plot = not ax\n if not ax:\n fig, ax = plt.subplots(figsize=(8, 8))\n # Find the boundary and size of the heatmap.\n coordinate_list = [_get_qubit_row_col(qubit) for qubit in self.value_map.keys()]\n rows = [row for row, _ in coordinate_list]\n cols = [col for _, col in coordinate_list]\n min_row, max_row = min(rows), max(rows)\n min_col, max_col = min(cols), max(cols)\n height, width = max_row - min_row + 1, max_col - min_col + 1\n # Construct the (height x width) table of values. Cells with no values\n # are filled with np.nan.\n value_table = pd.DataFrame(\n np.nan, index=range(min_row, max_row + 1), columns=range(min_col, max_col + 1)\n )\n for qubit, (float_value, _) in self.value_map.items():\n row, col = _get_qubit_row_col(qubit)\n value_table[col][row] = float_value\n # Construct the (height + 1) x (width + 1) cell boundary tables.\n x_table = np.array([np.arange(min_col - 0.5, max_col + 1.5)] * (height + 1))\n y_table = np.array([np.arange(min_row - 0.5, max_row + 1.5)] * (width + 1)).transpose()\n\n # Construct the URL array as an ordered list of URLs for non-nan cells.\n url_array: List[str] = []\n if self.url_map:\n url_array = [self.url_map.get((row, col), '') for row, col in value_table.stack().index]\n\n # Plot the heatmap.\n mesh = ax.pcolor(\n x_table,\n y_table,\n value_table,\n vmin=self.vmin,\n vmax=self.vmax,\n cmap=self.colormap,\n urls=url_array,\n **pcolor_options,\n )\n mesh.update_scalarmappable()\n ax.set(xlabel='column', ylabel='row')\n ax.xaxis.set_ticks(np.arange(min_col, max_col + 1))\n ax.yaxis.set_ticks(np.arange(min_row, max_row + 1))\n ax.set_ylim((max_row + 0.5, min_row - 0.5))\n\n if self.plot_colorbar:\n self._plot_colorbar(mesh, ax)\n\n if self.annot_map:\n self._write_annotations(mesh, ax)\n\n if show_plot:\n fig.show()\n\n return ax, mesh, value_table\n\n def _plot_colorbar(\n self, mappable: mpl.cm.ScalarMappable, ax: plt.Axes\n ) -> mpl.colorbar.Colorbar:\n \"\"\"Plots the colorbar. Internal.\"\"\"\n colorbar_ax = axes_grid1.make_axes_locatable(ax).append_axes(\n **self.colorbar_location_options\n )\n position = self.colorbar_location_options.get('position', 'right')\n orien = 'vertical' if position in ('left', 'right') else 'horizontal'\n colorbar = ax.figure.colorbar(\n mappable, colorbar_ax, ax, orientation=orien, **self.colorbar_options\n )\n colorbar_ax.tick_params(axis='y', direction='out')\n return colorbar\n\n def _write_annotations(self, mesh: mpl_collections.Collection, ax: plt.Axes) -> None:\n \"\"\"Writes annotations to the center of cells. Internal.\"\"\"\n for path, facecolor in zip(mesh.get_paths(), mesh.get_facecolors()):\n # Calculate the center of the cell, assuming that it is a square\n # centered at (x=col, y=row).\n vertices = path.vertices[:4]\n row = int(round(np.mean([v[1] for v in vertices])))\n col = int(round(np.mean([v[0] for v in vertices])))\n annotation = self.annot_map.get((row, col), '')\n if not annotation:\n continue\n face_luminance = relative_luminance(facecolor)\n text_color = 'black' if face_luminance > 0.4 else 'white'\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kwargs)\n ax.text(col, row, annotation, **text_kwargs)\n", "path": "cirq/vis/heatmap.py" } ]
[ { "content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Heatmap class.\n\nSee examples/bristlecone_heatmap_example.py for an example usage in\nan interactive session.\n\"\"\"\n\nfrom typing import Any, Dict, List, Mapping, Optional, SupportsFloat, Tuple, Union\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib import collections as mpl_collections\nfrom mpl_toolkits import axes_grid1\n\nfrom cirq.devices import grid_qubit\n\nQubitCoordinate = Union[Tuple[int, int], grid_qubit.GridQubit]\n\n# The value map is qubit coordinate -> a type that supports float conversion.\nValueMap = Union[Dict[grid_qubit.GridQubit, SupportsFloat], Dict[Tuple[int, int], SupportsFloat]]\n\n\ndef _get_qubit_row_col(qubit: QubitCoordinate) -> Tuple[int, int]:\n if isinstance(qubit, grid_qubit.GridQubit):\n return qubit.row, qubit.col\n elif isinstance(qubit, tuple):\n return int(qubit[0]), int(qubit[1])\n\n\ndef relative_luminance(color: np.ndarray) -> float:\n \"\"\"Returns the relative luminance according to W3C specification.\n\n Spec: https://www.w3.org/TR/WCAG21/#dfn-relative-luminance.\n\n Args:\n color: a numpy array with the first 3 elements red, green, and blue\n with values in [0, 1].\n Returns:\n relative luminance of color in [0, 1].\n \"\"\"\n rgb = color[:3]\n rgb = np.where(rgb <= 0.03928, rgb / 12.92, ((rgb + 0.055) / 1.055) ** 2.4)\n return rgb.dot([0.2126, 0.7152, 0.0722])\n\n\nclass Heatmap:\n \"\"\"Distribution of a value in 2D qubit lattice as a color map.\"\"\"\n\n def __init__(self, value_map: ValueMap) -> None:\n self.set_value_map(value_map)\n self.annot_map = { # Default annotation.\n _get_qubit_row_col(qubit): format(float(value), '.2g')\n for qubit, value in value_map.items()\n }\n self.annot_kwargs: Dict[str, Any] = {}\n self.unset_url_map()\n self.set_colorbar()\n self.set_colormap()\n\n def set_value_map(self, value_map: ValueMap) -> 'Heatmap':\n \"\"\"Sets the values for each qubit.\n\n Args:\n value_map: the values for determining color for each cell.\n \"\"\"\n # Fail fast if float() fails.\n # Keep the original value object for annotation.\n self.value_map = {qubit: (float(value), value) for qubit, value in value_map.items()}\n return self\n\n def set_annotation_map(\n self, annot_map: Mapping[QubitCoordinate, str], **text_options: str\n ) -> 'Heatmap':\n \"\"\"Sets the annotation text for each qubit.\n\n Note that set_annotation_map() and set_annotation_format()\n both sets the annotation map to be used. 
Whichever is called later wins.\n\n Args:\n annot_map: the texts to be drawn on each qubit cell.\n text_options: keyword arguments passed to matplotlib.text.Text()\n when drawing the annotation texts.\n \"\"\"\n self.annot_map = {_get_qubit_row_col(qubit): value for qubit, value in annot_map.items()}\n self.annot_kwargs = text_options\n return self\n\n def set_annotation_format(self, annot_format: str, **text_options: str) -> 'Heatmap':\n \"\"\"Sets a format string to format values for each qubit.\n\n Args:\n annot_format: the format string for formatting values.\n text_options: keyword arguments to matplotlib.text.Text().\n \"\"\"\n self.annot_map = {\n _get_qubit_row_col(qubit): format(value[1], annot_format)\n for qubit, value in self.value_map.items()\n }\n self.annot_kwargs = text_options\n return self\n\n def unset_annotation(self) -> 'Heatmap':\n \"\"\"Disables annotation. No texts are shown in cells.\"\"\"\n self.annot_map = {}\n return self\n\n def set_url_map(self, url_map: Mapping[QubitCoordinate, str]) -> 'Heatmap':\n \"\"\"Sets the URLs for each cell.\"\"\"\n self.url_map = {_get_qubit_row_col(qubit): value for qubit, value in url_map.items()}\n return self\n\n def unset_url_map(self) -> 'Heatmap':\n \"\"\"Disables URL. No URLs are associated with cells.\"\"\"\n self.url_map = {}\n return self\n\n def set_colorbar(\n self, position: str = 'right', size: str = '5%', pad: str = '2%', **colorbar_options: Any\n ) -> 'Heatmap':\n \"\"\"Sets location and style of colorbar.\n\n Args:\n position: colorbar position, one of 'left'|'right'|'top'|'bottom'.\n size: a string ending in '%' to specify the width of the colorbar.\n Nominally, '100%' means the same width as the heatmap.\n pad: a string ending in '%' to specify the space between the\n colorbar and the heatmap.\n colorbar_options: keyword arguments passed to\n matplotlib.Figure.colorbar().\n \"\"\"\n self.plot_colorbar = True\n self.colorbar_location_options = {'position': position, 'size': size, 'pad': pad}\n self.colorbar_options = colorbar_options\n return self\n\n def unset_colorbar(self) -> 'Heatmap':\n \"\"\"Disables colorbar. No colorbar is drawn.\"\"\"\n self.plot_colorbar = False\n return self\n\n def set_colormap(\n self,\n colormap: Union[str, mpl.colors.Colormap] = 'viridis',\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ) -> 'Heatmap':\n \"\"\"Sets the colormap.\n\n Args:\n colormap: either a colormap name or a Colormap instance.\n vmin: the minimum value to map to the minimum color. Default is\n the minimum value in value_map.\n vmax: the maximum value to map to the maximum color. Default is\n the maximum value in value_map.\n \"\"\"\n self.colormap = colormap\n self.vmin = vmin\n self.vmax = vmax\n return self\n\n def plot(\n self, ax: Optional[plt.Axes] = None, **pcolor_options: Any\n ) -> Tuple[plt.Axes, mpl_collections.Collection, pd.DataFrame]:\n \"\"\"Plots the heatmap on the given Axes.\n\n Args:\n ax: the Axes to plot on. If not given, a new figure is created,\n plotted on, and shown.\n pcolor_options: keyword arguments passed to ax.pcolor().\n\n Returns:\n A 3-tuple ``(ax, mesh, value_table)``. ``ax`` is the `plt.Axes` that\n is plotted on. 
``mesh`` is the collection of paths drawn and filled.\n ``value_table`` is the 2-D pandas DataFrame of values constructed\n from the value_map.\n \"\"\"\n show_plot = not ax\n if not ax:\n fig, ax = plt.subplots(figsize=(8, 8))\n # Find the boundary and size of the heatmap.\n coordinate_list = [_get_qubit_row_col(qubit) for qubit in self.value_map.keys()]\n rows = [row for row, _ in coordinate_list]\n cols = [col for _, col in coordinate_list]\n min_row, max_row = min(rows), max(rows)\n min_col, max_col = min(cols), max(cols)\n height, width = max_row - min_row + 1, max_col - min_col + 1\n # Construct the (height x width) table of values. Cells with no values\n # are filled with np.nan.\n value_table = pd.DataFrame(\n np.nan, index=range(min_row, max_row + 1), columns=range(min_col, max_col + 1)\n )\n for qubit, (float_value, _) in self.value_map.items():\n row, col = _get_qubit_row_col(qubit)\n value_table[col][row] = float_value\n # Construct the (height + 1) x (width + 1) cell boundary tables.\n x_table = np.array([np.arange(min_col - 0.5, max_col + 1.5)] * (height + 1))\n y_table = np.array([np.arange(min_row - 0.5, max_row + 1.5)] * (width + 1)).transpose()\n\n # Construct the URL array as an ordered list of URLs for non-nan cells.\n url_array: List[str] = []\n if self.url_map:\n url_array = [self.url_map.get((row, col), '') for row, col in value_table.stack().index]\n\n # Plot the heatmap.\n mesh = ax.pcolor(\n x_table,\n y_table,\n value_table,\n vmin=self.vmin,\n vmax=self.vmax,\n cmap=self.colormap,\n urls=url_array,\n **pcolor_options,\n )\n mesh.update_scalarmappable()\n ax.set(xlabel='column', ylabel='row')\n ax.xaxis.set_ticks(np.arange(min_col, max_col + 1))\n ax.yaxis.set_ticks(np.arange(min_row, max_row + 1))\n ax.set_ylim((max_row + 0.5, min_row - 0.5))\n\n if self.plot_colorbar:\n self._plot_colorbar(mesh, ax)\n\n if self.annot_map:\n self._write_annotations(mesh, ax)\n\n if show_plot:\n fig.show()\n\n return ax, mesh, value_table\n\n def _plot_colorbar(\n self, mappable: mpl.cm.ScalarMappable, ax: plt.Axes\n ) -> mpl.colorbar.Colorbar:\n \"\"\"Plots the colorbar. Internal.\"\"\"\n colorbar_ax = axes_grid1.make_axes_locatable(ax).append_axes(\n **self.colorbar_location_options\n )\n position = self.colorbar_location_options.get('position', 'right')\n orien = 'vertical' if position in ('left', 'right') else 'horizontal'\n colorbar = ax.figure.colorbar(\n mappable, colorbar_ax, ax, orientation=orien, **self.colorbar_options\n )\n colorbar_ax.tick_params(axis='y', direction='out')\n return colorbar\n\n def _write_annotations(self, mesh: mpl_collections.Collection, ax: plt.Axes) -> None:\n \"\"\"Writes annotations to the center of cells. Internal.\"\"\"\n for path, facecolor in zip(mesh.get_paths(), mesh.get_facecolors()):\n # Calculate the center of the cell, assuming that it is a square\n # centered at (x=col, y=row).\n vertices = path.vertices[:4]\n row = int(round(np.mean([v[1] for v in vertices])))\n col = int(round(np.mean([v[0] for v in vertices])))\n annotation = self.annot_map.get((row, col), '')\n if not annotation:\n continue\n face_luminance = relative_luminance(facecolor)\n text_color = 'black' if face_luminance > 0.4 else 'white'\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kwargs)\n ax.text(col, row, annotation, **text_kwargs)\n", "path": "cirq/vis/heatmap.py" } ]
diff --git a/cirq/vis/heatmap.py b/cirq/vis/heatmap.py index 392db174213..87033f27806 100644 --- a/cirq/vis/heatmap.py +++ b/cirq/vis/heatmap.py @@ -38,7 +38,7 @@ def _get_qubit_row_col(qubit: QubitCoordinate) -> Tuple[int, int]: if isinstance(qubit, grid_qubit.GridQubit): return qubit.row, qubit.col elif isinstance(qubit, tuple): - return qubit[0], qubit[1] + return int(qubit[0]), int(qubit[1]) def relative_luminance(color: np.ndarray) -> float: diff --git a/cirq/vis/heatmap_test.py b/cirq/vis/heatmap_test.py index c85bc8c4eea..574efbc525b 100644 --- a/cirq/vis/heatmap_test.py +++ b/cirq/vis/heatmap_test.py @@ -13,7 +13,9 @@ # limitations under the License. """Tests for Heatmap.""" +import pathlib import string +from tempfile import mkdtemp import numpy as np import pytest @@ -226,20 +228,72 @@ def test_urls(ax, test_GridQubit): assert mesh.get_urls() == expected_urls -def test_colorbar(ax): [email protected]( + 'position,size,pad', + [ + ('right', "5%", "2%"), + ('right', "5%", "10%"), + ('right', "20%", "2%"), + ('right', "20%", "10%"), + ('left', "5%", "2%"), + ('left', "5%", "10%"), + ('left', "20%", "2%"), + ('left', "20%", "10%"), + ('top', "5%", "2%"), + ('top', "5%", "10%"), + ('top', "20%", "2%"), + ('top', "20%", "10%"), + ('bottom', "5%", "2%"), + ('bottom', "5%", "10%"), + ('bottom', "20%", "2%"), + ('bottom', "20%", "10%"), + ], +) +def test_colorbar(ax, position, size, pad): qubits = ((0, 5), (8, 1), (7, 0), (13, 5), (1, 6), (3, 2), (2, 8)) values = np.random.random(len(qubits)) test_value_map = {qubit: value for qubit, value in zip(qubits, values)} random_heatmap = heatmap.Heatmap(test_value_map).unset_colorbar() fig1, ax1 = plt.subplots() random_heatmap.plot(ax1) - random_heatmap.set_colorbar() + random_heatmap.set_colorbar(position=position, size=size, pad=pad) fig2, ax2 = plt.subplots() random_heatmap.plot(ax2) + # We need to call savefig() explicitly for updating axes position since the figure + # object has been altered in the HeatMap._plot_colorbar function. + tmp_dir = mkdtemp() + fig2.savefig(pathlib.Path(tmp_dir) / 'tmp.png') + # Check that the figure has one more object in it when colorbar is on. assert len(fig2.get_children()) == len(fig1.get_children()) + 1 - # TODO: Make this is a more thorough test, e.g., we should test that the - # position, size and pad arguments are respected. - # Github issue: https://github.com/quantumlib/Cirq/issues/2969 + fig_pos = fig2.get_axes()[0].get_position() + colorbar_pos = fig2.get_axes()[1].get_position() + + origin_axes_size = ( + fig_pos.xmax - fig_pos.xmin + if position in ["left", "right"] + else fig_pos.ymax - fig_pos.ymin + ) + expected_pad = int(pad.replace("%", "")) / 100 * origin_axes_size + expected_size = int(size.replace("%", "")) / 100 * origin_axes_size + + if position == "right": + pad_distance = colorbar_pos.xmin - fig_pos.xmax + colorbar_size = colorbar_pos.xmax - colorbar_pos.xmin + elif position == "left": + pad_distance = fig_pos.xmin - colorbar_pos.xmax + colorbar_size = colorbar_pos.xmax - colorbar_pos.xmin + elif position == "top": + pad_distance = colorbar_pos.ymin - fig_pos.ymax + colorbar_size = colorbar_pos.ymax - colorbar_pos.ymin + elif position == "bottom": + pad_distance = fig_pos.ymin - colorbar_pos.ymax + colorbar_size = colorbar_pos.ymax - colorbar_pos.ymin + + assert np.isclose(colorbar_size, expected_size) + assert np.isclose(pad_distance, expected_pad) + + plt.close(fig1) + plt.close(fig2)
searx__searx-2130
Allow server admins to choose default search method Currently, the default search method used by Searx is `POST`, which breaks compatibility with Firefox containers. Since FF's query to `/opensearch.xml` does not include cookies, the user's preferred method is not reflected and they're forced to create a custom search engine with the correct URL formatting and method. A good solution is to make the method parameter configurable in `settings.yml`. This was already mentioned in #703 and looks fairly easy to implement. Let me know if you want me to open a PR.
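A hedged sketch of the idea: let the per-user `method` preference default to a server-wide `server.method` value from `settings.yml` instead of a hard-coded `'POST'`. It reuses the `EnumStringSetting` class defined in the file below and is illustrative only:

```python
from searx import settings
from searx.preferences import EnumStringSetting

# Falls back to POST when the admin has not set server.method in settings.yml.
method_setting = EnumStringSetting(
    settings['server'].get('method', 'POST'),
    choices=('GET', 'POST'),
)
```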
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Searx preferences implementation.\n\"\"\"\n\n# pylint: disable=useless-object-inheritance\n\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nfrom zlib import compress, decompress\nfrom sys import version\n\nfrom searx import settings, autocomplete\nfrom searx.languages import language_codes as languages\nfrom searx.utils import match_language\nfrom searx.url_utils import parse_qs, urlencode\n\nif version[0] == '3':\n # pylint: disable=invalid-name\n unicode = str\n\n\nCOOKIE_MAX_AGE = 60 * 60 * 24 * 365 * 5 # 5 years\nLANGUAGE_CODES = [l[0] for l in languages]\nLANGUAGE_CODES.append('all')\nDISABLED = 0\nENABLED = 1\nDOI_RESOLVERS = list(settings['doi_resolvers'])\n\n\nclass MissingArgumentException(Exception):\n \"\"\"Exption from ``cls._post_init`` when a argument is missed.\n \"\"\"\n\n\nclass ValidationException(Exception):\n\n \"\"\"Exption from ``cls._post_init`` when configuration value is invalid.\n \"\"\"\n\n\nclass Setting(object):\n \"\"\"Base class of user settings\"\"\"\n\n def __init__(self, default_value, **kwargs):\n super(Setting, self).__init__()\n self.value = default_value\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n self._post_init()\n\n def _post_init(self):\n pass\n\n def parse(self, data):\n \"\"\"Parse ``data`` and store the result at ``self.value``\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n self.value = data\n\n def get_value(self):\n \"\"\"Returns the value of the setting\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n return self.value\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n\n If needed, its overwritten in the inheritance.\"\"\"\n resp.set_cookie(name, self.value, max_age=COOKIE_MAX_AGE)\n\n\nclass StringSetting(Setting):\n \"\"\"Setting of plain string values\"\"\"\n\n\nclass EnumStringSetting(Setting):\n \"\"\"Setting of a value which can only come from the given choices\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selection(self.value)\n\n def _validate_selection(self, selection):\n if selection not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n self._validate_selection(data)\n self.value = data\n\n\nclass MultipleChoiceSetting(EnumStringSetting):\n \"\"\"Setting of values which can only come from the given choices\"\"\"\n\n def _validate_selections(self, selections):\n for item in selections:\n if item not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selections))\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selections(self.value)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.value = []\n return\n\n elements = data.split(',')\n self._validate_selections(elements)\n self.value = elements\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n self.value = []\n for choice in data:\n if choice in self.choices and choice not in self.value: # pylint: disable=no-member\n self.value.append(choice)\n\n def save(self, name, resp):\n \"\"\"Save 
cookie ``name`` in the HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.value), max_age=COOKIE_MAX_AGE)\n\n\nclass SetSetting(Setting):\n \"\"\"Setting of values of type ``set`` (comma separated string) \"\"\"\n def _post_init(self):\n if not hasattr(self, 'values'):\n self.values = set()\n\n def get_value(self):\n \"\"\"Returns a string with comma separated values.\n \"\"\"\n return ','.join(self.values)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.values = set() # pylint: disable=attribute-defined-outside-init\n return\n\n elements = data.split(',')\n for element in elements:\n self.values.add(element)\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n elements = data.split(',')\n self.values = set(elements) # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.values), max_age=COOKIE_MAX_AGE)\n\n\nclass SearchLanguageSetting(EnumStringSetting):\n \"\"\"Available choices may change, so user's value may not be in choices anymore\"\"\"\n\n def _validate_selection(self, selection):\n if selection != \"\" and not match_language(\n # pylint: disable=no-member\n selection, self.choices, fallback=None):\n raise ValidationException('Invalid language code: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data not in self.choices and data != self.value: # pylint: disable=no-member\n # hack to give some backwards compatibility with old language cookies\n data = str(data).replace('_', '-')\n lang = data.split('-')[0]\n # pylint: disable=no-member\n if data in self.choices:\n pass\n elif lang in self.choices:\n data = lang\n else:\n data = self.value\n self.value = data\n\n\nclass MapSetting(Setting):\n \"\"\"Setting of a value that has to be translated in order to be storable\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'map'):\n raise MissingArgumentException('missing argument: map')\n if self.value not in self.map.values(): # pylint: disable=no-member\n raise ValidationException('Invalid default value')\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n # pylint: disable=no-member\n if data not in self.map:\n raise ValidationException('Invalid choice: {0}'.format(data))\n self.value = self.map[data]\n self.key = data # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n if hasattr(self, 'key'):\n resp.set_cookie(name, self.key, max_age=COOKIE_MAX_AGE)\n\n\nclass SwitchableSetting(Setting):\n \"\"\" Base class for settings that can be turned on && off\"\"\"\n\n def _post_init(self):\n self.disabled = set()\n self.enabled = set()\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('missing argument: choices')\n\n def transform_form_items(self, items): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return items\n\n def transform_values(self, values): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return values\n\n def parse_cookie(self, data): # pylint: disable=missing-function-docstring\n # pylint: disable=attribute-defined-outside-init\n if data[DISABLED] != '':\n self.disabled = 
set(data[DISABLED].split(','))\n if data[ENABLED] != '':\n self.enabled = set(data[ENABLED].split(','))\n\n def parse_form(self, items): # pylint: disable=missing-function-docstring\n items = self.transform_form_items(items)\n self.disabled = set() # pylint: disable=attribute-defined-outside-init\n self.enabled = set() # pylint: disable=attribute-defined-outside-init\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on']:\n if choice['id'] in items:\n self.disabled.add(choice['id'])\n else:\n if choice['id'] not in items:\n self.enabled.add(choice['id'])\n\n def save(self, resp): # pylint: disable=arguments-differ\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n resp.set_cookie('disabled_{0}'.format(self.value), ','.join(self.disabled), max_age=COOKIE_MAX_AGE)\n resp.set_cookie('enabled_{0}'.format(self.value), ','.join(self.enabled), max_age=COOKIE_MAX_AGE)\n\n def get_disabled(self): # pylint: disable=missing-function-docstring\n disabled = self.disabled\n for choice in self.choices: # pylint: disable=no-member\n if not choice['default_on'] and choice['id'] not in self.enabled:\n disabled.add(choice['id'])\n return self.transform_values(disabled)\n\n def get_enabled(self): # pylint: disable=missing-function-docstring\n enabled = self.enabled\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on'] and choice['id'] not in self.disabled:\n enabled.add(choice['id'])\n return self.transform_values(enabled)\n\n\nclass EnginesSetting(SwitchableSetting):\n \"\"\"Engine settings\"\"\"\n\n def _post_init(self):\n super(EnginesSetting, self)._post_init()\n transformed_choices = []\n for engine_name, engine in self.choices.items(): # pylint: disable=no-member,access-member-before-definition\n for category in engine.categories:\n transformed_choice = dict()\n transformed_choice['default_on'] = not engine.disabled\n transformed_choice['id'] = '{}__{}'.format(engine_name, category)\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('engine_'):].replace('_', ' ').replace(' ', '__') for item in items]\n\n def transform_values(self, values):\n if len(values) == 1 and next(iter(values)) == '':\n return list()\n transformed_values = []\n for value in values:\n engine, category = value.split('__')\n transformed_values.append((engine, category))\n return transformed_values\n\n\nclass PluginsSetting(SwitchableSetting):\n \"\"\"Plugin settings\"\"\"\n\n def _post_init(self):\n super(PluginsSetting, self)._post_init()\n transformed_choices = []\n for plugin in self.choices: # pylint: disable=access-member-before-definition\n transformed_choice = dict()\n transformed_choice['default_on'] = plugin.default_on\n transformed_choice['id'] = plugin.id\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('plugin_'):] for item in items]\n\n\nclass Preferences(object):\n \"\"\"Validates and saves preferences to cookies\"\"\"\n\n def __init__(self, themes, categories, engines, plugins):\n super(Preferences, self).__init__()\n\n self.key_value_settings = {\n 'categories': MultipleChoiceSetting(\n ['general'], choices=categories + ['none']\n ),\n 'language': SearchLanguageSetting(\n settings['search'].get('default_lang', ''),\n choices=list(LANGUAGE_CODES) + ['']\n ),\n 'locale': EnumStringSetting(\n settings['ui'].get('default_locale', ''),\n 
choices=list(settings['locales'].keys()) + ['']\n ),\n 'autocomplete': EnumStringSetting(\n settings['search'].get('autocomplete', ''),\n choices=list(autocomplete.backends.keys()) + ['']\n ),\n 'image_proxy': MapSetting(\n settings['server'].get('image_proxy', False),\n map={\n '': settings['server'].get('image_proxy', 0),\n '0': False,\n '1': True,\n 'True': True,\n 'False': False\n }\n ),\n 'method': EnumStringSetting(\n 'POST',\n choices=('GET', 'POST')\n ),\n 'safesearch': MapSetting(\n settings['search'].get('safe_search', 0),\n map={\n '0': 0,\n '1': 1,\n '2': 2\n }\n ),\n 'theme': EnumStringSetting(\n settings['ui'].get('default_theme', 'oscar'),\n choices=themes\n ),\n 'results_on_new_tab': MapSetting(\n False,\n map={\n '0': False,\n '1': True,\n 'False': False,\n 'True': True\n }\n ),\n 'doi_resolver': MultipleChoiceSetting(\n ['oadoi.org'], choices=DOI_RESOLVERS\n ),\n 'oscar-style': EnumStringSetting(\n settings['ui'].get('theme_args', {}).get('oscar_style', 'logicodev'),\n choices=['', 'logicodev', 'logicodev-dark', 'pointhi']),\n }\n\n self.engines = EnginesSetting('engines', choices=engines)\n self.plugins = PluginsSetting('plugins', choices=plugins)\n self.tokens = SetSetting('tokens')\n self.unknown_params = {}\n\n def get_as_url_params(self):\n \"\"\"Return preferences as URL parameters\"\"\"\n settings_kv = {}\n for k, v in self.key_value_settings.items():\n if isinstance(v, MultipleChoiceSetting):\n settings_kv[k] = ','.join(v.get_value())\n else:\n settings_kv[k] = v.get_value()\n\n settings_kv['disabled_engines'] = ','.join(self.engines.disabled)\n settings_kv['enabled_engines'] = ','.join(self.engines.enabled)\n\n settings_kv['disabled_plugins'] = ','.join(self.plugins.disabled)\n settings_kv['enabled_plugins'] = ','.join(self.plugins.enabled)\n\n settings_kv['tokens'] = ','.join(self.tokens.values)\n\n return urlsafe_b64encode(compress(urlencode(settings_kv).encode('utf-8'))).decode('utf-8')\n\n def parse_encoded_data(self, input_data):\n \"\"\"parse (base64) preferences from request (``flask.request.form['preferences']``)\"\"\"\n decoded_data = decompress(urlsafe_b64decode(input_data.encode('utf-8')))\n dict_data = {}\n for x, y in parse_qs(decoded_data).items():\n dict_data[x.decode('utf8')] = y[0].decode('utf8')\n self.parse_dict(dict_data)\n\n def parse_dict(self, input_data):\n \"\"\"parse preferences from request (``flask.request.form``)\"\"\"\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n self.key_value_settings[user_setting_name].parse(user_setting)\n elif user_setting_name == 'disabled_engines':\n self.engines.parse_cookie((input_data.get('disabled_engines', ''),\n input_data.get('enabled_engines', '')))\n elif user_setting_name == 'disabled_plugins':\n self.plugins.parse_cookie((input_data.get('disabled_plugins', ''),\n input_data.get('enabled_plugins', '')))\n elif user_setting_name == 'tokens':\n self.tokens.parse(user_setting)\n elif not any(user_setting_name.startswith(x) for x in [\n 'enabled_',\n 'disabled_',\n 'engine_',\n 'category_',\n 'plugin_']):\n self.unknown_params[user_setting_name] = user_setting\n\n def parse_form(self, input_data):\n \"\"\"Parse formular (``<input>``) data from a ``flask.request.form``\"\"\"\n disabled_engines = []\n enabled_categories = []\n disabled_plugins = []\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n self.key_value_settings[user_setting_name].parse(user_setting)\n elif 
user_setting_name.startswith('engine_'):\n disabled_engines.append(user_setting_name)\n elif user_setting_name.startswith('category_'):\n enabled_categories.append(user_setting_name[len('category_'):])\n elif user_setting_name.startswith('plugin_'):\n disabled_plugins.append(user_setting_name)\n elif user_setting_name == 'tokens':\n self.tokens.parse_form(user_setting)\n else:\n self.unknown_params[user_setting_name] = user_setting\n self.key_value_settings['categories'].parse_form(enabled_categories)\n self.engines.parse_form(disabled_engines)\n self.plugins.parse_form(disabled_plugins)\n\n # cannot be used in case of engines or plugins\n def get_value(self, user_setting_name):\n \"\"\"Returns the value for ``user_setting_name``\n \"\"\"\n ret_val = None\n if user_setting_name in self.key_value_settings:\n ret_val = self.key_value_settings[user_setting_name].get_value()\n if user_setting_name in self.unknown_params:\n ret_val = self.unknown_params[user_setting_name]\n return ret_val\n\n def save(self, resp):\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n for user_setting_name, user_setting in self.key_value_settings.items():\n user_setting.save(user_setting_name, resp)\n self.engines.save(resp)\n self.plugins.save(resp)\n self.tokens.save('tokens', resp)\n for k, v in self.unknown_params.items():\n resp.set_cookie(k, v, max_age=COOKIE_MAX_AGE)\n return resp\n\n def validate_token(self, engine): # pylint: disable=missing-function-docstring\n valid = True\n if hasattr(engine, 'tokens') and engine.tokens:\n valid = False\n for token in self.tokens.values:\n if token in engine.tokens:\n valid = True\n break\n\n return valid\n", "path": "searx/preferences.py" } ]
[ { "content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Searx preferences implementation.\n\"\"\"\n\n# pylint: disable=useless-object-inheritance\n\nfrom base64 import urlsafe_b64encode, urlsafe_b64decode\nfrom zlib import compress, decompress\nfrom sys import version\n\nfrom searx import settings, autocomplete\nfrom searx.languages import language_codes as languages\nfrom searx.utils import match_language\nfrom searx.url_utils import parse_qs, urlencode\n\nif version[0] == '3':\n # pylint: disable=invalid-name\n unicode = str\n\n\nCOOKIE_MAX_AGE = 60 * 60 * 24 * 365 * 5 # 5 years\nLANGUAGE_CODES = [l[0] for l in languages]\nLANGUAGE_CODES.append('all')\nDISABLED = 0\nENABLED = 1\nDOI_RESOLVERS = list(settings['doi_resolvers'])\n\n\nclass MissingArgumentException(Exception):\n \"\"\"Exption from ``cls._post_init`` when a argument is missed.\n \"\"\"\n\n\nclass ValidationException(Exception):\n\n \"\"\"Exption from ``cls._post_init`` when configuration value is invalid.\n \"\"\"\n\n\nclass Setting(object):\n \"\"\"Base class of user settings\"\"\"\n\n def __init__(self, default_value, **kwargs):\n super(Setting, self).__init__()\n self.value = default_value\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n self._post_init()\n\n def _post_init(self):\n pass\n\n def parse(self, data):\n \"\"\"Parse ``data`` and store the result at ``self.value``\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n self.value = data\n\n def get_value(self):\n \"\"\"Returns the value of the setting\n\n If needed, its overwritten in the inheritance.\n \"\"\"\n return self.value\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n\n If needed, its overwritten in the inheritance.\"\"\"\n resp.set_cookie(name, self.value, max_age=COOKIE_MAX_AGE)\n\n\nclass StringSetting(Setting):\n \"\"\"Setting of plain string values\"\"\"\n\n\nclass EnumStringSetting(Setting):\n \"\"\"Setting of a value which can only come from the given choices\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selection(self.value)\n\n def _validate_selection(self, selection):\n if selection not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n self._validate_selection(data)\n self.value = data\n\n\nclass MultipleChoiceSetting(EnumStringSetting):\n \"\"\"Setting of values which can only come from the given choices\"\"\"\n\n def _validate_selections(self, selections):\n for item in selections:\n if item not in self.choices: # pylint: disable=no-member\n raise ValidationException('Invalid value: \"{0}\"'.format(selections))\n\n def _post_init(self):\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('Missing argument: choices')\n self._validate_selections(self.value)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.value = []\n return\n\n elements = data.split(',')\n self._validate_selections(elements)\n self.value = elements\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n self.value = []\n for choice in data:\n if choice in self.choices and choice not in self.value: # pylint: disable=no-member\n self.value.append(choice)\n\n def save(self, name, resp):\n \"\"\"Save 
cookie ``name`` in the HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.value), max_age=COOKIE_MAX_AGE)\n\n\nclass SetSetting(Setting):\n \"\"\"Setting of values of type ``set`` (comma separated string) \"\"\"\n def _post_init(self):\n if not hasattr(self, 'values'):\n self.values = set()\n\n def get_value(self):\n \"\"\"Returns a string with comma separated values.\n \"\"\"\n return ','.join(self.values)\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data == '':\n self.values = set() # pylint: disable=attribute-defined-outside-init\n return\n\n elements = data.split(',')\n for element in elements:\n self.values.add(element)\n\n def parse_form(self, data): # pylint: disable=missing-function-docstring\n elements = data.split(',')\n self.values = set(elements) # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n resp.set_cookie(name, ','.join(self.values), max_age=COOKIE_MAX_AGE)\n\n\nclass SearchLanguageSetting(EnumStringSetting):\n \"\"\"Available choices may change, so user's value may not be in choices anymore\"\"\"\n\n def _validate_selection(self, selection):\n if selection != \"\" and not match_language(\n # pylint: disable=no-member\n selection, self.choices, fallback=None):\n raise ValidationException('Invalid language code: \"{0}\"'.format(selection))\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n if data not in self.choices and data != self.value: # pylint: disable=no-member\n # hack to give some backwards compatibility with old language cookies\n data = str(data).replace('_', '-')\n lang = data.split('-')[0]\n # pylint: disable=no-member\n if data in self.choices:\n pass\n elif lang in self.choices:\n data = lang\n else:\n data = self.value\n self.value = data\n\n\nclass MapSetting(Setting):\n \"\"\"Setting of a value that has to be translated in order to be storable\"\"\"\n\n def _post_init(self):\n if not hasattr(self, 'map'):\n raise MissingArgumentException('missing argument: map')\n if self.value not in self.map.values(): # pylint: disable=no-member\n raise ValidationException('Invalid default value')\n\n def parse(self, data):\n \"\"\"Parse and validate ``data`` and store the result at ``self.value``\n \"\"\"\n # pylint: disable=no-member\n if data not in self.map:\n raise ValidationException('Invalid choice: {0}'.format(data))\n self.value = self.map[data]\n self.key = data # pylint: disable=attribute-defined-outside-init\n\n def save(self, name, resp):\n \"\"\"Save cookie ``name`` in the HTTP reponse obect\n \"\"\"\n if hasattr(self, 'key'):\n resp.set_cookie(name, self.key, max_age=COOKIE_MAX_AGE)\n\n\nclass SwitchableSetting(Setting):\n \"\"\" Base class for settings that can be turned on && off\"\"\"\n\n def _post_init(self):\n self.disabled = set()\n self.enabled = set()\n if not hasattr(self, 'choices'):\n raise MissingArgumentException('missing argument: choices')\n\n def transform_form_items(self, items): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return items\n\n def transform_values(self, values): # pylint: disable=missing-function-docstring\n # pylint: disable=no-self-use\n return values\n\n def parse_cookie(self, data): # pylint: disable=missing-function-docstring\n # pylint: disable=attribute-defined-outside-init\n if data[DISABLED] != '':\n self.disabled = 
set(data[DISABLED].split(','))\n if data[ENABLED] != '':\n self.enabled = set(data[ENABLED].split(','))\n\n def parse_form(self, items): # pylint: disable=missing-function-docstring\n items = self.transform_form_items(items)\n self.disabled = set() # pylint: disable=attribute-defined-outside-init\n self.enabled = set() # pylint: disable=attribute-defined-outside-init\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on']:\n if choice['id'] in items:\n self.disabled.add(choice['id'])\n else:\n if choice['id'] not in items:\n self.enabled.add(choice['id'])\n\n def save(self, resp): # pylint: disable=arguments-differ\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n resp.set_cookie('disabled_{0}'.format(self.value), ','.join(self.disabled), max_age=COOKIE_MAX_AGE)\n resp.set_cookie('enabled_{0}'.format(self.value), ','.join(self.enabled), max_age=COOKIE_MAX_AGE)\n\n def get_disabled(self): # pylint: disable=missing-function-docstring\n disabled = self.disabled\n for choice in self.choices: # pylint: disable=no-member\n if not choice['default_on'] and choice['id'] not in self.enabled:\n disabled.add(choice['id'])\n return self.transform_values(disabled)\n\n def get_enabled(self): # pylint: disable=missing-function-docstring\n enabled = self.enabled\n for choice in self.choices: # pylint: disable=no-member\n if choice['default_on'] and choice['id'] not in self.disabled:\n enabled.add(choice['id'])\n return self.transform_values(enabled)\n\n\nclass EnginesSetting(SwitchableSetting):\n \"\"\"Engine settings\"\"\"\n\n def _post_init(self):\n super(EnginesSetting, self)._post_init()\n transformed_choices = []\n for engine_name, engine in self.choices.items(): # pylint: disable=no-member,access-member-before-definition\n for category in engine.categories:\n transformed_choice = dict()\n transformed_choice['default_on'] = not engine.disabled\n transformed_choice['id'] = '{}__{}'.format(engine_name, category)\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('engine_'):].replace('_', ' ').replace(' ', '__') for item in items]\n\n def transform_values(self, values):\n if len(values) == 1 and next(iter(values)) == '':\n return list()\n transformed_values = []\n for value in values:\n engine, category = value.split('__')\n transformed_values.append((engine, category))\n return transformed_values\n\n\nclass PluginsSetting(SwitchableSetting):\n \"\"\"Plugin settings\"\"\"\n\n def _post_init(self):\n super(PluginsSetting, self)._post_init()\n transformed_choices = []\n for plugin in self.choices: # pylint: disable=access-member-before-definition\n transformed_choice = dict()\n transformed_choice['default_on'] = plugin.default_on\n transformed_choice['id'] = plugin.id\n transformed_choices.append(transformed_choice)\n self.choices = transformed_choices\n\n def transform_form_items(self, items):\n return [item[len('plugin_'):] for item in items]\n\n\nclass Preferences(object):\n \"\"\"Validates and saves preferences to cookies\"\"\"\n\n def __init__(self, themes, categories, engines, plugins):\n super(Preferences, self).__init__()\n\n self.key_value_settings = {\n 'categories': MultipleChoiceSetting(\n ['general'], choices=categories + ['none']\n ),\n 'language': SearchLanguageSetting(\n settings['search'].get('default_lang', ''),\n choices=list(LANGUAGE_CODES) + ['']\n ),\n 'locale': EnumStringSetting(\n settings['ui'].get('default_locale', ''),\n 
choices=list(settings['locales'].keys()) + ['']\n ),\n 'autocomplete': EnumStringSetting(\n settings['search'].get('autocomplete', ''),\n choices=list(autocomplete.backends.keys()) + ['']\n ),\n 'image_proxy': MapSetting(\n settings['server'].get('image_proxy', False),\n map={\n '': settings['server'].get('image_proxy', 0),\n '0': False,\n '1': True,\n 'True': True,\n 'False': False\n }\n ),\n 'method': EnumStringSetting(\n settings['server'].get('method', 'POST'),\n choices=('GET', 'POST')\n ),\n 'safesearch': MapSetting(\n settings['search'].get('safe_search', 0),\n map={\n '0': 0,\n '1': 1,\n '2': 2\n }\n ),\n 'theme': EnumStringSetting(\n settings['ui'].get('default_theme', 'oscar'),\n choices=themes\n ),\n 'results_on_new_tab': MapSetting(\n False,\n map={\n '0': False,\n '1': True,\n 'False': False,\n 'True': True\n }\n ),\n 'doi_resolver': MultipleChoiceSetting(\n ['oadoi.org'], choices=DOI_RESOLVERS\n ),\n 'oscar-style': EnumStringSetting(\n settings['ui'].get('theme_args', {}).get('oscar_style', 'logicodev'),\n choices=['', 'logicodev', 'logicodev-dark', 'pointhi']),\n }\n\n self.engines = EnginesSetting('engines', choices=engines)\n self.plugins = PluginsSetting('plugins', choices=plugins)\n self.tokens = SetSetting('tokens')\n self.unknown_params = {}\n\n def get_as_url_params(self):\n \"\"\"Return preferences as URL parameters\"\"\"\n settings_kv = {}\n for k, v in self.key_value_settings.items():\n if isinstance(v, MultipleChoiceSetting):\n settings_kv[k] = ','.join(v.get_value())\n else:\n settings_kv[k] = v.get_value()\n\n settings_kv['disabled_engines'] = ','.join(self.engines.disabled)\n settings_kv['enabled_engines'] = ','.join(self.engines.enabled)\n\n settings_kv['disabled_plugins'] = ','.join(self.plugins.disabled)\n settings_kv['enabled_plugins'] = ','.join(self.plugins.enabled)\n\n settings_kv['tokens'] = ','.join(self.tokens.values)\n\n return urlsafe_b64encode(compress(urlencode(settings_kv).encode('utf-8'))).decode('utf-8')\n\n def parse_encoded_data(self, input_data):\n \"\"\"parse (base64) preferences from request (``flask.request.form['preferences']``)\"\"\"\n decoded_data = decompress(urlsafe_b64decode(input_data.encode('utf-8')))\n dict_data = {}\n for x, y in parse_qs(decoded_data).items():\n dict_data[x.decode('utf8')] = y[0].decode('utf8')\n self.parse_dict(dict_data)\n\n def parse_dict(self, input_data):\n \"\"\"parse preferences from request (``flask.request.form``)\"\"\"\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n self.key_value_settings[user_setting_name].parse(user_setting)\n elif user_setting_name == 'disabled_engines':\n self.engines.parse_cookie((input_data.get('disabled_engines', ''),\n input_data.get('enabled_engines', '')))\n elif user_setting_name == 'disabled_plugins':\n self.plugins.parse_cookie((input_data.get('disabled_plugins', ''),\n input_data.get('enabled_plugins', '')))\n elif user_setting_name == 'tokens':\n self.tokens.parse(user_setting)\n elif not any(user_setting_name.startswith(x) for x in [\n 'enabled_',\n 'disabled_',\n 'engine_',\n 'category_',\n 'plugin_']):\n self.unknown_params[user_setting_name] = user_setting\n\n def parse_form(self, input_data):\n \"\"\"Parse formular (``<input>``) data from a ``flask.request.form``\"\"\"\n disabled_engines = []\n enabled_categories = []\n disabled_plugins = []\n for user_setting_name, user_setting in input_data.items():\n if user_setting_name in self.key_value_settings:\n 
self.key_value_settings[user_setting_name].parse(user_setting)\n elif user_setting_name.startswith('engine_'):\n disabled_engines.append(user_setting_name)\n elif user_setting_name.startswith('category_'):\n enabled_categories.append(user_setting_name[len('category_'):])\n elif user_setting_name.startswith('plugin_'):\n disabled_plugins.append(user_setting_name)\n elif user_setting_name == 'tokens':\n self.tokens.parse_form(user_setting)\n else:\n self.unknown_params[user_setting_name] = user_setting\n self.key_value_settings['categories'].parse_form(enabled_categories)\n self.engines.parse_form(disabled_engines)\n self.plugins.parse_form(disabled_plugins)\n\n # cannot be used in case of engines or plugins\n def get_value(self, user_setting_name):\n \"\"\"Returns the value for ``user_setting_name``\n \"\"\"\n ret_val = None\n if user_setting_name in self.key_value_settings:\n ret_val = self.key_value_settings[user_setting_name].get_value()\n if user_setting_name in self.unknown_params:\n ret_val = self.unknown_params[user_setting_name]\n return ret_val\n\n def save(self, resp):\n \"\"\"Save cookie in the HTTP reponse obect\n \"\"\"\n for user_setting_name, user_setting in self.key_value_settings.items():\n user_setting.save(user_setting_name, resp)\n self.engines.save(resp)\n self.plugins.save(resp)\n self.tokens.save('tokens', resp)\n for k, v in self.unknown_params.items():\n resp.set_cookie(k, v, max_age=COOKIE_MAX_AGE)\n return resp\n\n def validate_token(self, engine): # pylint: disable=missing-function-docstring\n valid = True\n if hasattr(engine, 'tokens') and engine.tokens:\n valid = False\n for token in self.tokens.values:\n if token in engine.tokens:\n valid = True\n break\n\n return valid\n", "path": "searx/preferences.py" } ]
diff --git a/searx/preferences.py b/searx/preferences.py index f70aee37aa..34da1b7c68 100644 --- a/searx/preferences.py +++ b/searx/preferences.py @@ -348,7 +348,7 @@ def __init__(self, themes, categories, engines, plugins): } ), 'method': EnumStringSetting( - 'POST', + settings['server'].get('method', 'POST'), choices=('GET', 'POST') ), 'safesearch': MapSetting( diff --git a/searx/settings.yml b/searx/settings.yml index 63685be8bf..68fd0ee6f5 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -16,6 +16,7 @@ server: base_url : False # Set custom base_url. Possible values: False or "https://your.custom.host/location/" image_proxy : False # Proxying image results through searx http_protocol_version : "1.0" # 1.0 and 1.1 are supported + method: "POST" # POST queries are more secure as they don't show up in history but may cause problems when using Firefox containers ui: static_path : "" # Custom static path - leave it blank if you didn't change
optuna__optuna-5055
Use `__future__.annotations` everywhere in the Optuna code base ### Motivation Optuna drops Python 3.6 from v3.1, so we can use `__future__.annotations`, which simplifies the code base. See [PEP 563](https://peps.python.org/pep-0563/), [PEP584](https://peps.python.org/pep-0584/), [PEP 585](https://peps.python.org/pep-0585/), and [PEP 604](https://peps.python.org/pep-0604/) for more details. This issue suggests to use the module and simplifies the code base. ### Suggestion Use `__future__.annotations` for each file and simplify the type annotations. The list of classes whose type annotations can be simplified is [here](https://peps.python.org/pep-0585/#implementation). The list of files where the `__future__.annotations` can be used is as follows. In order to reduce review costs and to encourage more contributors to work on it, please, as a rule, fix one file per PR. - [x] optuna/_convert_positional_args.py - [x] optuna/visualization/_optimization_history.py - [x] optuna/visualization/_hypervolume_history.py - [x] optuna/visualization/_edf.py - [x] optuna/visualization/_pareto_front.py - [x] optuna/visualization/matplotlib/_optimization_history.py - [x] optuna/visualization/matplotlib/_hypervolume_history.py - [x] optuna/visualization/matplotlib/_edf.py - [x] optuna/visualization/matplotlib/_pareto_front.py - [x] optuna/visualization/matplotlib/_contour.py - [x] optuna/visualization/_utils.py - [x] optuna/logging.py - [ ] optuna/storages/_base.py - [ ] optuna/storages/_cached_storage.py - [ ] optuna/storages/__init__.py - [ ] optuna/storages/_heartbeat.py - [ ] optuna/storages/_in_memory.py - [ ] optuna/storages/_rdb/models.py - [ ] optuna/storages/_rdb/storage.py - [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.c.py - [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.d.py - [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.a.py - [ ] optuna/storages/_journal/file.py - [ ] optuna/storages/_journal/redis.py - [ ] optuna/storages/_journal/storage.py - [ ] optuna/storages/_journal/base.py - [ ] optuna/study/_dataframe.py - [ ] optuna/study/_optimize.py - [ ] optuna/study/_tell.py - [ ] optuna/study/_multi_objective.py - [ ] optuna/study/_frozen.py - [ ] optuna/study/study.py - [ ] optuna/study/_study_summary.py - [ ] optuna/search_space/group_decomposed.py - [ ] optuna/search_space/intersection.py - [ ] optuna/_typing.py - [ ] optuna/_deprecated.py - [ ] optuna/pruners/_hyperband.py - [ ] optuna/pruners/_patient.py - [ ] optuna/pruners/_successive_halving.py - [ ] optuna/pruners/_percentile.py - [ ] optuna/pruners/_threshold.py - [ ] optuna/trial/_base.py - [ ] optuna/trial/_fixed.py - [ ] optuna/trial/_trial.py - [ ] optuna/trial/_frozen.py - [ ] optuna/integration/cma.py - [ ] optuna/integration/shap.py - [ ] optuna/integration/lightgbm.py - [ ] optuna/integration/pytorch_distributed.py - [ ] optuna/integration/_lightgbm_tuner/optimize.py - [ ] optuna/integration/_lightgbm_tuner/alias.py - [ ] optuna/integration/mlflow.py - [ ] optuna/integration/wandb.py - [ ] optuna/integration/catboost.py - [ ] optuna/integration/skopt.py - [ ] optuna/integration/botorch.py - [ ] optuna/integration/dask.py - [x] optuna/integration/sklearn.py - [ ] optuna/integration/tensorboard.py - [ ] optuna/terminator/callback.py - [ ] optuna/terminator/terminator.py - [ ] optuna/terminator/improvement/_preprocessing.py - [ ] optuna/terminator/improvement/gp/botorch.py - [ ] optuna/terminator/improvement/gp/base.py - [ ] optuna/terminator/improvement/evaluator.py - [ ] optuna/importance/_base.py - [ ] 
optuna/importance/_mean_decrease_impurity.py - [ ] optuna/importance/__init__.py - [ ] optuna/importance/_fanova/_fanova.py - [ ] optuna/importance/_fanova/_evaluator.py - [ ] optuna/importance/_fanova/_tree.py - [ ] optuna/_imports.py - [ ] optuna/testing/tempfile_pool.py - [ ] optuna/testing/threading.py - [ ] optuna/testing/distributions.py - [ ] optuna/testing/samplers.py - [ ] optuna/testing/storages.py - [ ] optuna/distributions.py - [ ] optuna/cli.py - [ ] optuna/multi_objective/visualization/_pareto_front.py - [ ] optuna/multi_objective/trial.py - [ ] optuna/multi_objective/samplers/_base.py - [ ] optuna/multi_objective/samplers/_nsga2.py - [ ] optuna/multi_objective/samplers/_adapter.py - [ ] optuna/multi_objective/samplers/_random.py - [ ] optuna/multi_objective/samplers/_motpe.py - [ ] optuna/multi_objective/study.py - [ ] optuna/_experimental.py - [ ] optuna/samplers/_base.py - [ ] optuna/samplers/nsgaii/_crossovers/_undx.py - [ ] optuna/samplers/nsgaii/_crossovers/_spx.py - [ ] optuna/samplers/nsgaii/_crossovers/_sbx.py - [ ] optuna/samplers/nsgaii/_crossovers/_vsbx.py - [ ] optuna/samplers/nsgaii/_sampler.py - [ ] optuna/samplers/nsgaii/_crossover.py - [ ] optuna/samplers/_search_space/intersection.py - [ ] optuna/samplers/_qmc.py - [ ] optuna/samplers/_tpe/probability_distributions.py - [ ] optuna/samplers/_tpe/_truncnorm.py - [ ] optuna/samplers/_tpe/multi_objective_sampler.py - [ ] optuna/samplers/_tpe/parzen_estimator.py - [ ] optuna/samplers/_tpe/sampler.py - [ ] optuna/samplers/_random.py - [ ] optuna/samplers/_cmaes.py - [ ] optuna/samplers/_partial_fixed.py - [ ] optuna/samplers/_brute_force.py - [ ] optuna/samplers/_nsgaiii.py - [ ] optuna/samplers/_grid.py - [ ] optuna/_hypervolume/wfg.py - [ ] optuna/_hypervolume/hssp.py - [ ] optuna/progress_bar.py - [ ] optuna/_transform.py - [ ] optuna/_callbacks.py - [ ] tests/multi_objective_tests/test_study.py - [ ] tests/multi_objective_tests/samplers_tests/test_motpe.py - [ ] tests/multi_objective_tests/samplers_tests/test_nsga2.py - [ ] tests/multi_objective_tests/test_trial.py - [ ] tests/multi_objective_tests/visualization_tests/test_pareto_front.py - [ ] tests/trial_tests/test_frozen.py - [ ] tests/trial_tests/test_trials.py - [ ] tests/trial_tests/test_trial.py - [ ] tests/pruners_tests/test_percentile.py - [ ] tests/pruners_tests/test_median.py - [ ] tests/pruners_tests/test_patient.py - [ ] tests/pruners_tests/test_successive_halving.py - [ ] tests/study_tests/test_optimize.py - [ ] tests/study_tests/test_study.py - [ ] tests/hypervolume_tests/test_hssp.py - [x] tests/integration_tests/test_skopt.py - [x] tests/integration_tests/test_pytorch_lightning.py - [ ] tests/integration_tests/test_shap.py - [ ] tests/integration_tests/test_cma.py - [ ] tests/integration_tests/test_pytorch_distributed.py - [ ] tests/integration_tests/lightgbm_tuner_tests/test_optimize.py - [ ] tests/integration_tests/lightgbm_tuner_tests/test_alias.py - [ ] tests/integration_tests/test_botorch.py - [ ] tests/integration_tests/test_mlflow.py - [ ] tests/integration_tests/test_mxnet.py - [ ] tests/integration_tests/test_wandb.py - [ ] tests/importance_tests/fanova_tests/test_tree.py - [ ] tests/importance_tests/test_mean_decrease_impurity.py - [ ] tests/importance_tests/test_fanova.py - [ ] tests/importance_tests/test_init.py - [ ] tests/test_convert_positional_args.py - [ ] tests/test_deprecated.py - [ ] tests/storages_tests/test_journal.py - [ ] tests/storages_tests/test_heartbeat.py - [ ] tests/storages_tests/test_storages.py - [ ] 
tests/storages_tests/rdb_tests/test_storage.py - [ ] tests/storages_tests/rdb_tests/create_db.py - [ ] tests/storages_tests/test_with_server.py - [ ] tests/samplers_tests/test_grid.py - [ ] tests/samplers_tests/tpe_tests/test_parzen_estimator.py - [ ] tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py - [ ] tests/samplers_tests/tpe_tests/test_sampler.py - [ ] tests/samplers_tests/test_cmaes.py - [ ] tests/samplers_tests/test_samplers.py - [x] tests/samplers_tests/test_nsgaii.py - [x] tests/samplers_tests/test_nsgaiii.py - [ ] tests/samplers_tests/test_qmc.py - [ ] tests/test_distributions.py - [ ] tests/test_multi_objective.py - [ ] tests/test_cli.py - [ ] tests/visualization_tests/test_hypervolume_history.py - [ ] tests/visualization_tests/test_pareto_front.py - [ ] tests/terminator_tests/improvement_tests/test_evaluator.py - [ ] benchmarks/kurobako/problems/wfg/transformation_functions.py - [ ] benchmarks/bayesmark/report_bayesmark.py - [ ] benchmarks/bayesmark/optuna_optimizer.py ### Additional context (optional) The above list is generated by the following script. <details> <summary>script</summary> ```python import os import pathlib PATTERS = [ "from typing import Union", "from typing import Optional", "from typing import Tuple", "from typing import List", "from typing import Dict", "from typing import Set", "from typing import FrozenSet", "from typing import Type", "from typing import FrozenSet", "from typing import Sequence", ] def get_filenames_to_be_simplified(dir_path): ret = [] for f in os.listdir(dir_path): file_path = os.path.join(dir_path, f) if not os.path.isfile(file_path): ret.extend(get_filenames_to_be_simplified(file_path)) else: try: with open(file_path) as fd: contents = fd.read() if any([s in contents for s in PATTERS]): ret.append(str(file_path)) except UnicodeDecodeError as e: pass return ret def main(): dirs = ["optuna", "tests", "benchmarks"] for dir_name in dirs: filenames = get_filenames_to_be_simplified(pathlib.Path(dir_name)) for filename in filenames: print(f"- [ ] {filename}") if __name__ == "__main__": main() ``` </details>
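For a concrete sense of the simplification this enables, a minimal sketch (an illustrative example, not code taken from the Optuna repository): with `from __future__ import annotations`, hints that previously required `Optional`, `List`, and `Dict` from `typing` can be written with built-in generics and PEP 604 `|` unions, even on Python versions where those forms are not otherwise usable at runtime, because annotations are no longer evaluated.

```python
from __future__ import annotations

# Before: from typing import Dict, List, Optional
#         def summarize(values: Optional[List[float]]) -> Dict[str, float]: ...
# After (PEP 585/604 style, valid because annotations stay unevaluated):


def summarize(values: list[float] | None = None) -> dict[str, float]:
    """Return simple statistics, or an empty dict when no values are given."""
    if not values:
        return {}
    return {"min": min(values), "max": max(values), "mean": sum(values) / len(values)}


print(summarize([1.0, 2.0, 3.0]))  # {'min': 1.0, 'max': 3.0, 'mean': 2.0}
```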
[ { "content": "import abc\nfrom typing import Optional\n\nfrom optuna._experimental import experimental_class\nfrom optuna.study.study import Study\nfrom optuna.terminator.erroreval import BaseErrorEvaluator\nfrom optuna.terminator.erroreval import CrossValidationErrorEvaluator\nfrom optuna.terminator.erroreval import StaticErrorEvaluator\nfrom optuna.terminator.improvement.evaluator import BaseImprovementEvaluator\nfrom optuna.terminator.improvement.evaluator import BestValueStagnationEvaluator\nfrom optuna.terminator.improvement.evaluator import DEFAULT_MIN_N_TRIALS\nfrom optuna.terminator.improvement.evaluator import RegretBoundEvaluator\nfrom optuna.trial import TrialState\n\n\nclass BaseTerminator(metaclass=abc.ABCMeta):\n \"\"\"Base class for terminators.\"\"\"\n\n @abc.abstractmethod\n def should_terminate(self, study: Study) -> bool:\n pass\n\n\n@experimental_class(\"3.2.0\")\nclass Terminator(BaseTerminator):\n \"\"\"Automatic stopping mechanism for Optuna studies.\n\n This class implements an automatic stopping mechanism for Optuna studies, aiming to prevent\n unnecessary computation. The study is terminated when the statistical error, e.g.\n cross-validation error, exceeds the room left for optimization.\n\n For further information about the algorithm, please refer to the following paper:\n\n - `A. Makarova et al. Automatic termination for hyperparameter optimization.\n <https://arxiv.org/abs/2104.08166>`_\n\n Args:\n improvement_evaluator:\n An evaluator object for assessing the room left for optimization. Defaults to a\n :class:`~optuna.terminator.improvement.evaluator.RegretBoundEvaluator` object.\n error_evaluator:\n An evaluator for calculating the statistical error, e.g. cross-validation error.\n Defaults to a :class:`~optuna.terminator.CrossValidationErrorEvaluator`\n object.\n min_n_trials:\n The minimum number of trials before termination is considered. Defaults to ``20``.\n\n Raises:\n ValueError: If ``min_n_trials`` is not a positive integer.\n\n Example:\n\n .. testcode::\n\n import logging\n import sys\n\n from sklearn.datasets import load_wine\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import KFold\n\n import optuna\n from optuna.terminator import Terminator\n from optuna.terminator import report_cross_validation_scores\n\n\n study = optuna.create_study(direction=\"maximize\")\n terminator = Terminator()\n min_n_trials = 20\n\n while True:\n trial = study.ask()\n\n X, y = load_wine(return_X_y=True)\n\n clf = RandomForestClassifier(\n max_depth=trial.suggest_int(\"max_depth\", 2, 32),\n min_samples_split=trial.suggest_float(\"min_samples_split\", 0, 1),\n criterion=trial.suggest_categorical(\"criterion\", (\"gini\", \"entropy\")),\n )\n\n scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True))\n report_cross_validation_scores(trial, scores)\n\n value = scores.mean()\n logging.info(f\"Trial #{trial.number} finished with value {value}.\")\n study.tell(trial, value)\n\n if trial.number > min_n_trials and terminator.should_terminate(study):\n logging.info(\"Terminated by Optuna Terminator!\")\n break\n\n .. 
seealso::\n Please refer to :class:`~optuna.terminator.TerminatorCallback` for how to use\n the terminator mechanism with the :func:`~optuna.study.Study.optimize` method.\n\n \"\"\"\n\n def __init__(\n self,\n improvement_evaluator: Optional[BaseImprovementEvaluator] = None,\n error_evaluator: Optional[BaseErrorEvaluator] = None,\n min_n_trials: int = DEFAULT_MIN_N_TRIALS,\n ) -> None:\n if min_n_trials <= 0:\n raise ValueError(\"`min_n_trials` is expected to be a positive integer.\")\n\n self._improvement_evaluator = improvement_evaluator or RegretBoundEvaluator()\n self._error_evaluator = error_evaluator or self._initialize_error_evalutor()\n self._min_n_trials = min_n_trials\n\n def _initialize_error_evalutor(self) -> BaseErrorEvaluator:\n if isinstance(self._improvement_evaluator, BestValueStagnationEvaluator):\n return StaticErrorEvaluator(constant=0)\n return CrossValidationErrorEvaluator()\n\n def should_terminate(self, study: Study) -> bool:\n \"\"\"Judge whether the study should be terminated based on the reported values.\"\"\"\n trials = study.get_trials(states=[TrialState.COMPLETE])\n\n if len(trials) < self._min_n_trials:\n return False\n\n improvement = self._improvement_evaluator.evaluate(\n trials=study.trials,\n study_direction=study.direction,\n )\n\n error = self._error_evaluator.evaluate(\n trials=study.trials, study_direction=study.direction\n )\n\n should_terminate = improvement < error\n return should_terminate\n", "path": "optuna/terminator/terminator.py" }, { "content": "from typing import Optional\n\nfrom optuna._experimental import experimental_class\nfrom optuna.logging import get_logger\nfrom optuna.study.study import Study\nfrom optuna.terminator.terminator import BaseTerminator\nfrom optuna.terminator.terminator import Terminator\nfrom optuna.trial import FrozenTrial\n\n\n_logger = get_logger(__name__)\n\n\n@experimental_class(\"3.2.0\")\nclass TerminatorCallback:\n \"\"\"A callback that terminates the optimization using Terminator.\n\n This class implements a callback which wraps :class:`~optuna.terminator.Terminator`\n so that it can be used with the :func:`~optuna.study.Study.optimize` method.\n\n Args:\n terminator:\n A terminator object which determines whether to terminate the optimization by\n assessing the room for optimization and statistical error. Defaults to a\n :class:`~optuna.terminator.Terminator` object with default\n ``improvement_evaluator`` and ``error_evaluator``.\n\n Example:\n\n .. testcode::\n\n from sklearn.datasets import load_wine\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import KFold\n\n import optuna\n from optuna.terminator import TerminatorCallback\n from optuna.terminator import report_cross_validation_scores\n\n\n def objective(trial):\n X, y = load_wine(return_X_y=True)\n\n clf = RandomForestClassifier(\n max_depth=trial.suggest_int(\"max_depth\", 2, 32),\n min_samples_split=trial.suggest_float(\"min_samples_split\", 0, 1),\n criterion=trial.suggest_categorical(\"criterion\", (\"gini\", \"entropy\")),\n )\n\n scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True))\n report_cross_validation_scores(trial, scores)\n return scores.mean()\n\n\n study = optuna.create_study(direction=\"maximize\")\n terminator = TerminatorCallback()\n study.optimize(objective, n_trials=50, callbacks=[terminator])\n\n .. 
seealso::\n Please refer to :class:`~optuna.terminator.Terminator` for the details of\n the terminator mechanism.\n \"\"\"\n\n def __init__(\n self,\n terminator: Optional[BaseTerminator] = None,\n ) -> None:\n self._terminator = terminator or Terminator()\n\n def __call__(self, study: Study, trial: FrozenTrial) -> None:\n should_terminate = self._terminator.should_terminate(study=study)\n\n if should_terminate:\n _logger.info(\"The study has been stopped by the terminator.\")\n study.stop()\n", "path": "optuna/terminator/callback.py" } ]
[ { "content": "from __future__ import annotations\n\nimport abc\nfrom typing import Optional\n\nfrom optuna._experimental import experimental_class\nfrom optuna.study.study import Study\nfrom optuna.terminator.erroreval import BaseErrorEvaluator\nfrom optuna.terminator.erroreval import CrossValidationErrorEvaluator\nfrom optuna.terminator.erroreval import StaticErrorEvaluator\nfrom optuna.terminator.improvement.evaluator import BaseImprovementEvaluator\nfrom optuna.terminator.improvement.evaluator import BestValueStagnationEvaluator\nfrom optuna.terminator.improvement.evaluator import DEFAULT_MIN_N_TRIALS\nfrom optuna.terminator.improvement.evaluator import RegretBoundEvaluator\nfrom optuna.trial import TrialState\n\n\nclass BaseTerminator(metaclass=abc.ABCMeta):\n \"\"\"Base class for terminators.\"\"\"\n\n @abc.abstractmethod\n def should_terminate(self, study: Study) -> bool:\n pass\n\n\n@experimental_class(\"3.2.0\")\nclass Terminator(BaseTerminator):\n \"\"\"Automatic stopping mechanism for Optuna studies.\n\n This class implements an automatic stopping mechanism for Optuna studies, aiming to prevent\n unnecessary computation. The study is terminated when the statistical error, e.g.\n cross-validation error, exceeds the room left for optimization.\n\n For further information about the algorithm, please refer to the following paper:\n\n - `A. Makarova et al. Automatic termination for hyperparameter optimization.\n <https://arxiv.org/abs/2104.08166>`_\n\n Args:\n improvement_evaluator:\n An evaluator object for assessing the room left for optimization. Defaults to a\n :class:`~optuna.terminator.improvement.evaluator.RegretBoundEvaluator` object.\n error_evaluator:\n An evaluator for calculating the statistical error, e.g. cross-validation error.\n Defaults to a :class:`~optuna.terminator.CrossValidationErrorEvaluator`\n object.\n min_n_trials:\n The minimum number of trials before termination is considered. Defaults to ``20``.\n\n Raises:\n ValueError: If ``min_n_trials`` is not a positive integer.\n\n Example:\n\n .. testcode::\n\n import logging\n import sys\n\n from sklearn.datasets import load_wine\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import KFold\n\n import optuna\n from optuna.terminator import Terminator\n from optuna.terminator import report_cross_validation_scores\n\n\n study = optuna.create_study(direction=\"maximize\")\n terminator = Terminator()\n min_n_trials = 20\n\n while True:\n trial = study.ask()\n\n X, y = load_wine(return_X_y=True)\n\n clf = RandomForestClassifier(\n max_depth=trial.suggest_int(\"max_depth\", 2, 32),\n min_samples_split=trial.suggest_float(\"min_samples_split\", 0, 1),\n criterion=trial.suggest_categorical(\"criterion\", (\"gini\", \"entropy\")),\n )\n\n scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True))\n report_cross_validation_scores(trial, scores)\n\n value = scores.mean()\n logging.info(f\"Trial #{trial.number} finished with value {value}.\")\n study.tell(trial, value)\n\n if trial.number > min_n_trials and terminator.should_terminate(study):\n logging.info(\"Terminated by Optuna Terminator!\")\n break\n\n .. 
seealso::\n Please refer to :class:`~optuna.terminator.TerminatorCallback` for how to use\n the terminator mechanism with the :func:`~optuna.study.Study.optimize` method.\n\n \"\"\"\n\n def __init__(\n self,\n improvement_evaluator: Optional[BaseImprovementEvaluator] = None,\n error_evaluator: Optional[BaseErrorEvaluator] = None,\n min_n_trials: int = DEFAULT_MIN_N_TRIALS,\n ) -> None:\n if min_n_trials <= 0:\n raise ValueError(\"`min_n_trials` is expected to be a positive integer.\")\n\n self._improvement_evaluator = improvement_evaluator or RegretBoundEvaluator()\n self._error_evaluator = error_evaluator or self._initialize_error_evalutor()\n self._min_n_trials = min_n_trials\n\n def _initialize_error_evalutor(self) -> BaseErrorEvaluator:\n if isinstance(self._improvement_evaluator, BestValueStagnationEvaluator):\n return StaticErrorEvaluator(constant=0)\n return CrossValidationErrorEvaluator()\n\n def should_terminate(self, study: Study) -> bool:\n \"\"\"Judge whether the study should be terminated based on the reported values.\"\"\"\n trials = study.get_trials(states=[TrialState.COMPLETE])\n\n if len(trials) < self._min_n_trials:\n return False\n\n improvement = self._improvement_evaluator.evaluate(\n trials=study.trials,\n study_direction=study.direction,\n )\n\n error = self._error_evaluator.evaluate(\n trials=study.trials, study_direction=study.direction\n )\n\n should_terminate = improvement < error\n return should_terminate\n", "path": "optuna/terminator/terminator.py" }, { "content": "from __future__ import annotations\n\nfrom typing import Optional\n\nfrom optuna._experimental import experimental_class\nfrom optuna.logging import get_logger\nfrom optuna.study.study import Study\nfrom optuna.terminator.terminator import BaseTerminator\nfrom optuna.terminator.terminator import Terminator\nfrom optuna.trial import FrozenTrial\n\n\n_logger = get_logger(__name__)\n\n\n@experimental_class(\"3.2.0\")\nclass TerminatorCallback:\n \"\"\"A callback that terminates the optimization using Terminator.\n\n This class implements a callback which wraps :class:`~optuna.terminator.Terminator`\n so that it can be used with the :func:`~optuna.study.Study.optimize` method.\n\n Args:\n terminator:\n A terminator object which determines whether to terminate the optimization by\n assessing the room for optimization and statistical error. Defaults to a\n :class:`~optuna.terminator.Terminator` object with default\n ``improvement_evaluator`` and ``error_evaluator``.\n\n Example:\n\n .. testcode::\n\n from sklearn.datasets import load_wine\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import KFold\n\n import optuna\n from optuna.terminator import TerminatorCallback\n from optuna.terminator import report_cross_validation_scores\n\n\n def objective(trial):\n X, y = load_wine(return_X_y=True)\n\n clf = RandomForestClassifier(\n max_depth=trial.suggest_int(\"max_depth\", 2, 32),\n min_samples_split=trial.suggest_float(\"min_samples_split\", 0, 1),\n criterion=trial.suggest_categorical(\"criterion\", (\"gini\", \"entropy\")),\n )\n\n scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True))\n report_cross_validation_scores(trial, scores)\n return scores.mean()\n\n\n study = optuna.create_study(direction=\"maximize\")\n terminator = TerminatorCallback()\n study.optimize(objective, n_trials=50, callbacks=[terminator])\n\n .. 
seealso::\n Please refer to :class:`~optuna.terminator.Terminator` for the details of\n the terminator mechanism.\n \"\"\"\n\n def __init__(\n self,\n terminator: Optional[BaseTerminator] = None,\n ) -> None:\n self._terminator = terminator or Terminator()\n\n def __call__(self, study: Study, trial: FrozenTrial) -> None:\n should_terminate = self._terminator.should_terminate(study=study)\n\n if should_terminate:\n _logger.info(\"The study has been stopped by the terminator.\")\n study.stop()\n", "path": "optuna/terminator/callback.py" } ]
diff --git a/optuna/terminator/callback.py b/optuna/terminator/callback.py index 74c484d0e8..dc75f23aa4 100644 --- a/optuna/terminator/callback.py +++ b/optuna/terminator/callback.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Optional from optuna._experimental import experimental_class diff --git a/optuna/terminator/terminator.py b/optuna/terminator/terminator.py index 4970fd2661..7536079f0d 100644 --- a/optuna/terminator/terminator.py +++ b/optuna/terminator/terminator.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import abc from typing import Optional
piskvorky__gensim-2154
ZeroDivisionError: float division by zero Getting error : ZeroDivisionError: float division by zero https://github.com/RaRe-Technologies/gensim/blob/9481915915bf61aa6e4e719a2f26d509677e6779/gensim/summarization/pagerank_weighted.py#L53 ![error](https://user-images.githubusercontent.com/11848354/40821543-3283e832-6585-11e8-9a66-3a4d8ba95eeb.JPG)
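The failure is easy to trigger with a short input whose keyword graph ends up with no edges; the text below is the one later used in the regression test of the patch, so this repro is a sketch against the gensim summarization API of that era rather than a general rule:

```python
from gensim.summarization import keywords

# Short text (taken from the patch's regression test): after cleaning and
# POS filtering, the keyword co-occurrence graph has no edges, and the
# PageRank step in pagerank_weighted.py divides by zero.
text = 'Sitio construcción. Estaremos línea.'
keywords(text, deacc=False, scores=True)  # raises ZeroDivisionError before the fix
```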
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"This module contains functions to find keywords of the text and building graph on tokens from text.\n\nExamples\n--------\nExtract keywords from text\n\n>>> from gensim.summarization import keywords\n>>> text='''Challenges in natural language processing frequently involve\n... speech recognition, natural language understanding, natural language\n... generation (frequently from formal, machine-readable logical forms),\n... connecting language and machine perception, dialog systems, or some\n... combination thereof.'''\n>>> keywords(text).split('\\\\n')\n[u'natural language', u'machine', u'frequently']\n\n\nNotes\n-----\nCheck tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters\nfor `INCLUDING_FILTER` and `EXCLUDING_FILTER`\n\nData:\n-----\n.. data:: WINDOW_SIZE - Size of window, number of consecutive tokens in processing.\n.. data:: INCLUDING_FILTER - Including part of speech filters.\n.. data:: EXCLUDING_FILTER - Excluding part of speech filters.\n\n\"\"\"\n\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word\nfrom gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom gensim.utils import to_unicode\nfrom itertools import combinations as _combinations\nfrom six.moves.queue import Queue as _Queue\nfrom six.moves import xrange\nfrom six import iteritems\n\n\nWINDOW_SIZE = 2\n\nINCLUDING_FILTER = ['NN', 'JJ']\nEXCLUDING_FILTER = []\n\n\ndef _get_pos_filters():\n \"\"\"Get default including and excluding filters as frozen sets.\n\n Returns\n -------\n (frozenset of str, frozenset of str)\n Including and excluding filters.\n\n \"\"\"\n return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)\n\n\ndef _get_words_for_graph(tokens, pos_filter=None):\n \"\"\"Filters given dictionary of tokens using provided part of speech filters.\n\n Parameters\n ----------\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n pos_filter : iterable\n Part of speech filters, optional. 
If `None` - using :func:`_get_pos_filters`.\n\n Returns\n -------\n list of str\n Filtered tokens.\n\n Raises\n ------\n ValueError\n If include and exclude filters ar not empty at the same time.\n\n \"\"\"\n if pos_filter is None:\n include_filters, exclude_filters = _get_pos_filters()\n else:\n include_filters = set(pos_filter)\n exclude_filters = frozenset([])\n if include_filters and exclude_filters:\n raise ValueError(\"Can't use both include and exclude filters, should use only one\")\n\n result = []\n for word, unit in iteritems(tokens):\n if exclude_filters and unit.tag in exclude_filters:\n continue\n if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:\n result.append(unit.token)\n return result\n\n\ndef _get_first_window(split_text):\n \"\"\"Get first :const:`~gensim.parsing.keywords.WINDOW_SIZE` tokens from given `split_text`.\n\n Parameters\n ----------\n split_text : list of str\n Splitted text.\n\n Returns\n -------\n list of str\n First :const:`~gensim.parsing.keywords.WINDOW_SIZE` tokens.\n\n \"\"\"\n return split_text[:WINDOW_SIZE]\n\n\ndef _set_graph_edge(graph, tokens, word_a, word_b):\n \"\"\"Sets an edge between nodes named word_a and word_b if they exists in `tokens` and `graph`, inplace.\n\n Parameters\n ----------\n graph : :class:~gensim.summarization.graph.Graph\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n word_a : str\n First word, name of first node.\n word_b : str\n Second word, name of second node.\n\n \"\"\"\n if word_a in tokens and word_b in tokens:\n lemma_a = tokens[word_a].token\n lemma_b = tokens[word_b].token\n edge = (lemma_a, lemma_b)\n\n if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):\n graph.add_edge(edge)\n\n\ndef _process_first_window(graph, tokens, split_text):\n \"\"\"Sets an edges between nodes taken from first :const:`~gensim.parsing.keywords.WINDOW_SIZE`\n words of `split_text` if they exist in `tokens` and `graph`, inplace.\n\n Parameters\n ----------\n graph : :class:~gensim.summarization.graph.Graph\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n split_text : list of str\n Splitted text.\n\n \"\"\"\n first_window = _get_first_window(split_text)\n for word_a, word_b in _combinations(first_window, 2):\n _set_graph_edge(graph, tokens, word_a, word_b)\n\n\ndef _init_queue(split_text):\n \"\"\"Initialize queue by first words from `split_text`.\n\n Parameters\n ----------\n split_text : list of str\n Splitted text.\n\n Returns\n -------\n Queue\n Initialized queue.\n\n \"\"\"\n queue = _Queue()\n first_window = _get_first_window(split_text)\n for word in first_window[1:]:\n queue.put(word)\n return queue\n\n\ndef _process_word(graph, tokens, queue, word):\n \"\"\"Sets edge between `word` and each element in queue in `graph` if such nodes\n exist in `tokens` and `graph`.\n\n Parameters\n ----------\n graph : :class:`~gensim.summarization.graph.Graph`\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n queue : Queue\n Given queue.\n word : str\n Word, possible `node` in graph and item in `tokens`.\n\n \"\"\"\n for word_to_compare in _queue_iterator(queue):\n _set_graph_edge(graph, tokens, word, word_to_compare)\n\n\ndef _update_queue(queue, word):\n \"\"\"Updates given `queue` (removes last item and puts `word`).\n\n Parameters\n ----------\n queue : Queue\n Given queue.\n word : str\n Word to be 
added to queue.\n\n \"\"\"\n queue.get()\n queue.put(word)\n assert queue.qsize() == (WINDOW_SIZE - 1)\n\n\ndef _process_text(graph, tokens, split_text):\n \"\"\"Process `split_text` by updating given `graph` with new eges between nodes\n if they exists in `tokens` and `graph`.\n Words are taken from `split_text` with window size :const:`~gensim.parsing.keywords.WINDOW_SIZE`.\n\n Parameters\n ----------\n graph : :class:`~gensim.summarization.graph.Graph`\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n split_text : list of str\n Splitted text.\n\n \"\"\"\n queue = _init_queue(split_text)\n for i in xrange(WINDOW_SIZE, len(split_text)):\n word = split_text[i]\n _process_word(graph, tokens, queue, word)\n _update_queue(queue, word)\n\n\ndef _queue_iterator(queue):\n \"\"\"Represents iterator of the given queue.\n\n Parameters\n ----------\n queue : Queue\n Given queue.\n\n Yields\n ------\n str\n Current item of queue.\n\n \"\"\"\n iterations = queue.qsize()\n for _ in xrange(iterations):\n var = queue.get()\n yield var\n queue.put(var)\n\n\ndef _set_graph_edges(graph, tokens, split_text):\n \"\"\"Updates given `graph` by setting eges between nodes if they exists in `tokens` and `graph`.\n Words are taken from `split_text` with window size :const:`~gensim.parsing.keywords.WINDOW_SIZE`.\n\n Parameters\n ----------\n graph : :class:~gensim.summarization.graph.Graph\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n split_text : list of str\n Splitted text.\n\n \"\"\"\n _process_first_window(graph, tokens, split_text)\n _process_text(graph, tokens, split_text)\n\n\ndef _extract_tokens(lemmas, scores, ratio, words):\n \"\"\"Extracts tokens from provided lemmas. Most scored lemmas are used if `words` not provided.\n\n Parameters\n ----------\n lemmas : list of str\n Given lemmas.\n scores : dict\n Dictionary with lemmas and its scores.\n ratio : float\n Proportion of lemmas used for final result.\n words : int\n Number of used words. If no \"words\" option is selected, the number of\n sentences is reduced by the provided ratio, else, the ratio is ignored.\n\n Returns\n -------\n list of (float, str)\n Scores and corresponded lemmas.\n\n \"\"\"\n lemmas.sort(key=lambda s: scores[s], reverse=True)\n length = len(lemmas) * ratio if words is None else words\n return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]\n\n\ndef _lemmas_to_words(tokens):\n \"\"\"Get words and lemmas from given tokens. 
Produces \"reversed\" `tokens`.\n\n Parameters\n ----------\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n\n Returns\n -------\n dict\n Lemmas as keys and lists corresponding words as values.\n\n \"\"\"\n lemma_to_word = {}\n for word, unit in iteritems(tokens):\n lemma = unit.token\n if lemma in lemma_to_word:\n lemma_to_word[lemma].append(word)\n else:\n lemma_to_word[lemma] = [word]\n return lemma_to_word\n\n\ndef _get_keywords_with_score(extracted_lemmas, lemma_to_word):\n \"\"\"Get words of `extracted_lemmas` and its scores, words contains in `lemma_to_word`.\n\n Parameters\n ----------\n extracted_lemmas : list of (float, str)\n Given lemmas with scores\n lemma_to_word : dict\n Lemmas and corresponding words.\n\n Returns\n -------\n dict\n Keywords as keys and its scores as values.\n\n \"\"\"\n\n keywords = {}\n for score, lemma in extracted_lemmas:\n keyword_list = lemma_to_word[lemma]\n for keyword in keyword_list:\n keywords[keyword] = score\n return keywords\n\n\ndef _strip_word(word):\n \"\"\"Get cleaned `word`.\n\n Parameters\n ----------\n word : str\n Given word.\n\n Returns\n -------\n str\n Cleaned word.\n \"\"\"\n stripped_word_list = list(_tokenize_by_word(word))\n return stripped_word_list[0] if stripped_word_list else \"\"\n\n\ndef _get_combined_keywords(_keywords, split_text):\n \"\"\"Get most scored words (`_keywords`) contained in `split_text` and it's combinations.\n\n Parameters\n ----------\n _keywords : dict\n Keywords as keys and its scores as values.\n split_text : list of str\n Splitted text.\n\n Returns\n -------\n list of str\n Keywords and/or its combinations.\n\n \"\"\"\n result = []\n _keywords = _keywords.copy()\n len_text = len(split_text)\n for i in xrange(len_text):\n word = _strip_word(split_text[i])\n if word in _keywords:\n combined_word = [word]\n if i + 1 == len_text:\n result.append(word) # appends last word if keyword and doesn't iterate\n for j in xrange(i + 1, len_text):\n other_word = _strip_word(split_text[j])\n if other_word in _keywords and other_word == split_text[j] and other_word not in combined_word:\n combined_word.append(other_word)\n else:\n for keyword in combined_word:\n _keywords.pop(keyword)\n result.append(\" \".join(combined_word))\n break\n return result\n\n\ndef _get_average_score(concept, _keywords):\n \"\"\"Get average score of words in `concept`.\n\n Parameters\n ----------\n concept : str\n Input text.\n _keywords : dict\n Keywords as keys and its scores as values.\n\n Returns\n -------\n float\n Average score.\n\n \"\"\"\n word_list = concept.split()\n word_counter = 0\n total = 0\n for word in word_list:\n total += _keywords[word]\n word_counter += 1\n return total / word_counter\n\n\ndef _format_results(_keywords, combined_keywords, split, scores):\n \"\"\"Formats, sorts and returns `combined_keywords` in desired format.\n\n Parameters\n ----------\n _keywords : dict\n Keywords as keys and its scores as values.\n combined_keywords : list of str\n Most ranked words and/or its combinations.\n split : bool\n Split result if True or return string otherwise, optional.\n scores : bool\n Whether return `combined_keywords` with scores, optional. 
If True\n `split` is ignored.\n\n Returns\n -------\n result: list of (str, float)\n If `scores`, keywords with scores **OR**\n result: list of str\n If `split`, keywords only **OR**\n result: str\n Keywords, joined by endl.\n\n \"\"\"\n combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)\n if scores:\n return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]\n if split:\n return combined_keywords\n return \"\\n\".join(combined_keywords)\n\n\ndef keywords(text, ratio=0.2, words=None, split=False, scores=False, pos_filter=('NN', 'JJ'),\n lemmatize=False, deacc=True):\n \"\"\"Get most ranked words of provided text and/or its combinations.\n\n Parameters\n ----------\n\n text : str\n Input text.\n ratio : float, optional\n If no \"words\" option is selected, the number of sentences is reduced by the provided ratio,\n else, the ratio is ignored.\n words : int, optional\n Number of returned words.\n split : bool, optional\n Whether split keywords if True.\n scores : bool, optional\n Whether score of keyword.\n pos_filter : tuple, optional\n Part of speech filters.\n lemmatize : bool, optional\n If True - lemmatize words.\n deacc : bool, optional\n If True - remove accentuation.\n\n Returns\n -------\n result: list of (str, float)\n If `scores`, keywords with scores **OR**\n result: list of str\n If `split`, keywords only **OR**\n result: str\n Keywords, joined by endl.\n\n \"\"\"\n # Gets a dict of word -> lemma\n text = to_unicode(text)\n tokens = _clean_text_by_word(text, deacc=deacc)\n split_text = list(_tokenize_by_word(text))\n\n # Creates the graph and adds the edges\n graph = _build_graph(_get_words_for_graph(tokens, pos_filter))\n _set_graph_edges(graph, tokens, split_text)\n del split_text # It's no longer used\n\n _remove_unreachable_nodes(graph)\n\n # Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score\n pagerank_scores = _pagerank(graph)\n\n extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)\n\n # The results can be polluted by many variations of the same word\n if lemmatize:\n lemmas_to_word = {}\n for word, unit in iteritems(tokens):\n lemmas_to_word[unit.token] = [word]\n else:\n lemmas_to_word = _lemmas_to_words(tokens)\n\n keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)\n\n # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined\n combined_keywords = _get_combined_keywords(keywords, text.split())\n\n return _format_results(keywords, combined_keywords, split, scores)\n\n\ndef get_graph(text):\n \"\"\"Creates and returns graph from given text, cleans and tokenize text before building graph.\n\n Parameters\n ----------\n text : str\n Sequence of values.\n\n Returns\n -------\n :class:`~gensim.summarization.graph.Graph`\n Created graph.\n\n \"\"\"\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n graph = _build_graph(_get_words_for_graph(tokens))\n _set_graph_edges(graph, tokens, split_text)\n\n return graph\n", "path": "gensim/summarization/keywords.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"This module contains functions to find keywords of the text and building graph on tokens from text.\n\nExamples\n--------\nExtract keywords from text\n\n>>> from gensim.summarization import keywords\n>>> text='''Challenges in natural language processing frequently involve\n... speech recognition, natural language understanding, natural language\n... generation (frequently from formal, machine-readable logical forms),\n... connecting language and machine perception, dialog systems, or some\n... combination thereof.'''\n>>> keywords(text).split('\\\\n')\n[u'natural language', u'machine', u'frequently']\n\n\nNotes\n-----\nCheck tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters\nfor `INCLUDING_FILTER` and `EXCLUDING_FILTER`\n\nData:\n-----\n.. data:: WINDOW_SIZE - Size of window, number of consecutive tokens in processing.\n.. data:: INCLUDING_FILTER - Including part of speech filters.\n.. data:: EXCLUDING_FILTER - Excluding part of speech filters.\n\n\"\"\"\n\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word\nfrom gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom gensim.utils import to_unicode\nfrom itertools import combinations as _combinations\nfrom six.moves.queue import Queue as _Queue\nfrom six.moves import xrange\nfrom six import iteritems\n\n\nWINDOW_SIZE = 2\n\nINCLUDING_FILTER = ['NN', 'JJ']\nEXCLUDING_FILTER = []\n\n\ndef _get_pos_filters():\n \"\"\"Get default including and excluding filters as frozen sets.\n\n Returns\n -------\n (frozenset of str, frozenset of str)\n Including and excluding filters.\n\n \"\"\"\n return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)\n\n\ndef _get_words_for_graph(tokens, pos_filter=None):\n \"\"\"Filters given dictionary of tokens using provided part of speech filters.\n\n Parameters\n ----------\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n pos_filter : iterable\n Part of speech filters, optional. 
If `None` - using :func:`_get_pos_filters`.\n\n Returns\n -------\n list of str\n Filtered tokens.\n\n Raises\n ------\n ValueError\n If include and exclude filters ar not empty at the same time.\n\n \"\"\"\n if pos_filter is None:\n include_filters, exclude_filters = _get_pos_filters()\n else:\n include_filters = set(pos_filter)\n exclude_filters = frozenset([])\n if include_filters and exclude_filters:\n raise ValueError(\"Can't use both include and exclude filters, should use only one\")\n\n result = []\n for word, unit in iteritems(tokens):\n if exclude_filters and unit.tag in exclude_filters:\n continue\n if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:\n result.append(unit.token)\n return result\n\n\ndef _get_first_window(split_text):\n \"\"\"Get first :const:`~gensim.parsing.keywords.WINDOW_SIZE` tokens from given `split_text`.\n\n Parameters\n ----------\n split_text : list of str\n Splitted text.\n\n Returns\n -------\n list of str\n First :const:`~gensim.parsing.keywords.WINDOW_SIZE` tokens.\n\n \"\"\"\n return split_text[:WINDOW_SIZE]\n\n\ndef _set_graph_edge(graph, tokens, word_a, word_b):\n \"\"\"Sets an edge between nodes named word_a and word_b if they exists in `tokens` and `graph`, inplace.\n\n Parameters\n ----------\n graph : :class:~gensim.summarization.graph.Graph\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n word_a : str\n First word, name of first node.\n word_b : str\n Second word, name of second node.\n\n \"\"\"\n if word_a in tokens and word_b in tokens:\n lemma_a = tokens[word_a].token\n lemma_b = tokens[word_b].token\n edge = (lemma_a, lemma_b)\n\n if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):\n graph.add_edge(edge)\n\n\ndef _process_first_window(graph, tokens, split_text):\n \"\"\"Sets an edges between nodes taken from first :const:`~gensim.parsing.keywords.WINDOW_SIZE`\n words of `split_text` if they exist in `tokens` and `graph`, inplace.\n\n Parameters\n ----------\n graph : :class:~gensim.summarization.graph.Graph\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n split_text : list of str\n Splitted text.\n\n \"\"\"\n first_window = _get_first_window(split_text)\n for word_a, word_b in _combinations(first_window, 2):\n _set_graph_edge(graph, tokens, word_a, word_b)\n\n\ndef _init_queue(split_text):\n \"\"\"Initialize queue by first words from `split_text`.\n\n Parameters\n ----------\n split_text : list of str\n Splitted text.\n\n Returns\n -------\n Queue\n Initialized queue.\n\n \"\"\"\n queue = _Queue()\n first_window = _get_first_window(split_text)\n for word in first_window[1:]:\n queue.put(word)\n return queue\n\n\ndef _process_word(graph, tokens, queue, word):\n \"\"\"Sets edge between `word` and each element in queue in `graph` if such nodes\n exist in `tokens` and `graph`.\n\n Parameters\n ----------\n graph : :class:`~gensim.summarization.graph.Graph`\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n queue : Queue\n Given queue.\n word : str\n Word, possible `node` in graph and item in `tokens`.\n\n \"\"\"\n for word_to_compare in _queue_iterator(queue):\n _set_graph_edge(graph, tokens, word, word_to_compare)\n\n\ndef _update_queue(queue, word):\n \"\"\"Updates given `queue` (removes last item and puts `word`).\n\n Parameters\n ----------\n queue : Queue\n Given queue.\n word : str\n Word to be 
added to queue.\n\n \"\"\"\n queue.get()\n queue.put(word)\n assert queue.qsize() == (WINDOW_SIZE - 1)\n\n\ndef _process_text(graph, tokens, split_text):\n \"\"\"Process `split_text` by updating given `graph` with new eges between nodes\n if they exists in `tokens` and `graph`.\n Words are taken from `split_text` with window size :const:`~gensim.parsing.keywords.WINDOW_SIZE`.\n\n Parameters\n ----------\n graph : :class:`~gensim.summarization.graph.Graph`\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n split_text : list of str\n Splitted text.\n\n \"\"\"\n queue = _init_queue(split_text)\n for i in xrange(WINDOW_SIZE, len(split_text)):\n word = split_text[i]\n _process_word(graph, tokens, queue, word)\n _update_queue(queue, word)\n\n\ndef _queue_iterator(queue):\n \"\"\"Represents iterator of the given queue.\n\n Parameters\n ----------\n queue : Queue\n Given queue.\n\n Yields\n ------\n str\n Current item of queue.\n\n \"\"\"\n iterations = queue.qsize()\n for _ in xrange(iterations):\n var = queue.get()\n yield var\n queue.put(var)\n\n\ndef _set_graph_edges(graph, tokens, split_text):\n \"\"\"Updates given `graph` by setting eges between nodes if they exists in `tokens` and `graph`.\n Words are taken from `split_text` with window size :const:`~gensim.parsing.keywords.WINDOW_SIZE`.\n\n Parameters\n ----------\n graph : :class:~gensim.summarization.graph.Graph\n Given graph.\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n split_text : list of str\n Splitted text.\n\n \"\"\"\n _process_first_window(graph, tokens, split_text)\n _process_text(graph, tokens, split_text)\n\n\ndef _extract_tokens(lemmas, scores, ratio, words):\n \"\"\"Extracts tokens from provided lemmas. Most scored lemmas are used if `words` not provided.\n\n Parameters\n ----------\n lemmas : list of str\n Given lemmas.\n scores : dict\n Dictionary with lemmas and its scores.\n ratio : float\n Proportion of lemmas used for final result.\n words : int\n Number of used words. If no \"words\" option is selected, the number of\n sentences is reduced by the provided ratio, else, the ratio is ignored.\n\n Returns\n -------\n list of (float, str)\n Scores and corresponded lemmas.\n\n \"\"\"\n lemmas.sort(key=lambda s: scores[s], reverse=True)\n length = len(lemmas) * ratio if words is None else words\n return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]\n\n\ndef _lemmas_to_words(tokens):\n \"\"\"Get words and lemmas from given tokens. 
Produces \"reversed\" `tokens`.\n\n Parameters\n ----------\n tokens : dict\n Original units (words) as keys and processed units (tokens) as values.\n\n Returns\n -------\n dict\n Lemmas as keys and lists corresponding words as values.\n\n \"\"\"\n lemma_to_word = {}\n for word, unit in iteritems(tokens):\n lemma = unit.token\n if lemma in lemma_to_word:\n lemma_to_word[lemma].append(word)\n else:\n lemma_to_word[lemma] = [word]\n return lemma_to_word\n\n\ndef _get_keywords_with_score(extracted_lemmas, lemma_to_word):\n \"\"\"Get words of `extracted_lemmas` and its scores, words contains in `lemma_to_word`.\n\n Parameters\n ----------\n extracted_lemmas : list of (float, str)\n Given lemmas with scores\n lemma_to_word : dict\n Lemmas and corresponding words.\n\n Returns\n -------\n dict\n Keywords as keys and its scores as values.\n\n \"\"\"\n\n keywords = {}\n for score, lemma in extracted_lemmas:\n keyword_list = lemma_to_word[lemma]\n for keyword in keyword_list:\n keywords[keyword] = score\n return keywords\n\n\ndef _strip_word(word):\n \"\"\"Get cleaned `word`.\n\n Parameters\n ----------\n word : str\n Given word.\n\n Returns\n -------\n str\n Cleaned word.\n \"\"\"\n stripped_word_list = list(_tokenize_by_word(word))\n return stripped_word_list[0] if stripped_word_list else \"\"\n\n\ndef _get_combined_keywords(_keywords, split_text):\n \"\"\"Get most scored words (`_keywords`) contained in `split_text` and it's combinations.\n\n Parameters\n ----------\n _keywords : dict\n Keywords as keys and its scores as values.\n split_text : list of str\n Splitted text.\n\n Returns\n -------\n list of str\n Keywords and/or its combinations.\n\n \"\"\"\n result = []\n _keywords = _keywords.copy()\n len_text = len(split_text)\n for i in xrange(len_text):\n word = _strip_word(split_text[i])\n if word in _keywords:\n combined_word = [word]\n if i + 1 == len_text:\n result.append(word) # appends last word if keyword and doesn't iterate\n for j in xrange(i + 1, len_text):\n other_word = _strip_word(split_text[j])\n if other_word in _keywords and other_word == split_text[j] and other_word not in combined_word:\n combined_word.append(other_word)\n else:\n for keyword in combined_word:\n _keywords.pop(keyword)\n result.append(\" \".join(combined_word))\n break\n return result\n\n\ndef _get_average_score(concept, _keywords):\n \"\"\"Get average score of words in `concept`.\n\n Parameters\n ----------\n concept : str\n Input text.\n _keywords : dict\n Keywords as keys and its scores as values.\n\n Returns\n -------\n float\n Average score.\n\n \"\"\"\n word_list = concept.split()\n word_counter = 0\n total = 0\n for word in word_list:\n total += _keywords[word]\n word_counter += 1\n return total / word_counter\n\n\ndef _format_results(_keywords, combined_keywords, split, scores):\n \"\"\"Formats, sorts and returns `combined_keywords` in desired format.\n\n Parameters\n ----------\n _keywords : dict\n Keywords as keys and its scores as values.\n combined_keywords : list of str\n Most ranked words and/or its combinations.\n split : bool\n Split result if True or return string otherwise, optional.\n scores : bool\n Whether return `combined_keywords` with scores, optional. 
If True\n `split` is ignored.\n\n Returns\n -------\n result: list of (str, float)\n If `scores`, keywords with scores **OR**\n result: list of str\n If `split`, keywords only **OR**\n result: str\n Keywords, joined by endl.\n\n \"\"\"\n combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)\n if scores:\n return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]\n if split:\n return combined_keywords\n return \"\\n\".join(combined_keywords)\n\n\ndef keywords(text, ratio=0.2, words=None, split=False, scores=False, pos_filter=('NN', 'JJ'),\n lemmatize=False, deacc=True):\n \"\"\"Get most ranked words of provided text and/or its combinations.\n\n Parameters\n ----------\n\n text : str\n Input text.\n ratio : float, optional\n If no \"words\" option is selected, the number of sentences is reduced by the provided ratio,\n else, the ratio is ignored.\n words : int, optional\n Number of returned words.\n split : bool, optional\n Whether split keywords if True.\n scores : bool, optional\n Whether score of keyword.\n pos_filter : tuple, optional\n Part of speech filters.\n lemmatize : bool, optional\n If True - lemmatize words.\n deacc : bool, optional\n If True - remove accentuation.\n\n Returns\n -------\n result: list of (str, float)\n If `scores`, keywords with scores **OR**\n result: list of str\n If `split`, keywords only **OR**\n result: str\n Keywords, joined by endl.\n\n \"\"\"\n # Gets a dict of word -> lemma\n text = to_unicode(text)\n tokens = _clean_text_by_word(text, deacc=deacc)\n split_text = list(_tokenize_by_word(text))\n\n # Creates the graph and adds the edges\n graph = _build_graph(_get_words_for_graph(tokens, pos_filter))\n _set_graph_edges(graph, tokens, split_text)\n del split_text # It's no longer used\n\n _remove_unreachable_nodes(graph)\n\n if not graph.edges():\n return _format_results([], [], split, scores)\n\n # Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score\n pagerank_scores = _pagerank(graph)\n\n extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)\n\n # The results can be polluted by many variations of the same word\n if lemmatize:\n lemmas_to_word = {}\n for word, unit in iteritems(tokens):\n lemmas_to_word[unit.token] = [word]\n else:\n lemmas_to_word = _lemmas_to_words(tokens)\n\n keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)\n\n # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined\n combined_keywords = _get_combined_keywords(keywords, text.split())\n\n return _format_results(keywords, combined_keywords, split, scores)\n\n\ndef get_graph(text):\n \"\"\"Creates and returns graph from given text, cleans and tokenize text before building graph.\n\n Parameters\n ----------\n text : str\n Sequence of values.\n\n Returns\n -------\n :class:`~gensim.summarization.graph.Graph`\n Created graph.\n\n \"\"\"\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n graph = _build_graph(_get_words_for_graph(tokens))\n _set_graph_edges(graph, tokens, split_text)\n\n return graph\n", "path": "gensim/summarization/keywords.py" } ]
diff --git a/gensim/summarization/keywords.py b/gensim/summarization/keywords.py index 4074088a04..9f43158146 100644 --- a/gensim/summarization/keywords.py +++ b/gensim/summarization/keywords.py @@ -512,6 +512,9 @@ def keywords(text, ratio=0.2, words=None, split=False, scores=False, pos_filter= _remove_unreachable_nodes(graph) + if not graph.edges(): + return _format_results([], [], split, scores) + # Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score pagerank_scores = _pagerank(graph) diff --git a/gensim/test/test_keywords.py b/gensim/test/test_keywords.py index c8fae400da..79df82fba6 100644 --- a/gensim/test/test_keywords.py +++ b/gensim/test/test_keywords.py @@ -95,6 +95,12 @@ def test_text_keywords_with_small_graph(self): kwds = keywords(text, words=1, split=True) self.assertTrue(len(kwds)) + def test_text_keywords_without_graph_edges(self): + # regression test, we get graph with no edges on this text + text = 'Sitio construcción. Estaremos línea.' + kwds = keywords(text, deacc=False, scores=True) + self.assertFalse(len(kwds)) + if __name__ == '__main__': logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
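With the early return on an edge-less graph in place, the same degenerate input yields an empty result instead of raising, mirroring the regression test in the diff above (a sketch assuming the patched gensim):

```python
from gensim.summarization import keywords

# After the `if not graph.edges()` guard, degenerate inputs simply produce
# no keywords rather than a ZeroDivisionError.
kwds = keywords('Sitio construcción. Estaremos línea.', deacc=False, scores=True)
assert len(kwds) == 0
```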
vispy__vispy-2059
Transforms broken in SceneCanvas with glfw backend Hello. I'm using vispy 0.5.1 on Debian 8 with a PyQt5 backend. When I run my app, the SceneCanvas is stuck with an identity transform (which looks broken, obviously) until I resize the window or press a mouse button in the window. This also happens with the example https://github.com/vispy/vispy/blob/master/examples/demo/scene/oscilloscope.py when not using fullscreen. My workaround was to call SceneCanvas's `_update_transforms()` right after creating the canvas.
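The workaround amounts to forcing a transform refresh immediately after construction. A rough sketch is below; the constructor arguments are only illustrative, and `_update_transforms()` is a private `SceneCanvas` method, so this is a stopgap rather than a supported API:

```python
from vispy import app, scene

# Build the scene canvas as usual...
canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view()

# ...then force the transform system to update right away, instead of
# waiting for the first resize or mouse event to trigger it.
canvas._update_transforms()

if __name__ == '__main__':
    app.run()
```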
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"vispy backend for glfw.\"\"\"\n\n# To install GLFW on Ubuntu, use sudo apt-get install libglfw3.\n# On OSX, consider using brew.\n\nfrom __future__ import division\n\nimport atexit\nfrom time import sleep\nimport gc\nimport os\n\nfrom ..base import (BaseApplicationBackend, BaseCanvasBackend,\n BaseTimerBackend)\nfrom ...util import keys, logger\nfrom ...util.ptime import time\nfrom ... import config\n\nUSE_EGL = config['gl_backend'].lower().startswith('es')\n\n\n# -------------------------------------------------------------------- init ---\n\ntry:\n from ...ext import glfw\n\n # Map native keys to vispy keys\n KEYMAP = {\n glfw.GLFW_KEY_LEFT_SHIFT: keys.SHIFT,\n glfw.GLFW_KEY_RIGHT_SHIFT: keys.SHIFT,\n glfw.GLFW_KEY_LEFT_CONTROL: keys.CONTROL,\n glfw.GLFW_KEY_RIGHT_CONTROL: keys.CONTROL,\n glfw.GLFW_KEY_LEFT_ALT: keys.ALT,\n glfw.GLFW_KEY_RIGHT_ALT: keys.ALT,\n glfw.GLFW_KEY_LEFT_SUPER: keys.META,\n glfw.GLFW_KEY_RIGHT_SUPER: keys.META,\n\n glfw.GLFW_KEY_LEFT: keys.LEFT,\n glfw.GLFW_KEY_UP: keys.UP,\n glfw.GLFW_KEY_RIGHT: keys.RIGHT,\n glfw.GLFW_KEY_DOWN: keys.DOWN,\n glfw.GLFW_KEY_PAGE_UP: keys.PAGEUP,\n glfw.GLFW_KEY_PAGE_DOWN: keys.PAGEDOWN,\n\n glfw.GLFW_KEY_INSERT: keys.INSERT,\n glfw.GLFW_KEY_DELETE: keys.DELETE,\n glfw.GLFW_KEY_HOME: keys.HOME,\n glfw.GLFW_KEY_END: keys.END,\n\n glfw.GLFW_KEY_ESCAPE: keys.ESCAPE,\n glfw.GLFW_KEY_BACKSPACE: keys.BACKSPACE,\n\n glfw.GLFW_KEY_F1: keys.F1,\n glfw.GLFW_KEY_F2: keys.F2,\n glfw.GLFW_KEY_F3: keys.F3,\n glfw.GLFW_KEY_F4: keys.F4,\n glfw.GLFW_KEY_F5: keys.F5,\n glfw.GLFW_KEY_F6: keys.F6,\n glfw.GLFW_KEY_F7: keys.F7,\n glfw.GLFW_KEY_F8: keys.F8,\n glfw.GLFW_KEY_F9: keys.F9,\n glfw.GLFW_KEY_F10: keys.F10,\n glfw.GLFW_KEY_F11: keys.F11,\n glfw.GLFW_KEY_F12: keys.F12,\n\n glfw.GLFW_KEY_SPACE: keys.SPACE,\n glfw.GLFW_KEY_ENTER: keys.ENTER,\n '\\r': keys.ENTER,\n glfw.GLFW_KEY_TAB: keys.TAB,\n }\n\n BUTTONMAP = {glfw.GLFW_MOUSE_BUTTON_LEFT: 1,\n glfw.GLFW_MOUSE_BUTTON_RIGHT: 2,\n glfw.GLFW_MOUSE_BUTTON_MIDDLE: 3\n }\nexcept Exception as exp:\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n if USE_EGL:\n available, testable, why_not = False, False, 'EGL not supported'\n which = 'glfw ' + str(glfw.__version__)\n else:\n available, testable, why_not = True, True, None\n which = 'glfw ' + str(glfw.__version__)\n\nMOD_KEYS = [keys.SHIFT, keys.ALT, keys.CONTROL, keys.META]\n_GLFW_INITIALIZED = False\n_VP_GLFW_ALL_WINDOWS = []\n\n\ndef _get_glfw_windows():\n wins = list()\n for win in _VP_GLFW_ALL_WINDOWS:\n if isinstance(win, CanvasBackend):\n wins.append(win)\n return wins\n\n\n# -------------------------------------------------------------- capability ---\n\ncapability = dict( # things that can be set by the backend\n title=True,\n size=True,\n position=True,\n show=True,\n vsync=True,\n resizable=True,\n decorate=True,\n fullscreen=True,\n context=True,\n multi_window=True,\n scroll=True,\n parent=False,\n always_on_top=True,\n)\n\n\n# ------------------------------------------------------- set_configuration ---\n\ndef _set_config(c):\n \"\"\"Set gl configuration for GLFW.\"\"\"\n glfw.glfwWindowHint(glfw.GLFW_RED_BITS, c['red_size'])\n glfw.glfwWindowHint(glfw.GLFW_GREEN_BITS, c['green_size'])\n glfw.glfwWindowHint(glfw.GLFW_BLUE_BITS, c['blue_size'])\n glfw.glfwWindowHint(glfw.GLFW_ALPHA_BITS, c['alpha_size'])\n\n 
glfw.glfwWindowHint(glfw.GLFW_ACCUM_RED_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_GREEN_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_BLUE_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_ALPHA_BITS, 0)\n\n glfw.glfwWindowHint(glfw.GLFW_DEPTH_BITS, c['depth_size'])\n glfw.glfwWindowHint(glfw.GLFW_STENCIL_BITS, c['stencil_size'])\n # glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR, c['major_version'])\n # glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR, c['minor_version'])\n # glfw.glfwWindowHint(glfw.GLFW_SRGB_CAPABLE, c['srgb'])\n glfw.glfwWindowHint(glfw.GLFW_SAMPLES, c['samples'])\n glfw.glfwWindowHint(glfw.GLFW_STEREO, c['stereo'])\n if not c['double_buffer']:\n raise RuntimeError('GLFW must double buffer, consider using a '\n 'different backend, or using double buffering')\n\n\n# ------------------------------------------------------------- application ---\n\n\n_glfw_errors = []\n\n\ndef _error_callback(num, descr):\n _glfw_errors.append('Error %s: %s' % (num, descr))\n\n\nclass ApplicationBackend(BaseApplicationBackend):\n\n def __init__(self):\n BaseApplicationBackend.__init__(self)\n self._timers = list()\n\n def _add_timer(self, timer):\n if timer not in self._timers:\n self._timers.append(timer)\n\n def _vispy_get_backend_name(self):\n return 'Glfw'\n\n def _vispy_process_events(self):\n glfw.glfwPollEvents()\n for timer in self._timers:\n timer._tick()\n wins = _get_glfw_windows()\n for win in wins:\n if win._needs_draw:\n win._needs_draw = False\n win._on_draw()\n\n def _vispy_run(self):\n wins = _get_glfw_windows()\n while any(w._id is not None and not glfw.glfwWindowShouldClose(w._id)\n for w in wins):\n self._vispy_process_events()\n self._vispy_quit() # to clean up\n\n def _vispy_quit(self):\n # Close windows\n wins = _get_glfw_windows()\n for win in wins:\n if win._vispy_canvas is not None:\n win._vispy_canvas.close()\n # tear down timers\n for timer in self._timers:\n timer._vispy_stop()\n self._timers = []\n\n def _vispy_get_native_app(self):\n global _GLFW_INITIALIZED\n if not _GLFW_INITIALIZED:\n cwd = os.getcwd()\n glfw.glfwSetErrorCallback(_error_callback)\n try:\n if not glfw.glfwInit(): # only ever call once\n raise OSError('Could not init glfw:\\n%r' % _glfw_errors)\n finally:\n os.chdir(cwd)\n glfw.glfwSetErrorCallback(0)\n atexit.register(glfw.glfwTerminate)\n _GLFW_INITIALIZED = True\n return glfw\n\n\n# ------------------------------------------------------------------ canvas ---\n\nclass CanvasBackend(BaseCanvasBackend):\n \"\"\"Glfw backend for Canvas abstract class.\"\"\"\n\n # args are for BaseCanvasBackend, kwargs are for us.\n def __init__(self, *args, **kwargs):\n BaseCanvasBackend.__init__(self, *args)\n p = self._process_backend_kwargs(kwargs)\n self._initialized = False\n\n # Deal with config\n _set_config(p.context.config)\n # Deal with context\n p.context.shared.add_ref('glfw', self)\n if p.context.shared.ref is self:\n share = None\n else:\n share = p.context.shared.ref._id\n\n glfw.glfwWindowHint(glfw.GLFW_REFRESH_RATE, 0) # highest possible\n glfw.glfwSwapInterval(1 if p.vsync else 0)\n glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, int(p.resizable))\n glfw.glfwWindowHint(glfw.GLFW_DECORATED, int(p.decorate))\n glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # start out hidden\n glfw.glfwWindowHint(glfw.GLFW_FLOATING, int(p.always_on_top))\n if p.fullscreen is not False:\n self._fullscreen = True\n if p.fullscreen is True:\n monitor = glfw.glfwGetPrimaryMonitor()\n else:\n monitor = glfw.glfwGetMonitors()\n if p.fullscreen >= len(monitor):\n 
raise ValueError('fullscreen must be <= %s'\n % len(monitor))\n monitor = monitor[p.fullscreen]\n use_size = glfw.glfwGetVideoMode(monitor)[:2]\n if use_size != tuple(p.size):\n logger.debug('Requested size %s, will be ignored to '\n 'use fullscreen mode %s' % (p.size, use_size))\n size = use_size\n else:\n self._fullscreen = False\n monitor = None\n size = p.size\n\n self._id = glfw.glfwCreateWindow(width=size[0], height=size[1],\n title=p.title, monitor=monitor,\n share=share)\n if not self._id:\n raise RuntimeError('Could not create window')\n\n _VP_GLFW_ALL_WINDOWS.append(self)\n self._mod = list()\n\n # Register callbacks\n glfw.glfwSetWindowRefreshCallback(self._id, self._on_draw)\n glfw.glfwSetWindowSizeCallback(self._id, self._on_resize)\n glfw.glfwSetKeyCallback(self._id, self._on_key_press)\n glfw.glfwSetCharCallback(self._id, self._on_key_char)\n glfw.glfwSetMouseButtonCallback(self._id, self._on_mouse_button)\n glfw.glfwSetScrollCallback(self._id, self._on_mouse_scroll)\n glfw.glfwSetCursorPosCallback(self._id, self._on_mouse_motion)\n glfw.glfwSetWindowCloseCallback(self._id, self._on_close)\n self._vispy_canvas_ = None\n self._needs_draw = False\n self._vispy_canvas.set_current()\n if p.position is not None:\n self._vispy_set_position(*p.position)\n if p.show:\n glfw.glfwShowWindow(self._id)\n\n # Init\n self._initialized = True\n self._next_key_events = []\n self._next_key_text = {}\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.initialize()\n\n def _vispy_warmup(self):\n etime = time() + 0.25\n while time() < etime:\n sleep(0.01)\n self._vispy_canvas.set_current()\n self._vispy_canvas.app.process_events()\n\n def _vispy_set_current(self):\n if self._id is None:\n return\n # Make this the current context\n glfw.glfwMakeContextCurrent(self._id)\n\n def _vispy_swap_buffers(self):\n if self._id is None:\n return\n # Swap front and back buffer\n glfw.glfwSwapBuffers(self._id)\n\n def _vispy_set_title(self, title):\n if self._id is None:\n return\n # Set the window title. Has no effect for widgets\n glfw.glfwSetWindowTitle(self._id, title.encode('utf-8'))\n\n def _vispy_set_size(self, w, h):\n if self._id is None:\n return\n # Set size of the widget or window\n glfw.glfwSetWindowSize(self._id, w, h)\n\n def _vispy_set_position(self, x, y):\n if self._id is None:\n return\n # Set position of the widget or window. 
May have no effect for widgets\n glfw.glfwSetWindowPos(self._id, x, y)\n\n def _vispy_set_visible(self, visible):\n # Show or hide the window or widget\n if self._id is None:\n return\n if visible:\n glfw.glfwShowWindow(self._id)\n # this ensures that the show takes effect\n self._vispy_update()\n else:\n glfw.glfwHideWindow(self._id)\n\n def _vispy_set_fullscreen(self, fullscreen):\n logger.warn('Cannot change fullscreen mode for GLFW backend')\n\n def _vispy_update(self):\n # Invoke a redraw, passing it on to the canvas\n if self._vispy_canvas is None or self._id is None:\n return\n # Mark that this window wants to be drawn on the next loop iter\n self._needs_draw = True\n\n def _vispy_close(self):\n # Force the window or widget to shut down\n if self._id is not None:\n self._vispy_canvas = None\n # glfw.glfwSetWindowShouldClose() # Does not really cause a close\n self._vispy_set_visible(False)\n self._id, id_ = None, self._id\n glfw.glfwDestroyWindow(id_)\n gc.collect() # help ensure context gets destroyed\n\n def _vispy_get_size(self):\n if self._id is None:\n return\n w, h = glfw.glfwGetWindowSize(self._id)\n return w, h\n\n def _vispy_get_physical_size(self):\n if self._id is None:\n return\n w, h = glfw.glfwGetFramebufferSize(self._id)\n return w, h\n\n def _vispy_get_position(self):\n if self._id is None:\n return\n x, y = glfw.glfwGetWindowPos(self._id)\n return x, y\n\n def _vispy_get_fullscreen(self):\n return self._fullscreen\n\n ##########################################\n # Notify vispy of events triggered by GLFW\n def _on_resize(self, _id, w, h):\n if self._vispy_canvas is None:\n return\n self._vispy_canvas.events.resize(\n size=(w, h), physical_size=self._vispy_get_physical_size())\n\n def _on_close(self, _id):\n if self._vispy_canvas is None:\n return\n self._vispy_canvas.close()\n\n def _on_draw(self, _id=None):\n if self._vispy_canvas is None or self._id is None:\n return\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.draw(region=None) # (0, 0, w, h))\n\n def _on_mouse_button(self, _id, button, action, mod):\n if self._vispy_canvas is None and self._id is not None:\n return\n pos = glfw.glfwGetCursorPos(self._id)\n if button < 3:\n # Mouse click event\n button = BUTTONMAP.get(button, 0)\n if action == glfw.GLFW_PRESS:\n fun = self._vispy_mouse_press\n elif action == glfw.GLFW_RELEASE:\n fun = self._vispy_mouse_release\n else:\n return\n fun(pos=pos, button=button, modifiers=self._mod)\n\n def _on_mouse_scroll(self, _id, x_off, y_off):\n if self._vispy_canvas is None and self._id is not None:\n return\n pos = glfw.glfwGetCursorPos(self._id)\n delta = (float(x_off), float(y_off))\n self._vispy_canvas.events.mouse_wheel(pos=pos, delta=delta,\n modifiers=self._mod)\n\n def _on_mouse_motion(self, _id, x, y):\n if self._vispy_canvas is None:\n return\n self._vispy_mouse_move(pos=(x, y), modifiers=self._mod)\n\n def _on_key_press(self, _id, key, scancode, action, mod):\n if self._vispy_canvas is None:\n return\n key, text = self._process_key(key)\n if action == glfw.GLFW_PRESS:\n fun = self._vispy_canvas.events.key_press\n down = True\n elif action == glfw.GLFW_RELEASE:\n fun = self._vispy_canvas.events.key_release\n down = False\n else:\n return\n self._process_mod(key, down=down)\n\n # NOTE: GLFW only provides localized characters via _on_key_char, so if\n # this event contains a character we store all other data and dispatch\n # it once the final unicode character is sent shortly after.\n if text != '' and action == glfw.GLFW_PRESS:\n 
self._next_key_events.append((fun, key, self._mod))\n else:\n if key in self._next_key_text:\n text = self._next_key_text[key]\n del self._next_key_text[key]\n fun(key=key, text=text, modifiers=self._mod)\n\n def _on_key_char(self, _id, text):\n # Repeat strokes (frequency configured at OS) are sent here only,\n # no regular _on_key_press events. Currently ignored!\n if len(self._next_key_events) == 0:\n return\n\n (fun, key, mod) = self._next_key_events.pop(0)\n fun(key=key, text=chr(text), modifiers=mod)\n self._next_key_text[key] = text\n\n def _process_key(self, key):\n if 32 <= key <= 127:\n return keys.Key(chr(key)), chr(key)\n elif key in KEYMAP:\n return KEYMAP[key], ''\n else:\n return None, ''\n\n def _process_mod(self, key, down):\n \"\"\"Process (possible) keyboard modifiers\n\n GLFW provides \"mod\" with many callbacks, but not (critically) the\n scroll callback, so we keep track on our own here.\n \"\"\"\n if key in MOD_KEYS:\n if down:\n if key not in self._mod:\n self._mod.append(key)\n elif key in self._mod:\n self._mod.pop(self._mod.index(key))\n return self._mod\n\n\n# ------------------------------------------------------------------- timer ---\n\nclass TimerBackend(BaseTimerBackend):\n\n def __init__(self, vispy_timer):\n BaseTimerBackend.__init__(self, vispy_timer)\n vispy_timer._app._backend._add_timer(self)\n self._vispy_stop()\n\n def _vispy_start(self, interval):\n self._interval = interval\n self._next_time = time() + self._interval\n\n def _vispy_stop(self):\n self._next_time = float('inf')\n\n def _tick(self):\n if time() >= self._next_time:\n self._vispy_timer._timeout()\n self._next_time = time() + self._interval\n", "path": "vispy/app/backends/_glfw.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"vispy backend for glfw.\"\"\"\n\n# To install GLFW on Ubuntu, use sudo apt-get install libglfw3.\n# On OSX, consider using brew.\n\nfrom __future__ import division\n\nimport atexit\nfrom time import sleep\nimport gc\nimport os\n\nfrom ..base import (BaseApplicationBackend, BaseCanvasBackend,\n BaseTimerBackend)\nfrom ...util import keys, logger\nfrom ...util.ptime import time\nfrom ... import config\n\nUSE_EGL = config['gl_backend'].lower().startswith('es')\n\n\n# -------------------------------------------------------------------- init ---\n\ntry:\n from ...ext import glfw\n\n # Map native keys to vispy keys\n KEYMAP = {\n glfw.GLFW_KEY_LEFT_SHIFT: keys.SHIFT,\n glfw.GLFW_KEY_RIGHT_SHIFT: keys.SHIFT,\n glfw.GLFW_KEY_LEFT_CONTROL: keys.CONTROL,\n glfw.GLFW_KEY_RIGHT_CONTROL: keys.CONTROL,\n glfw.GLFW_KEY_LEFT_ALT: keys.ALT,\n glfw.GLFW_KEY_RIGHT_ALT: keys.ALT,\n glfw.GLFW_KEY_LEFT_SUPER: keys.META,\n glfw.GLFW_KEY_RIGHT_SUPER: keys.META,\n\n glfw.GLFW_KEY_LEFT: keys.LEFT,\n glfw.GLFW_KEY_UP: keys.UP,\n glfw.GLFW_KEY_RIGHT: keys.RIGHT,\n glfw.GLFW_KEY_DOWN: keys.DOWN,\n glfw.GLFW_KEY_PAGE_UP: keys.PAGEUP,\n glfw.GLFW_KEY_PAGE_DOWN: keys.PAGEDOWN,\n\n glfw.GLFW_KEY_INSERT: keys.INSERT,\n glfw.GLFW_KEY_DELETE: keys.DELETE,\n glfw.GLFW_KEY_HOME: keys.HOME,\n glfw.GLFW_KEY_END: keys.END,\n\n glfw.GLFW_KEY_ESCAPE: keys.ESCAPE,\n glfw.GLFW_KEY_BACKSPACE: keys.BACKSPACE,\n\n glfw.GLFW_KEY_F1: keys.F1,\n glfw.GLFW_KEY_F2: keys.F2,\n glfw.GLFW_KEY_F3: keys.F3,\n glfw.GLFW_KEY_F4: keys.F4,\n glfw.GLFW_KEY_F5: keys.F5,\n glfw.GLFW_KEY_F6: keys.F6,\n glfw.GLFW_KEY_F7: keys.F7,\n glfw.GLFW_KEY_F8: keys.F8,\n glfw.GLFW_KEY_F9: keys.F9,\n glfw.GLFW_KEY_F10: keys.F10,\n glfw.GLFW_KEY_F11: keys.F11,\n glfw.GLFW_KEY_F12: keys.F12,\n\n glfw.GLFW_KEY_SPACE: keys.SPACE,\n glfw.GLFW_KEY_ENTER: keys.ENTER,\n '\\r': keys.ENTER,\n glfw.GLFW_KEY_TAB: keys.TAB,\n }\n\n BUTTONMAP = {glfw.GLFW_MOUSE_BUTTON_LEFT: 1,\n glfw.GLFW_MOUSE_BUTTON_RIGHT: 2,\n glfw.GLFW_MOUSE_BUTTON_MIDDLE: 3\n }\nexcept Exception as exp:\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n if USE_EGL:\n available, testable, why_not = False, False, 'EGL not supported'\n which = 'glfw ' + str(glfw.__version__)\n else:\n available, testable, why_not = True, True, None\n which = 'glfw ' + str(glfw.__version__)\n\nMOD_KEYS = [keys.SHIFT, keys.ALT, keys.CONTROL, keys.META]\n_GLFW_INITIALIZED = False\n_VP_GLFW_ALL_WINDOWS = []\n\n\ndef _get_glfw_windows():\n wins = list()\n for win in _VP_GLFW_ALL_WINDOWS:\n if isinstance(win, CanvasBackend):\n wins.append(win)\n return wins\n\n\n# -------------------------------------------------------------- capability ---\n\ncapability = dict( # things that can be set by the backend\n title=True,\n size=True,\n position=True,\n show=True,\n vsync=True,\n resizable=True,\n decorate=True,\n fullscreen=True,\n context=True,\n multi_window=True,\n scroll=True,\n parent=False,\n always_on_top=True,\n)\n\n\n# ------------------------------------------------------- set_configuration ---\n\ndef _set_config(c):\n \"\"\"Set gl configuration for GLFW.\"\"\"\n glfw.glfwWindowHint(glfw.GLFW_RED_BITS, c['red_size'])\n glfw.glfwWindowHint(glfw.GLFW_GREEN_BITS, c['green_size'])\n glfw.glfwWindowHint(glfw.GLFW_BLUE_BITS, c['blue_size'])\n glfw.glfwWindowHint(glfw.GLFW_ALPHA_BITS, c['alpha_size'])\n\n 
glfw.glfwWindowHint(glfw.GLFW_ACCUM_RED_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_GREEN_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_BLUE_BITS, 0)\n glfw.glfwWindowHint(glfw.GLFW_ACCUM_ALPHA_BITS, 0)\n\n glfw.glfwWindowHint(glfw.GLFW_DEPTH_BITS, c['depth_size'])\n glfw.glfwWindowHint(glfw.GLFW_STENCIL_BITS, c['stencil_size'])\n # glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR, c['major_version'])\n # glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR, c['minor_version'])\n # glfw.glfwWindowHint(glfw.GLFW_SRGB_CAPABLE, c['srgb'])\n glfw.glfwWindowHint(glfw.GLFW_SAMPLES, c['samples'])\n glfw.glfwWindowHint(glfw.GLFW_STEREO, c['stereo'])\n if not c['double_buffer']:\n raise RuntimeError('GLFW must double buffer, consider using a '\n 'different backend, or using double buffering')\n\n\n# ------------------------------------------------------------- application ---\n\n\n_glfw_errors = []\n\n\ndef _error_callback(num, descr):\n _glfw_errors.append('Error %s: %s' % (num, descr))\n\n\nclass ApplicationBackend(BaseApplicationBackend):\n\n def __init__(self):\n BaseApplicationBackend.__init__(self)\n self._timers = list()\n\n def _add_timer(self, timer):\n if timer not in self._timers:\n self._timers.append(timer)\n\n def _vispy_get_backend_name(self):\n return 'Glfw'\n\n def _vispy_process_events(self):\n glfw.glfwPollEvents()\n for timer in self._timers:\n timer._tick()\n wins = _get_glfw_windows()\n for win in wins:\n if win._needs_draw:\n win._needs_draw = False\n win._on_draw()\n\n def _vispy_run(self):\n wins = _get_glfw_windows()\n while any(w._id is not None and not glfw.glfwWindowShouldClose(w._id)\n for w in wins):\n self._vispy_process_events()\n self._vispy_quit() # to clean up\n\n def _vispy_quit(self):\n # Close windows\n wins = _get_glfw_windows()\n for win in wins:\n if win._vispy_canvas is not None:\n win._vispy_canvas.close()\n # tear down timers\n for timer in self._timers:\n timer._vispy_stop()\n self._timers = []\n\n def _vispy_get_native_app(self):\n global _GLFW_INITIALIZED\n if not _GLFW_INITIALIZED:\n cwd = os.getcwd()\n glfw.glfwSetErrorCallback(_error_callback)\n try:\n if not glfw.glfwInit(): # only ever call once\n raise OSError('Could not init glfw:\\n%r' % _glfw_errors)\n finally:\n os.chdir(cwd)\n glfw.glfwSetErrorCallback(0)\n atexit.register(glfw.glfwTerminate)\n _GLFW_INITIALIZED = True\n return glfw\n\n\n# ------------------------------------------------------------------ canvas ---\n\nclass CanvasBackend(BaseCanvasBackend):\n \"\"\"Glfw backend for Canvas abstract class.\"\"\"\n\n # args are for BaseCanvasBackend, kwargs are for us.\n def __init__(self, *args, **kwargs):\n BaseCanvasBackend.__init__(self, *args)\n p = self._process_backend_kwargs(kwargs)\n self._initialized = False\n\n # Deal with config\n _set_config(p.context.config)\n # Deal with context\n p.context.shared.add_ref('glfw', self)\n if p.context.shared.ref is self:\n share = None\n else:\n share = p.context.shared.ref._id\n\n glfw.glfwWindowHint(glfw.GLFW_REFRESH_RATE, 0) # highest possible\n glfw.glfwSwapInterval(1 if p.vsync else 0)\n glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, int(p.resizable))\n glfw.glfwWindowHint(glfw.GLFW_DECORATED, int(p.decorate))\n glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # start out hidden\n glfw.glfwWindowHint(glfw.GLFW_FLOATING, int(p.always_on_top))\n if p.fullscreen is not False:\n self._fullscreen = True\n if p.fullscreen is True:\n monitor = glfw.glfwGetPrimaryMonitor()\n else:\n monitor = glfw.glfwGetMonitors()\n if p.fullscreen >= len(monitor):\n 
raise ValueError('fullscreen must be <= %s'\n % len(monitor))\n monitor = monitor[p.fullscreen]\n use_size = glfw.glfwGetVideoMode(monitor)[:2]\n if use_size != tuple(p.size):\n logger.debug('Requested size %s, will be ignored to '\n 'use fullscreen mode %s' % (p.size, use_size))\n size = use_size\n else:\n self._fullscreen = False\n monitor = None\n size = p.size\n\n self._id = glfw.glfwCreateWindow(width=size[0], height=size[1],\n title=p.title, monitor=monitor,\n share=share)\n if not self._id:\n raise RuntimeError('Could not create window')\n\n _VP_GLFW_ALL_WINDOWS.append(self)\n self._mod = list()\n\n # Register callbacks\n glfw.glfwSetWindowRefreshCallback(self._id, self._on_draw)\n glfw.glfwSetWindowSizeCallback(self._id, self._on_resize)\n glfw.glfwSetKeyCallback(self._id, self._on_key_press)\n glfw.glfwSetCharCallback(self._id, self._on_key_char)\n glfw.glfwSetMouseButtonCallback(self._id, self._on_mouse_button)\n glfw.glfwSetScrollCallback(self._id, self._on_mouse_scroll)\n glfw.glfwSetCursorPosCallback(self._id, self._on_mouse_motion)\n glfw.glfwSetWindowCloseCallback(self._id, self._on_close)\n self._vispy_canvas_ = None\n self._needs_draw = False\n self._vispy_canvas.set_current()\n if p.position is not None:\n self._vispy_set_position(*p.position)\n if p.show:\n glfw.glfwShowWindow(self._id)\n\n # Init\n self._initialized = True\n self._next_key_events = []\n self._next_key_text = {}\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.initialize()\n self._on_resize(self._id, size[0], size[1])\n\n def _vispy_warmup(self):\n etime = time() + 0.25\n while time() < etime:\n sleep(0.01)\n self._vispy_canvas.set_current()\n self._vispy_canvas.app.process_events()\n\n def _vispy_set_current(self):\n if self._id is None:\n return\n # Make this the current context\n glfw.glfwMakeContextCurrent(self._id)\n\n def _vispy_swap_buffers(self):\n if self._id is None:\n return\n # Swap front and back buffer\n glfw.glfwSwapBuffers(self._id)\n\n def _vispy_set_title(self, title):\n if self._id is None:\n return\n # Set the window title. Has no effect for widgets\n glfw.glfwSetWindowTitle(self._id, title.encode('utf-8'))\n\n def _vispy_set_size(self, w, h):\n if self._id is None:\n return\n # Set size of the widget or window\n glfw.glfwSetWindowSize(self._id, w, h)\n\n def _vispy_set_position(self, x, y):\n if self._id is None:\n return\n # Set position of the widget or window. 
May have no effect for widgets\n glfw.glfwSetWindowPos(self._id, x, y)\n\n def _vispy_set_visible(self, visible):\n # Show or hide the window or widget\n if self._id is None:\n return\n if visible:\n glfw.glfwShowWindow(self._id)\n # this ensures that the show takes effect\n self._vispy_update()\n else:\n glfw.glfwHideWindow(self._id)\n\n def _vispy_set_fullscreen(self, fullscreen):\n logger.warn('Cannot change fullscreen mode for GLFW backend')\n\n def _vispy_update(self):\n # Invoke a redraw, passing it on to the canvas\n if self._vispy_canvas is None or self._id is None:\n return\n # Mark that this window wants to be drawn on the next loop iter\n self._needs_draw = True\n\n def _vispy_close(self):\n # Force the window or widget to shut down\n if self._id is not None:\n self._vispy_canvas = None\n # glfw.glfwSetWindowShouldClose() # Does not really cause a close\n self._vispy_set_visible(False)\n self._id, id_ = None, self._id\n glfw.glfwDestroyWindow(id_)\n gc.collect() # help ensure context gets destroyed\n\n def _vispy_get_size(self):\n if self._id is None:\n return\n w, h = glfw.glfwGetWindowSize(self._id)\n return w, h\n\n def _vispy_get_physical_size(self):\n if self._id is None:\n return\n w, h = glfw.glfwGetFramebufferSize(self._id)\n return w, h\n\n def _vispy_get_position(self):\n if self._id is None:\n return\n x, y = glfw.glfwGetWindowPos(self._id)\n return x, y\n\n def _vispy_get_fullscreen(self):\n return self._fullscreen\n\n ##########################################\n # Notify vispy of events triggered by GLFW\n def _on_resize(self, _id, w, h):\n if self._vispy_canvas is None:\n return\n self._vispy_canvas.events.resize(\n size=(w, h), physical_size=self._vispy_get_physical_size())\n\n def _on_close(self, _id):\n if self._vispy_canvas is None:\n return\n self._vispy_canvas.close()\n\n def _on_draw(self, _id=None):\n if self._vispy_canvas is None or self._id is None:\n return\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.draw(region=None) # (0, 0, w, h))\n\n def _on_mouse_button(self, _id, button, action, mod):\n if self._vispy_canvas is None and self._id is not None:\n return\n pos = glfw.glfwGetCursorPos(self._id)\n if button < 3:\n # Mouse click event\n button = BUTTONMAP.get(button, 0)\n if action == glfw.GLFW_PRESS:\n fun = self._vispy_mouse_press\n elif action == glfw.GLFW_RELEASE:\n fun = self._vispy_mouse_release\n else:\n return\n fun(pos=pos, button=button, modifiers=self._mod)\n\n def _on_mouse_scroll(self, _id, x_off, y_off):\n if self._vispy_canvas is None and self._id is not None:\n return\n pos = glfw.glfwGetCursorPos(self._id)\n delta = (float(x_off), float(y_off))\n self._vispy_canvas.events.mouse_wheel(pos=pos, delta=delta,\n modifiers=self._mod)\n\n def _on_mouse_motion(self, _id, x, y):\n if self._vispy_canvas is None:\n return\n self._vispy_mouse_move(pos=(x, y), modifiers=self._mod)\n\n def _on_key_press(self, _id, key, scancode, action, mod):\n if self._vispy_canvas is None:\n return\n key, text = self._process_key(key)\n if action == glfw.GLFW_PRESS:\n fun = self._vispy_canvas.events.key_press\n down = True\n elif action == glfw.GLFW_RELEASE:\n fun = self._vispy_canvas.events.key_release\n down = False\n else:\n return\n self._process_mod(key, down=down)\n\n # NOTE: GLFW only provides localized characters via _on_key_char, so if\n # this event contains a character we store all other data and dispatch\n # it once the final unicode character is sent shortly after.\n if text != '' and action == glfw.GLFW_PRESS:\n 
self._next_key_events.append((fun, key, self._mod))\n else:\n if key in self._next_key_text:\n text = self._next_key_text[key]\n del self._next_key_text[key]\n fun(key=key, text=text, modifiers=self._mod)\n\n def _on_key_char(self, _id, text):\n # Repeat strokes (frequency configured at OS) are sent here only,\n # no regular _on_key_press events. Currently ignored!\n if len(self._next_key_events) == 0:\n return\n\n (fun, key, mod) = self._next_key_events.pop(0)\n fun(key=key, text=chr(text), modifiers=mod)\n self._next_key_text[key] = text\n\n def _process_key(self, key):\n if 32 <= key <= 127:\n return keys.Key(chr(key)), chr(key)\n elif key in KEYMAP:\n return KEYMAP[key], ''\n else:\n return None, ''\n\n def _process_mod(self, key, down):\n \"\"\"Process (possible) keyboard modifiers\n\n GLFW provides \"mod\" with many callbacks, but not (critically) the\n scroll callback, so we keep track on our own here.\n \"\"\"\n if key in MOD_KEYS:\n if down:\n if key not in self._mod:\n self._mod.append(key)\n elif key in self._mod:\n self._mod.pop(self._mod.index(key))\n return self._mod\n\n\n# ------------------------------------------------------------------- timer ---\n\nclass TimerBackend(BaseTimerBackend):\n\n def __init__(self, vispy_timer):\n BaseTimerBackend.__init__(self, vispy_timer)\n vispy_timer._app._backend._add_timer(self)\n self._vispy_stop()\n\n def _vispy_start(self, interval):\n self._interval = interval\n self._next_time = time() + self._interval\n\n def _vispy_stop(self):\n self._next_time = float('inf')\n\n def _tick(self):\n if time() >= self._next_time:\n self._vispy_timer._timeout()\n self._next_time = time() + self._interval\n", "path": "vispy/app/backends/_glfw.py" } ]
diff --git a/vispy/app/backends/_glfw.py b/vispy/app/backends/_glfw.py index f662fbabd1..6c59d96dc9 100644 --- a/vispy/app/backends/_glfw.py +++ b/vispy/app/backends/_glfw.py @@ -289,6 +289,7 @@ def __init__(self, *args, **kwargs): self._next_key_text = {} self._vispy_canvas.set_current() self._vispy_canvas.events.initialize() + self._on_resize(self._id, size[0], size[1]) def _vispy_warmup(self): etime = time() + 0.25
bookwyrm-social__bookwyrm-1864
Invalid table limit error **Describe the bug** When running a fresh dev instance I get an `Invalid table limit` error, coming from `initdb.py`. Not sure if something is broken in the latest main branch, or I need to update my configuration. **To Reproduce** Steps to reproduce the behavior: 1. fetch latest `main` branch 2. `./bw-dev resetdb` 3. Get error (see below) **Expected behavior** BookWyrm resets database and new install works without errors. **Screenshots** ``` Applying sessions.0001_initial... OK + execweb python manage.py initdb + docker-compose exec web python manage.py initdb Traceback (most recent call last): File "/app/manage.py", line 18, in <module> execute_from_command_line(sys.argv) File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 413, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 354, in run_from_argv self.execute(*args, **cmd_options) File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 398, in execute output = self.handle(*args, **options) File "/app/bookwyrm/management/commands/initdb.py", line 168, in handle raise Exception("Invalid table limit:", limit) Exception: ('Invalid table limit:', None) ``` **Instance** local development, current `main` branch. **Additional context** I initially started getting this error on a branch I was working on, but it's occuring on the latest `main` branch without any changes. --- **Desktop (please complete the following information):** - OS: MacOS
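The crux of the traceback above: `--limit` defaults to `None`, and the membership check in `initdb.py` rejects `None` exactly like any unknown table name, so a plain `initdb` run always raises. A minimal, standalone sketch of that check and of the truthiness guard applied in the patch further down (not the full management command; table names copied from the command) could look like this:

```python
# Standalone sketch, not the actual BookWyrm command
tables = ["group", "permission", "connector", "federatedserver", "settings", "linkdomain"]

def check_limit(limit):
    # Guard as reported: None (the default when --limit is omitted) is not in
    # the list, so every plain `initdb` run raises.
    if limit not in tables:
        raise Exception("Invalid table limit:", limit)

def check_limit_fixed(limit):
    # Guard as patched: only an explicitly passed, unknown value is rejected.
    if limit and limit not in tables:
        raise Exception("Invalid table limit:", limit)

check_limit_fixed(None)      # ok: no --limit means "initialize everything"
check_limit_fixed("group")   # ok: known table
# check_limit(None)          # raises Exception: ('Invalid table limit:', None)
```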
[ { "content": "\"\"\" What you need in the database to make it work \"\"\"\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom bookwyrm import models\n\n\ndef init_groups():\n \"\"\"permission levels\"\"\"\n groups = [\"admin\", \"moderator\", \"editor\"]\n for group in groups:\n Group.objects.create(name=group)\n\n\ndef init_permissions():\n \"\"\"permission types\"\"\"\n permissions = [\n {\n \"codename\": \"edit_instance_settings\",\n \"name\": \"change the instance info\",\n \"groups\": [\n \"admin\",\n ],\n },\n {\n \"codename\": \"set_user_group\",\n \"name\": \"change what group a user is in\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"control_federation\",\n \"name\": \"control who to federate with\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"create_invites\",\n \"name\": \"issue invitations to join\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"moderate_user\",\n \"name\": \"deactivate or silence a user\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"moderate_post\",\n \"name\": \"delete other users' posts\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"edit_book\",\n \"name\": \"edit book info\",\n \"groups\": [\"admin\", \"moderator\", \"editor\"],\n },\n ]\n\n content_type = models.ContentType.objects.get_for_model(User)\n for permission in permissions:\n permission_obj = Permission.objects.create(\n codename=permission[\"codename\"],\n name=permission[\"name\"],\n content_type=content_type,\n )\n # add the permission to the appropriate groups\n for group_name in permission[\"groups\"]:\n Group.objects.get(name=group_name).permissions.add(permission_obj)\n\n # while the groups and permissions shouldn't be changed because the code\n # depends on them, what permissions go with what groups should be editable\n\n\ndef init_connectors():\n \"\"\"access book data sources\"\"\"\n models.Connector.objects.create(\n identifier=\"bookwyrm.social\",\n name=\"BookWyrm dot Social\",\n connector_file=\"bookwyrm_connector\",\n base_url=\"https://bookwyrm.social\",\n books_url=\"https://bookwyrm.social/book\",\n covers_url=\"https://bookwyrm.social/images/\",\n search_url=\"https://bookwyrm.social/search?q=\",\n isbn_search_url=\"https://bookwyrm.social/isbn/\",\n priority=2,\n )\n\n models.Connector.objects.create(\n identifier=\"inventaire.io\",\n name=\"Inventaire\",\n connector_file=\"inventaire\",\n base_url=\"https://inventaire.io\",\n books_url=\"https://inventaire.io/api/entities\",\n covers_url=\"https://inventaire.io\",\n search_url=\"https://inventaire.io/api/search?types=works&types=works&search=\",\n isbn_search_url=\"https://inventaire.io/api/entities?action=by-uris&uris=isbn%3A\",\n priority=3,\n )\n\n models.Connector.objects.create(\n identifier=\"openlibrary.org\",\n name=\"OpenLibrary\",\n connector_file=\"openlibrary\",\n base_url=\"https://openlibrary.org\",\n books_url=\"https://openlibrary.org\",\n covers_url=\"https://covers.openlibrary.org\",\n search_url=\"https://openlibrary.org/search?q=\",\n isbn_search_url=\"https://openlibrary.org/api/books?jscmd=data&format=json&bibkeys=ISBN:\",\n priority=3,\n )\n\n\ndef init_federated_servers():\n \"\"\"big no to nazis\"\"\"\n built_in_blocks = [\"gab.ai\", \"gab.com\"]\n for server in built_in_blocks:\n models.FederatedServer.objects.create(\n server_name=server,\n 
status=\"blocked\",\n )\n\n\ndef init_settings():\n \"\"\"info about the instance\"\"\"\n models.SiteSettings.objects.create(\n support_link=\"https://www.patreon.com/bookwyrm\",\n support_title=\"Patreon\",\n )\n\n\ndef init_link_domains(*_):\n \"\"\"safe book links\"\"\"\n domains = [\n (\"standardebooks.org\", \"Standard EBooks\"),\n (\"www.gutenberg.org\", \"Project Gutenberg\"),\n (\"archive.org\", \"Internet Archive\"),\n (\"openlibrary.org\", \"Open Library\"),\n (\"theanarchistlibrary.org\", \"The Anarchist Library\"),\n ]\n for domain, name in domains:\n models.LinkDomain.objects.create(\n domain=domain,\n name=name,\n status=\"approved\",\n )\n\n\nclass Command(BaseCommand):\n help = \"Initializes the database with starter data\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--limit\",\n default=None,\n help=\"Limit init to specific table\",\n )\n\n def handle(self, *args, **options):\n limit = options.get(\"limit\")\n tables = [\n \"group\",\n \"permission\",\n \"connector\",\n \"federatedserver\",\n \"settings\",\n \"linkdomain\",\n ]\n if limit not in tables:\n raise Exception(\"Invalid table limit:\", limit)\n\n if not limit or limit == \"group\":\n init_groups()\n if not limit or limit == \"permission\":\n init_permissions()\n if not limit or limit == \"connector\":\n init_connectors()\n if not limit or limit == \"federatedserver\":\n init_federated_servers()\n if not limit or limit == \"settings\":\n init_settings()\n if not limit or limit == \"linkdomain\":\n init_link_domains()\n", "path": "bookwyrm/management/commands/initdb.py" } ]
[ { "content": "\"\"\" What you need in the database to make it work \"\"\"\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom bookwyrm import models\n\n\ndef init_groups():\n \"\"\"permission levels\"\"\"\n groups = [\"admin\", \"moderator\", \"editor\"]\n for group in groups:\n Group.objects.create(name=group)\n\n\ndef init_permissions():\n \"\"\"permission types\"\"\"\n permissions = [\n {\n \"codename\": \"edit_instance_settings\",\n \"name\": \"change the instance info\",\n \"groups\": [\n \"admin\",\n ],\n },\n {\n \"codename\": \"set_user_group\",\n \"name\": \"change what group a user is in\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"control_federation\",\n \"name\": \"control who to federate with\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"create_invites\",\n \"name\": \"issue invitations to join\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"moderate_user\",\n \"name\": \"deactivate or silence a user\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"moderate_post\",\n \"name\": \"delete other users' posts\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"edit_book\",\n \"name\": \"edit book info\",\n \"groups\": [\"admin\", \"moderator\", \"editor\"],\n },\n ]\n\n content_type = models.ContentType.objects.get_for_model(User)\n for permission in permissions:\n permission_obj = Permission.objects.create(\n codename=permission[\"codename\"],\n name=permission[\"name\"],\n content_type=content_type,\n )\n # add the permission to the appropriate groups\n for group_name in permission[\"groups\"]:\n Group.objects.get(name=group_name).permissions.add(permission_obj)\n\n # while the groups and permissions shouldn't be changed because the code\n # depends on them, what permissions go with what groups should be editable\n\n\ndef init_connectors():\n \"\"\"access book data sources\"\"\"\n models.Connector.objects.create(\n identifier=\"bookwyrm.social\",\n name=\"BookWyrm dot Social\",\n connector_file=\"bookwyrm_connector\",\n base_url=\"https://bookwyrm.social\",\n books_url=\"https://bookwyrm.social/book\",\n covers_url=\"https://bookwyrm.social/images/\",\n search_url=\"https://bookwyrm.social/search?q=\",\n isbn_search_url=\"https://bookwyrm.social/isbn/\",\n priority=2,\n )\n\n models.Connector.objects.create(\n identifier=\"inventaire.io\",\n name=\"Inventaire\",\n connector_file=\"inventaire\",\n base_url=\"https://inventaire.io\",\n books_url=\"https://inventaire.io/api/entities\",\n covers_url=\"https://inventaire.io\",\n search_url=\"https://inventaire.io/api/search?types=works&types=works&search=\",\n isbn_search_url=\"https://inventaire.io/api/entities?action=by-uris&uris=isbn%3A\",\n priority=3,\n )\n\n models.Connector.objects.create(\n identifier=\"openlibrary.org\",\n name=\"OpenLibrary\",\n connector_file=\"openlibrary\",\n base_url=\"https://openlibrary.org\",\n books_url=\"https://openlibrary.org\",\n covers_url=\"https://covers.openlibrary.org\",\n search_url=\"https://openlibrary.org/search?q=\",\n isbn_search_url=\"https://openlibrary.org/api/books?jscmd=data&format=json&bibkeys=ISBN:\",\n priority=3,\n )\n\n\ndef init_federated_servers():\n \"\"\"big no to nazis\"\"\"\n built_in_blocks = [\"gab.ai\", \"gab.com\"]\n for server in built_in_blocks:\n models.FederatedServer.objects.create(\n server_name=server,\n 
status=\"blocked\",\n )\n\n\ndef init_settings():\n \"\"\"info about the instance\"\"\"\n models.SiteSettings.objects.create(\n support_link=\"https://www.patreon.com/bookwyrm\",\n support_title=\"Patreon\",\n )\n\n\ndef init_link_domains(*_):\n \"\"\"safe book links\"\"\"\n domains = [\n (\"standardebooks.org\", \"Standard EBooks\"),\n (\"www.gutenberg.org\", \"Project Gutenberg\"),\n (\"archive.org\", \"Internet Archive\"),\n (\"openlibrary.org\", \"Open Library\"),\n (\"theanarchistlibrary.org\", \"The Anarchist Library\"),\n ]\n for domain, name in domains:\n models.LinkDomain.objects.create(\n domain=domain,\n name=name,\n status=\"approved\",\n )\n\n\nclass Command(BaseCommand):\n help = \"Initializes the database with starter data\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--limit\",\n default=None,\n help=\"Limit init to specific table\",\n )\n\n def handle(self, *args, **options):\n limit = options.get(\"limit\")\n tables = [\n \"group\",\n \"permission\",\n \"connector\",\n \"federatedserver\",\n \"settings\",\n \"linkdomain\",\n ]\n if limit and limit not in tables:\n raise Exception(\"Invalid table limit:\", limit)\n\n if not limit or limit == \"group\":\n init_groups()\n if not limit or limit == \"permission\":\n init_permissions()\n if not limit or limit == \"connector\":\n init_connectors()\n if not limit or limit == \"federatedserver\":\n init_federated_servers()\n if not limit or limit == \"settings\":\n init_settings()\n if not limit or limit == \"linkdomain\":\n init_link_domains()\n", "path": "bookwyrm/management/commands/initdb.py" } ]
diff --git a/bookwyrm/management/commands/initdb.py b/bookwyrm/management/commands/initdb.py index 37dd66af4d..b54055744e 100644 --- a/bookwyrm/management/commands/initdb.py +++ b/bookwyrm/management/commands/initdb.py @@ -164,7 +164,7 @@ def handle(self, *args, **options): "settings", "linkdomain", ] - if limit not in tables: + if limit and limit not in tables: raise Exception("Invalid table limit:", limit) if not limit or limit == "group":
horovod__horovod-2651
Wrong default for horovod.tensorflow.keras.allreduce(average...
In Horovod 0.21.1 the default for `average` in `allreduce` is still `True`, leading to

> ValueError: The op parameter supersedes average. Please provide only one of them.

when using `op=...` (only). This happens only in `horovod.tensorflow.keras`, not in `horovod.tensorflow`.

BTW: In TF2, is there any benefit of using `horovod.tensorflow.keras` over `horovod.tensorflow` when not disabling eager execution (which in my tests is pretty much unfeasible)?
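For context, a simplified stand-in (not Horovod's actual implementation) shows why the default matters: the deprecated `average` keyword and the newer `op` keyword are mutually exclusive, so defaulting `average` to `True` instead of `None` makes every call that passes only `op` look as if both were given.

```python
# Simplified stand-in for the keras wrapper's signature before the fix;
# not Horovod's real code, reduction logic elided.
def allreduce(value, average=True, op=None):
    if op is not None and average is not None:
        raise ValueError("The op parameter supersedes average. "
                         "Please provide only one of them.")
    return value

allreduce([1.0, 2.0])              # fine: neither keyword given explicitly
# allreduce([1.0, 2.0], op="Sum")  # raises, because average silently defaulted to True
# With average=None as the default (as in horovod.tensorflow), the op-only call works.
```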
[ { "content": "# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport inspect\n\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.python.keras import backend as K\n\nfrom horovod.tensorflow import init\nfrom horovod.tensorflow import shutdown\nfrom horovod.tensorflow import is_initialized, start_timeline, stop_timeline\nfrom horovod.tensorflow import size\nfrom horovod.tensorflow import local_size\nfrom horovod.tensorflow import rank\nfrom horovod.tensorflow import local_rank\nfrom horovod.tensorflow import mpi_threads_supported, mpi_enabled, mpi_built\nfrom horovod.tensorflow import gloo_enabled, gloo_built\nfrom horovod.tensorflow import nccl_built, ddl_built, ccl_built, cuda_built, rocm_built\nfrom horovod.tensorflow import Average, Compression, Sum\n\nimport horovod._keras as _impl\nfrom horovod.tensorflow.keras import callbacks, elastic\n\n\ntry:\n # In later versions of TensorFlow, optimizers are spread across multiple modules. This set is used to distinguish\n # stock optimizers that come with tf.keras from custom optimizers that may need to be wrapped specially.\n _OPTIMIZER_MODULES = set([obj.__module__ for name, obj in inspect.getmembers(tf.keras.optimizers)\n if isinstance(obj, type(tf.keras.optimizers.Optimizer))])\nexcept:\n _OPTIMIZER_MODULES = set()\n\n\ndef DistributedOptimizer(optimizer, name=None,\n device_dense='', device_sparse='',\n compression=Compression.none,\n sparse_as_dense=False,\n gradient_predivide_factor=1.0,\n op=Average,\n backward_passes_per_step=1,\n average_aggregated_gradients=False):\n \"\"\"\n An optimizer that wraps another keras.optimizers.Optimizer, using an allreduce to\n average gradient values before applying gradients to model weights.\n\n Args:\n optimizer: Optimizer to use for computing gradients and applying updates.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to \"Distributed\" followed by the provided\n optimizer type.\n device_dense: Device to be used for dense tensors. Uses GPU by default\n if Horovod was build with HOROVOD_GPU_OPERATIONS.\n device_sparse: Device to be used for sparse tensors. Uses GPU by default\n if Horovod was build with HOROVOD_GPU_OPERATIONS.\n compression: Compression algorithm used to reduce the amount of data\n sent and received by each worker node. Defaults to not\n using compression.\n sparse_as_dense: Treat all sparse gradients as dense tensors. This can\n help improve performance and memory utilization if\n the original sparse gradient has high density.\n Defaults to false.\n gradient_predivide_factor: gradient_predivide_factor splits the averaging\n before and after the sum. Gradients are scaled by\n 1.0 / gradient_predivide_factor before the sum and\n gradient_predivide_factor / size after the sum.\n op: The reduction operation to use when combining gradients across\n different ranks. 
Defaults to Average.\n backward_passes_per_step: Number of backward passes to perform before calling\n hvd.allreduce. This allows accumulating updates over\n multiple mini-batches before reducing and applying them.\n average_aggregated_gradients: Whether to average the aggregated gradients that\n have been accumulated over multiple mini-batches.\n If true divides gradient updates by\n backward_passes_per_step.\n Only applicable for backward_passes_per_step > 1.\n \"\"\"\n if gradient_predivide_factor != 1.0 and rocm_built():\n raise ValueError('gradient_predivide_factor not supported yet with ROCm')\n\n if op != Average and op != Sum:\n raise ValueError('op currently only supports Average and Sum')\n\n return _impl.create_distributed_optimizer(\n keras=keras,\n optimizer=optimizer,\n name=name,\n device_dense=device_dense,\n device_sparse=device_sparse,\n compression=compression,\n sparse_as_dense=sparse_as_dense,\n gradient_predivide_factor=gradient_predivide_factor,\n op=op,\n backward_passes_per_step=backward_passes_per_step,\n average_aggregated_gradients=average_aggregated_gradients,\n )\n\n\ndef broadcast_global_variables(root_rank):\n \"\"\"Broadcasts all global variables from root rank to all other processes.\n\n Arguments:\n root_rank: Rank of the process from which global variables will be broadcasted\n to all other processes.\n \"\"\"\n return _impl.broadcast_global_variables(K, root_rank)\n\n\ndef allreduce(value, name=None, average=True,\n prescale_factor=1.0,\n postscale_factor=1.0,\n op=None,\n compression=Compression.none):\n \"\"\"\n Perform an allreduce on a tensor-compatible value.\n\n Arguments:\n value: A tensor-compatible value to reduce.\n The shape of the input must be identical across all ranks.\n name: Optional name for the constants created by this operation.\n average:\n .. warning:: .. deprecated:: 0.19.0\n\n Use `op` instead. Will be removed in v0.21.0.\n\n prescale_factor: Multiplicative factor to scale tensor before allreduce.\n postscale_factor: Multiplicative factor to scale tensor after allreduce.\n op: The reduction operation to combine tensors across different ranks.\n Defaults to Average if None is given.\n compression: Compression algorithm used to reduce the amount of data\n sent and received by each worker node. 
Defaults to not\n using compression.\n \"\"\"\n return _impl.allreduce(\n backend=K,\n value=value,\n name=name,\n average=average,\n prescale_factor=prescale_factor,\n postscale_factor=postscale_factor,\n op=op,\n compression=compression)\n\n\ndef allgather(value, name=None):\n \"\"\"\n Perform an allgather on a tensor-compatible value.\n\n The concatenation is done on the first dimension, so the input values on the\n different processes must have the same rank and shape, except for the first\n dimension, which is allowed to be different.\n\n Arguments:\n value: A tensor-compatible value to gather.\n name: Optional name prefix for the constants created by this operation.\n \"\"\"\n return _impl.allgather(K, value, name)\n\n\ndef broadcast(value, root_rank, name=None):\n \"\"\"\n Perform a broadcast on a tensor-compatible value.\n\n Arguments:\n value: A tensor-compatible value to reduce.\n The shape of the input must be identical across all ranks.\n root_rank: Rank of the process from which global variables will be\n broadcasted to all other processes.\n name: Optional name for the constants created by this operation.\n \"\"\"\n return _impl.broadcast(K, value, root_rank, name)\n\n\ndef load_model(filepath, custom_optimizers=None, custom_objects=None, compression=Compression.none):\n \"\"\"\n Loads a saved Keras model with a Horovod DistributedOptimizer.\n\n The DistributedOptimizer will wrap the underlying optimizer used to train\n the saved model, so that the optimizer state (params and weights) will\n be picked up for retraining.\n\n By default, all optimizers in the module `keras.optimizers` will be loaded\n and wrapped without needing to specify any `custom_optimizers` or\n `custom_objects`.\n\n Arguments:\n filepath: One of the following:\n - string, path to the saved model, or\n - h5py.File object from which to load the model\n custom_optimizers: Optional list of Optimizer subclasses to support\n during loading.\n custom_objects: Optional dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization.\n compression: Compression algorithm used to reduce the amount of data\n sent and received by each worker node. Defaults to not\n using compression.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ImportError: If h5py is not available.\n ValueError: In case of an invalid savefile.\n \"\"\"\n def wrap_optimizer(cls):\n return lambda **kwargs: DistributedOptimizer(cls(**kwargs), compression=compression)\n return _impl.load_model(keras, wrap_optimizer, _OPTIMIZER_MODULES, filepath, custom_optimizers, custom_objects)\n\n", "path": "horovod/tensorflow/keras/__init__.py" } ]
[ { "content": "# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport inspect\n\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.python.keras import backend as K\n\nfrom horovod.tensorflow import init\nfrom horovod.tensorflow import shutdown\nfrom horovod.tensorflow import is_initialized, start_timeline, stop_timeline\nfrom horovod.tensorflow import size\nfrom horovod.tensorflow import local_size\nfrom horovod.tensorflow import rank\nfrom horovod.tensorflow import local_rank\nfrom horovod.tensorflow import mpi_threads_supported, mpi_enabled, mpi_built\nfrom horovod.tensorflow import gloo_enabled, gloo_built\nfrom horovod.tensorflow import nccl_built, ddl_built, ccl_built, cuda_built, rocm_built\nfrom horovod.tensorflow import Average, Compression, Sum\n\nimport horovod._keras as _impl\nfrom horovod.tensorflow.keras import callbacks, elastic\n\n\ntry:\n # In later versions of TensorFlow, optimizers are spread across multiple modules. This set is used to distinguish\n # stock optimizers that come with tf.keras from custom optimizers that may need to be wrapped specially.\n _OPTIMIZER_MODULES = set([obj.__module__ for name, obj in inspect.getmembers(tf.keras.optimizers)\n if isinstance(obj, type(tf.keras.optimizers.Optimizer))])\nexcept:\n _OPTIMIZER_MODULES = set()\n\n\ndef DistributedOptimizer(optimizer, name=None,\n device_dense='', device_sparse='',\n compression=Compression.none,\n sparse_as_dense=False,\n gradient_predivide_factor=1.0,\n op=Average,\n backward_passes_per_step=1,\n average_aggregated_gradients=False):\n \"\"\"\n An optimizer that wraps another keras.optimizers.Optimizer, using an allreduce to\n average gradient values before applying gradients to model weights.\n\n Args:\n optimizer: Optimizer to use for computing gradients and applying updates.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to \"Distributed\" followed by the provided\n optimizer type.\n device_dense: Device to be used for dense tensors. Uses GPU by default\n if Horovod was build with HOROVOD_GPU_OPERATIONS.\n device_sparse: Device to be used for sparse tensors. Uses GPU by default\n if Horovod was build with HOROVOD_GPU_OPERATIONS.\n compression: Compression algorithm used to reduce the amount of data\n sent and received by each worker node. Defaults to not\n using compression.\n sparse_as_dense: Treat all sparse gradients as dense tensors. This can\n help improve performance and memory utilization if\n the original sparse gradient has high density.\n Defaults to false.\n gradient_predivide_factor: gradient_predivide_factor splits the averaging\n before and after the sum. Gradients are scaled by\n 1.0 / gradient_predivide_factor before the sum and\n gradient_predivide_factor / size after the sum.\n op: The reduction operation to use when combining gradients across\n different ranks. 
Defaults to Average.\n backward_passes_per_step: Number of backward passes to perform before calling\n hvd.allreduce. This allows accumulating updates over\n multiple mini-batches before reducing and applying them.\n average_aggregated_gradients: Whether to average the aggregated gradients that\n have been accumulated over multiple mini-batches.\n If true divides gradient updates by\n backward_passes_per_step.\n Only applicable for backward_passes_per_step > 1.\n \"\"\"\n if gradient_predivide_factor != 1.0 and rocm_built():\n raise ValueError('gradient_predivide_factor not supported yet with ROCm')\n\n if op != Average and op != Sum:\n raise ValueError('op currently only supports Average and Sum')\n\n return _impl.create_distributed_optimizer(\n keras=keras,\n optimizer=optimizer,\n name=name,\n device_dense=device_dense,\n device_sparse=device_sparse,\n compression=compression,\n sparse_as_dense=sparse_as_dense,\n gradient_predivide_factor=gradient_predivide_factor,\n op=op,\n backward_passes_per_step=backward_passes_per_step,\n average_aggregated_gradients=average_aggregated_gradients,\n )\n\n\ndef broadcast_global_variables(root_rank):\n \"\"\"Broadcasts all global variables from root rank to all other processes.\n\n Arguments:\n root_rank: Rank of the process from which global variables will be broadcasted\n to all other processes.\n \"\"\"\n return _impl.broadcast_global_variables(K, root_rank)\n\n\ndef allreduce(value, name=None, average=None,\n prescale_factor=1.0,\n postscale_factor=1.0,\n op=None,\n compression=Compression.none):\n \"\"\"\n Perform an allreduce on a tensor-compatible value.\n\n Arguments:\n value: A tensor-compatible value to reduce.\n The shape of the input must be identical across all ranks.\n name: Optional name for the constants created by this operation.\n average:\n .. warning:: .. deprecated:: 0.19.0\n\n Use `op` instead. Will be removed in v0.21.0.\n\n prescale_factor: Multiplicative factor to scale tensor before allreduce.\n postscale_factor: Multiplicative factor to scale tensor after allreduce.\n op: The reduction operation to combine tensors across different ranks.\n Defaults to Average if None is given.\n compression: Compression algorithm used to reduce the amount of data\n sent and received by each worker node. 
Defaults to not\n using compression.\n \"\"\"\n return _impl.allreduce(\n backend=K,\n value=value,\n name=name,\n average=average,\n prescale_factor=prescale_factor,\n postscale_factor=postscale_factor,\n op=op,\n compression=compression)\n\n\ndef allgather(value, name=None):\n \"\"\"\n Perform an allgather on a tensor-compatible value.\n\n The concatenation is done on the first dimension, so the input values on the\n different processes must have the same rank and shape, except for the first\n dimension, which is allowed to be different.\n\n Arguments:\n value: A tensor-compatible value to gather.\n name: Optional name prefix for the constants created by this operation.\n \"\"\"\n return _impl.allgather(K, value, name)\n\n\ndef broadcast(value, root_rank, name=None):\n \"\"\"\n Perform a broadcast on a tensor-compatible value.\n\n Arguments:\n value: A tensor-compatible value to reduce.\n The shape of the input must be identical across all ranks.\n root_rank: Rank of the process from which global variables will be\n broadcasted to all other processes.\n name: Optional name for the constants created by this operation.\n \"\"\"\n return _impl.broadcast(K, value, root_rank, name)\n\n\ndef load_model(filepath, custom_optimizers=None, custom_objects=None, compression=Compression.none):\n \"\"\"\n Loads a saved Keras model with a Horovod DistributedOptimizer.\n\n The DistributedOptimizer will wrap the underlying optimizer used to train\n the saved model, so that the optimizer state (params and weights) will\n be picked up for retraining.\n\n By default, all optimizers in the module `keras.optimizers` will be loaded\n and wrapped without needing to specify any `custom_optimizers` or\n `custom_objects`.\n\n Arguments:\n filepath: One of the following:\n - string, path to the saved model, or\n - h5py.File object from which to load the model\n custom_optimizers: Optional list of Optimizer subclasses to support\n during loading.\n custom_objects: Optional dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization.\n compression: Compression algorithm used to reduce the amount of data\n sent and received by each worker node. Defaults to not\n using compression.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ImportError: If h5py is not available.\n ValueError: In case of an invalid savefile.\n \"\"\"\n def wrap_optimizer(cls):\n return lambda **kwargs: DistributedOptimizer(cls(**kwargs), compression=compression)\n return _impl.load_model(keras, wrap_optimizer, _OPTIMIZER_MODULES, filepath, custom_optimizers, custom_objects)\n\n", "path": "horovod/tensorflow/keras/__init__.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 27d8ecad07..3033704106 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - Fixed `local_rank` support for Ray. ([#2596](https://github.com/horovod/horovod/pull/2596)) +- Fixed wrong default for horovod.tensorflow.keras.allreduce average ([#2627](https://github.com/horovod/horovod/pull/2627)) + ## [v0.21.1] - 2021-01-06 ### Added diff --git a/horovod/tensorflow/keras/__init__.py b/horovod/tensorflow/keras/__init__.py index fdf4bca91d..aaf7ac66cd 100644 --- a/horovod/tensorflow/keras/__init__.py +++ b/horovod/tensorflow/keras/__init__.py @@ -119,7 +119,7 @@ def broadcast_global_variables(root_rank): return _impl.broadcast_global_variables(K, root_rank) -def allreduce(value, name=None, average=True, +def allreduce(value, name=None, average=None, prescale_factor=1.0, postscale_factor=1.0, op=None,
e-valuation__EvaP-1321
Evaluation preview button visibility
As a teaching assistant, I might be a contributor to a given course and therefore get my own feedback in the main evaluation. If that course also has an exam evaluation, I see it listed on my "own evaluations" page with the option to preview the questionnaire. However, since I am not responsible, I lack the access rights to preview the linked page, which results in an error.
I would like either not to be shown the preview button (while rendering that page it is already known that I am not a contributor to the exam evaluation, as indicated by the corresponding icon next to its title) or to be given the rights to preview the questionnaire.
[ { "content": "from django.forms import TypedChoiceField\nfrom django.template import Library\n\nfrom evap.evaluation.models import BASE_UNIPOLAR_CHOICES\nfrom evap.evaluation.tools import STATES_ORDERED, STATE_DESCRIPTIONS\nfrom evap.rewards.tools import can_reward_points_be_used_by\nfrom evap.student.forms import HeadingField\n\n\nregister = Library()\n\n\[email protected](name='zip')\ndef _zip(a, b):\n return zip(a, b)\n\n\[email protected]\ndef ordering_index(evaluation):\n if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:\n return evaluation.days_until_evaluation\n elif evaluation.state == \"in_evaluation\":\n return 100000 + evaluation.days_left_for_evaluation\n return 200000 + evaluation.days_left_for_evaluation\n\n\n# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/\[email protected]\ndef percentage(fraction, population):\n try:\n return \"{0:.0f}%\".format(int(float(fraction) / float(population) * 100))\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef percentage_one_decimal(fraction, population):\n try:\n return \"{0:.1f}%\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef percentage_value(fraction, population):\n try:\n return \"{0:0f}\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef to_colors(choices):\n if not choices:\n # When displaying the course distribution, there are no associated voting choices.\n # In that case, we just use the colors of a unipolar scale.\n return BASE_UNIPOLAR_CHOICES['colors']\n return choices.colors\n\n\[email protected]\ndef statename(state):\n return STATES_ORDERED.get(state)\n\n\[email protected]\ndef statedescription(state):\n return STATE_DESCRIPTIONS.get(state)\n\n\[email protected]\ndef can_results_page_be_seen_by(evaluation, user):\n return evaluation.can_results_page_be_seen_by(user)\n\n\[email protected](name='can_reward_points_be_used_by')\ndef _can_reward_points_be_used_by(user):\n return can_reward_points_be_used_by(user)\n\n\[email protected]\ndef is_choice_field(field):\n return isinstance(field.field, TypedChoiceField)\n\n\[email protected]\ndef is_heading_field(field):\n return isinstance(field.field, HeadingField)\n\n\[email protected]\ndef is_user_editor_or_delegate(evaluation, user):\n return evaluation.is_user_editor_or_delegate(user)\n\n\[email protected]\ndef message_class(level):\n return {\n 'debug': 'info',\n 'info': 'info',\n 'success': 'success',\n 'warning': 'warning',\n 'error': 'danger',\n }.get(level, 'info')\n\n\[email protected]\ndef hours_and_minutes(time_left_for_evaluation):\n hours = time_left_for_evaluation.seconds // 3600\n minutes = (time_left_for_evaluation.seconds // 60) % 60\n return \"{:02}:{:02}\".format(hours, minutes)\n\n\[email protected]\ndef has_nonresponsible_editor(evaluation):\n return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()\n", "path": "evap/evaluation/templatetags/evaluation_filters.py" } ]
[ { "content": "from django.forms import TypedChoiceField\nfrom django.template import Library\n\nfrom evap.evaluation.models import BASE_UNIPOLAR_CHOICES\nfrom evap.evaluation.tools import STATES_ORDERED, STATE_DESCRIPTIONS\nfrom evap.rewards.tools import can_reward_points_be_used_by\nfrom evap.student.forms import HeadingField\n\n\nregister = Library()\n\n\[email protected](name='zip')\ndef _zip(a, b):\n return zip(a, b)\n\n\[email protected]\ndef ordering_index(evaluation):\n if evaluation.state in ['new', 'prepared', 'editor_approved', 'approved']:\n return evaluation.days_until_evaluation\n elif evaluation.state == \"in_evaluation\":\n return 100000 + evaluation.days_left_for_evaluation\n return 200000 + evaluation.days_left_for_evaluation\n\n\n# from http://www.jongales.com/blog/2009/10/19/percentage-django-template-tag/\[email protected]\ndef percentage(fraction, population):\n try:\n return \"{0:.0f}%\".format(int(float(fraction) / float(population) * 100))\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef percentage_one_decimal(fraction, population):\n try:\n return \"{0:.1f}%\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef percentage_value(fraction, population):\n try:\n return \"{0:0f}\".format((float(fraction) / float(population)) * 100)\n except ValueError:\n return None\n except ZeroDivisionError:\n return None\n\n\[email protected]\ndef to_colors(choices):\n if not choices:\n # When displaying the course distribution, there are no associated voting choices.\n # In that case, we just use the colors of a unipolar scale.\n return BASE_UNIPOLAR_CHOICES['colors']\n return choices.colors\n\n\[email protected]\ndef statename(state):\n return STATES_ORDERED.get(state)\n\n\[email protected]\ndef statedescription(state):\n return STATE_DESCRIPTIONS.get(state)\n\n\[email protected]\ndef can_results_page_be_seen_by(evaluation, user):\n return evaluation.can_results_page_be_seen_by(user)\n\n\[email protected](name='can_reward_points_be_used_by')\ndef _can_reward_points_be_used_by(user):\n return can_reward_points_be_used_by(user)\n\n\[email protected]\ndef is_choice_field(field):\n return isinstance(field.field, TypedChoiceField)\n\n\[email protected]\ndef is_heading_field(field):\n return isinstance(field.field, HeadingField)\n\n\[email protected]\ndef is_user_editor_or_delegate(evaluation, user):\n return evaluation.is_user_editor_or_delegate(user)\n\n\[email protected]\ndef is_user_responsible_or_contributor_or_delegate(evaluation, user):\n return evaluation.is_user_responsible_or_contributor_or_delegate(user)\n\[email protected]\ndef message_class(level):\n return {\n 'debug': 'info',\n 'info': 'info',\n 'success': 'success',\n 'warning': 'warning',\n 'error': 'danger',\n }.get(level, 'info')\n\n\[email protected]\ndef hours_and_minutes(time_left_for_evaluation):\n hours = time_left_for_evaluation.seconds // 3600\n minutes = (time_left_for_evaluation.seconds // 60) % 60\n return \"{:02}:{:02}\".format(hours, minutes)\n\n\[email protected]\ndef has_nonresponsible_editor(evaluation):\n return evaluation.contributions.filter(can_edit=True).exclude(contributor__in=evaluation.course.responsibles.all()).exists()\n", "path": "evap/evaluation/templatetags/evaluation_filters.py" } ]
diff --git a/evap/contributor/templates/contributor_index.html b/evap/contributor/templates/contributor_index.html index 0769b1d562..04724bcecb 100644 --- a/evap/contributor/templates/contributor_index.html +++ b/evap/contributor/templates/contributor_index.html @@ -180,10 +180,12 @@ </a> {% endif %} {% endif %} - <a href="{% url 'contributor:evaluation_preview' evaluation.id %}" class="btn btn-sm btn-light" - data-toggle="tooltip" data-placement="top" title="{% trans 'Preview' %}"> - <span class="fas fa-eye"></span> - </a> + {% if evaluation|is_user_responsible_or_contributor_or_delegate:user %} + <a href="{% url 'contributor:evaluation_preview' evaluation.id %}" class="btn btn-sm btn-light" + data-toggle="tooltip" data-placement="top" title="{% trans 'Preview' %}"> + <span class="fas fa-eye"></span> + </a> + {% endif %} {% elif evaluation.state != 'published' and evaluation.is_single_result %} <div class="d-flex" data-toggle="tooltip" data-placement="left" title="{% trans 'You will receive an email when the results are published.' %}"> {% include 'distribution_bar_disabled.html' with icon="fas fa-hourglass-half" weight=evaluation.weight weight_sum=evaluation.course.evaluation_weight_sum %} diff --git a/evap/evaluation/templatetags/evaluation_filters.py b/evap/evaluation/templatetags/evaluation_filters.py index b582d17402..3dfcaae851 100644 --- a/evap/evaluation/templatetags/evaluation_filters.py +++ b/evap/evaluation/templatetags/evaluation_filters.py @@ -99,6 +99,10 @@ def is_user_editor_or_delegate(evaluation, user): return evaluation.is_user_editor_or_delegate(user) +@register.filter +def is_user_responsible_or_contributor_or_delegate(evaluation, user): + return evaluation.is_user_responsible_or_contributor_or_delegate(user) + @register.filter def message_class(level): return {
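For readers unfamiliar with the pattern this diff leans on: `evaluation_filters.py` is a Django `templatetags` module, and the added entry is a two-argument template filter that the template then calls with the `|name:arg` syntax shown in the first hunk. A generic sketch of that pattern follows — illustrative only, not evap's implementation; the permission check inside is a stand-in.

```python
# Generic Django sketch -- not evap's code. A custom template filter receives
# the left-hand value plus one argument and returns a boolean for the template.
from django import template

register = template.Library()


@register.filter
def is_user_responsible_or_contributor_or_delegate(evaluation, user):
    # evap delegates to a model method of the same name; this stand-in check
    # only keeps the sketch self-contained.
    return user in getattr(evaluation, "contributors", ())


# Template usage, mirroring the contributor_index.html hunk:
#   {% load evaluation_filters %}
#   {% if evaluation|is_user_responsible_or_contributor_or_delegate:user %}
#       ... preview link ...
#   {% endif %}
```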
mitmproxy__mitmproxy-1336
Snapshots have weird filenames due to py3 ##### Steps to reproduce the problem: 1. Look at https://snapshots.mitmproxy.org/v0.18/ ``` mitmproxy-0.18dev0636-0xg588dad1-py2-none-any.whl 26-Jun-2016 21:55 985K mitmproxy-0.18dev0759-0xg22c0db3-win32.zip 09-Jul-2016 20:45 35M mitmproxy-0.18dev0775-0xb'g6bb267c'-osx.tar.gz 10-Jul-2016 09:29 35M mitmproxy-0.18dev0775-0xb'g6bb267c'-py2.py3-non..> 10-Jul-2016 09:28 986K pathod-0.18dev0759-0xg22c0db3-win32.zip 09-Jul-2016 20:46 12M pathod-0.18dev0775-0xb'g6bb267c'-osx.tar.gz 10-Jul-2016 09:28 13M ``` ##### What is the expected behavior? Snapshot files should have properly formatted names. ##### What went wrong? Some files contain e.g. `0xb'g6bb267c'`, others contain `0xg22c0db3`. The `0x` part is ok, but the rest looks like a bytes vs. str problem introduced by creating snapshots with Python 3.
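The `0xb'g6bb267c'` names line up with the reporter's bytes-vs-str diagnosis: under Python 3, `subprocess.check_output()` (which `rtool.py` uses to run `git describe`) returns `bytes`, and interpolating a `bytes` value into a format string embeds its `b'...'` repr rather than the text. A standalone illustration — not mitmproxy's code; the commit id simply mirrors the listing above:

```python
# Illustrative snippet only: why a bytes commit id picks up the b'...' wrapper
# when formatted under Python 3.
commit = b"g6bb267c"  # subprocess.check_output() returns bytes on Python 3

print("0x{}".format(commit))           # Python 3 prints: 0xb'g6bb267c'
print("0x{}".format(commit.decode()))  # prints: 0xg6bb267c (same on Python 2)
```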
[ { "content": "#!/usr/bin/env python\nfrom __future__ import absolute_import, print_function, division\nfrom os.path import join\nimport contextlib\nimport os\nimport shutil\nimport subprocess\nimport re\nimport shlex\nimport runpy\nimport zipfile\nimport tarfile\nimport platform\nimport click\nimport pysftp\nimport fnmatch\n\n# https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes\n# scripts and executables on Windows go in ENV\\Scripts\\ instead of ENV/bin/\nimport sys\n\nif platform.system() == \"Windows\":\n VENV_BIN = \"Scripts\"\nelse:\n VENV_BIN = \"bin\"\n\nif platform.system() == \"Windows\":\n def Archive(name):\n a = zipfile.ZipFile(name, \"w\")\n a.add = a.write\n return a\nelse:\n def Archive(name):\n return tarfile.open(name, \"w:gz\")\n\nRELEASE_DIR = join(os.path.dirname(os.path.realpath(__file__)))\nDIST_DIR = join(RELEASE_DIR, \"dist\")\nROOT_DIR = os.path.normpath(join(RELEASE_DIR, \"..\"))\nRELEASE_SPEC_DIR = join(RELEASE_DIR, \"specs\")\nVERSION_FILE = join(ROOT_DIR, \"netlib/version.py\")\n\nBUILD_DIR = join(RELEASE_DIR, \"build\")\nPYINSTALLER_TEMP = join(BUILD_DIR, \"pyinstaller\")\nPYINSTALLER_DIST = join(BUILD_DIR, \"binaries\")\n\nVENV_DIR = join(BUILD_DIR, \"venv\")\nVENV_PIP = join(VENV_DIR, VENV_BIN, \"pip\")\nVENV_PYINSTALLER = join(VENV_DIR, VENV_BIN, \"pyinstaller\")\n\nproject = {\n \"name\": \"mitmproxy\",\n \"tools\": [\"pathod\", \"pathoc\", \"mitmproxy\", \"mitmdump\", \"mitmweb\"],\n \"bdists\": {\n \"mitmproxy\": [\"mitmproxy\", \"mitmdump\", \"mitmweb\"],\n \"pathod\": [\"pathoc\", \"pathod\"]\n },\n \"dir\": ROOT_DIR,\n \"python_version\": \"py2.py3\",\n}\nif platform.system() == \"Windows\":\n project[\"tools\"].remove(\"mitmproxy\")\n project[\"bdists\"][\"mitmproxy\"].remove(\"mitmproxy\")\n\n\ndef get_version():\n return runpy.run_path(VERSION_FILE)[\"VERSION\"]\n\n\ndef get_snapshot_version():\n last_tag, tag_dist, commit = git(\"describe --tags --long\").strip().rsplit(b\"-\", 2)\n tag_dist = int(tag_dist)\n if tag_dist == 0:\n return get_version()\n else:\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n return \"{version}dev{tag_dist:04}-0x{commit}\".format(\n version=get_version(), # this should already be the next version\n tag_dist=tag_dist,\n commit=commit\n )\n\n\ndef archive_name(project):\n platform_tag = {\n \"Darwin\": \"osx\",\n \"Windows\": \"win32\",\n \"Linux\": \"linux\"\n }.get(platform.system(), platform.system())\n if platform.system() == \"Windows\":\n ext = \"zip\"\n else:\n ext = \"tar.gz\"\n return \"{project}-{version}-{platform}.{ext}\".format(\n project=project,\n version=get_version(),\n platform=platform_tag,\n ext=ext\n )\n\n\ndef wheel_name():\n return \"{project}-{version}-{py_version}-none-any.whl\".format(\n project=project[\"name\"],\n version=get_version(),\n py_version=project[\"python_version\"]\n )\n\n\[email protected]\ndef empty_pythonpath():\n \"\"\"\n Make sure that the regular python installation is not on the python path,\n which would give us access to modules installed outside of our virtualenv.\n \"\"\"\n pythonpath = os.environ.get(\"PYTHONPATH\", \"\")\n os.environ[\"PYTHONPATH\"] = \"\"\n yield\n os.environ[\"PYTHONPATH\"] = pythonpath\n\n\[email protected]\ndef chdir(path):\n old_dir = os.getcwd()\n os.chdir(path)\n yield\n os.chdir(old_dir)\n\n\ndef git(args):\n with chdir(ROOT_DIR):\n return subprocess.check_output([\"git\"] + shlex.split(args))\n\n\[email protected](chain=True)\ndef cli():\n \"\"\"\n mitmproxy build tool\n \"\"\"\n 
pass\n\n\[email protected](\"contributors\")\ndef contributors():\n \"\"\"\n Update CONTRIBUTORS.md\n \"\"\"\n with chdir(ROOT_DIR):\n print(\"Updating CONTRIBUTORS...\")\n contributors_data = git(\"shortlog -n -s\")\n with open(\"CONTRIBUTORS\", \"w\") as f:\n f.write(contributors_data)\n\n\[email protected](\"set-version\")\[email protected]('version')\ndef set_version(version):\n \"\"\"\n Update version information\n \"\"\"\n print(\"Update versions...\")\n version = \", \".join(version.split(\".\"))\n print(\"Update %s...\" % VERSION_FILE)\n with open(VERSION_FILE, \"rb\") as f:\n content = f.read()\n new_content = re.sub(\n r\"IVERSION\\s*=\\s*\\([\\d,\\s]+\\)\", \"IVERSION = (%s)\" % version,\n content\n )\n with open(VERSION_FILE, \"wb\") as f:\n f.write(new_content)\n\n\[email protected](\"wheels\")\ndef wheels():\n \"\"\"\n Build wheels\n \"\"\"\n with empty_pythonpath():\n print(\"Building release...\")\n if os.path.exists(DIST_DIR):\n shutil.rmtree(DIST_DIR)\n\n print(\"Creating wheel for %s ...\" % project[\"name\"])\n subprocess.check_call(\n [\n \"python\", \"./setup.py\", \"-q\",\n \"bdist_wheel\", \"--dist-dir\", DIST_DIR, \"--universal\"\n ],\n cwd=project[\"dir\"]\n )\n\n print(\"Creating virtualenv for test install...\")\n if os.path.exists(VENV_DIR):\n shutil.rmtree(VENV_DIR)\n subprocess.check_call([\"virtualenv\", \"-q\", VENV_DIR])\n\n with chdir(DIST_DIR):\n print(\"Installing %s...\" % project[\"name\"])\n # lxml...\n if platform.system() == \"Windows\" and sys.version_info[0] == 3:\n subprocess.check_call([VENV_PIP, \"install\", \"-q\", \"https://snapshots.mitmproxy.org/misc/lxml-3.6.0-cp35-cp35m-win32.whl\"])\n subprocess.check_call([VENV_PIP, \"install\", \"-q\", wheel_name()])\n\n print(\"Running binaries...\")\n for tool in project[\"tools\"]:\n tool = join(VENV_DIR, VENV_BIN, tool)\n print(\"> %s --version\" % tool)\n print(subprocess.check_output([tool, \"--version\"]))\n\n print(\"Virtualenv available for further testing:\")\n print(\"source %s\" % os.path.normpath(join(VENV_DIR, VENV_BIN, \"activate\")))\n\n\[email protected](\"bdist\")\[email protected](\"--use-existing-wheels/--no-use-existing-wheels\", default=False)\[email protected](\"pyinstaller_version\", envvar=\"PYINSTALLER_VERSION\", default=\"PyInstaller~=3.1.1\")\[email protected]_context\ndef bdist(ctx, use_existing_wheels, pyinstaller_version):\n \"\"\"\n Build a binary distribution\n \"\"\"\n if os.path.exists(PYINSTALLER_TEMP):\n shutil.rmtree(PYINSTALLER_TEMP)\n if os.path.exists(PYINSTALLER_DIST):\n shutil.rmtree(PYINSTALLER_DIST)\n\n if not use_existing_wheels:\n ctx.invoke(wheels)\n\n print(\"Installing PyInstaller...\")\n subprocess.check_call([VENV_PIP, \"install\", \"-q\", pyinstaller_version])\n\n for bdist_project, tools in project[\"bdists\"].items():\n with Archive(join(DIST_DIR, archive_name(bdist_project))) as archive:\n for tool in tools:\n # This is PyInstaller, so it messes up paths.\n # We need to make sure that we are in the spec folder.\n with chdir(RELEASE_SPEC_DIR):\n print(\"Building %s binary...\" % tool)\n subprocess.check_call(\n [\n VENV_PYINSTALLER,\n \"--clean\",\n \"--workpath\", PYINSTALLER_TEMP,\n \"--distpath\", PYINSTALLER_DIST,\n # This is PyInstaller, so setting a\n # different log level obviously breaks it :-)\n # \"--log-level\", \"WARN\",\n \"%s.spec\" % tool\n ]\n )\n\n # Test if it works at all O:-)\n executable = join(PYINSTALLER_DIST, tool)\n if platform.system() == \"Windows\":\n executable += \".exe\"\n print(\"> %s --version\" % executable)\n 
subprocess.check_call([executable, \"--version\"])\n\n archive.add(executable, os.path.basename(executable))\n print(\"Packed {}.\".format(archive_name(bdist_project)))\n\n\[email protected](\"upload-release\")\[email protected]('--username', prompt=True)\[email protected]_option(confirmation_prompt=False)\[email protected]('--repository', default=\"pypi\")\ndef upload_release(username, password, repository):\n \"\"\"\n Upload wheels to PyPI\n \"\"\"\n filename = wheel_name()\n print(\"Uploading {} to {}...\".format(filename, repository))\n subprocess.check_call([\n \"twine\",\n \"upload\",\n \"-u\", username,\n \"-p\", password,\n \"-r\", repository,\n join(DIST_DIR, filename)\n ])\n\n\[email protected](\"upload-snapshot\")\[email protected](\"--host\", envvar=\"SNAPSHOT_HOST\", prompt=True)\[email protected](\"--port\", envvar=\"SNAPSHOT_PORT\", type=int, default=22)\[email protected](\"--user\", envvar=\"SNAPSHOT_USER\", prompt=True)\[email protected](\"--private-key\", default=join(RELEASE_DIR, \"rtool.pem\"))\[email protected](\"--private-key-password\", envvar=\"SNAPSHOT_PASS\", prompt=True, hide_input=True)\[email protected](\"--wheel/--no-wheel\", default=False)\[email protected](\"--bdist/--no-bdist\", default=False)\ndef upload_snapshot(host, port, user, private_key, private_key_password, wheel, bdist):\n \"\"\"\n Upload snapshot to snapshot server\n \"\"\"\n with pysftp.Connection(host=host,\n port=port,\n username=user,\n private_key=private_key,\n private_key_pass=private_key_password) as sftp:\n\n dir_name = \"snapshots/v{}\".format(get_version())\n sftp.makedirs(dir_name)\n with sftp.cd(dir_name):\n files = []\n if wheel:\n files.append(wheel_name())\n for bdist in project[\"bdists\"].keys():\n files.append(archive_name(bdist))\n\n for f in files:\n local_path = join(DIST_DIR, f)\n remote_filename = f.replace(get_version(), get_snapshot_version())\n symlink_path = \"../{}\".format(f.replace(get_version(), \"latest\"))\n\n # Delete old versions\n old_version = f.replace(get_version(), \"*\")\n for f_old in sftp.listdir():\n if fnmatch.fnmatch(f_old, old_version):\n print(\"Removing {}...\".format(f_old))\n sftp.remove(f_old)\n\n # Upload new version\n print(\"Uploading {} as {}...\".format(f, remote_filename))\n with click.progressbar(length=os.stat(local_path).st_size) as bar:\n sftp.put(\n local_path,\n \".\" + remote_filename,\n callback=lambda done, total: bar.update(done - bar.pos)\n )\n # We hide the file during upload.\n sftp.rename(\".\" + remote_filename, remote_filename)\n\n # update symlink for the latest release\n if sftp.lexists(symlink_path):\n print(\"Removing {}...\".format(symlink_path))\n sftp.remove(symlink_path)\n sftp.symlink(\"v{}/{}\".format(get_version(), remote_filename), symlink_path)\n\n\[email protected](\"wizard\")\[email protected]('--next-version', prompt=True)\[email protected]('--username', prompt=\"PyPI Username\")\[email protected]_option(confirmation_prompt=False, prompt=\"PyPI Password\")\[email protected]('--repository', default=\"pypi\")\[email protected]_context\ndef wizard(ctx, next_version, username, password, repository):\n \"\"\"\n Interactive Release Wizard\n \"\"\"\n is_dirty = git(\"status --porcelain\")\n if is_dirty:\n raise RuntimeError(\"Repository is not clean.\")\n\n # update contributors file\n ctx.invoke(contributors)\n\n # Build test release\n ctx.invoke(bdist)\n\n try:\n click.confirm(\"Please test the release now. 
Is it ok?\", abort=True)\n except click.Abort:\n # undo changes\n git(\"checkout CONTRIBUTORS\")\n raise\n\n # Everything ok - let's ship it!\n git(\"tag v{}\".format(get_version()))\n git(\"push --tags\")\n ctx.invoke(\n upload_release,\n username=username, password=password, repository=repository\n )\n\n click.confirm(\"Now please wait until CI has built binaries. Finished?\")\n\n # version bump commit\n ctx.invoke(set_version, version=next_version)\n git(\"commit -a -m \\\"bump version\\\"\")\n git(\"push\")\n\n click.echo(\"All done!\")\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "release/rtool.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import absolute_import, print_function, division\nfrom os.path import join\nimport contextlib\nimport os\nimport shutil\nimport subprocess\nimport re\nimport shlex\nimport runpy\nimport zipfile\nimport tarfile\nimport platform\nimport click\nimport pysftp\nimport fnmatch\n\n# https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes\n# scripts and executables on Windows go in ENV\\Scripts\\ instead of ENV/bin/\nimport sys\n\nif platform.system() == \"Windows\":\n VENV_BIN = \"Scripts\"\nelse:\n VENV_BIN = \"bin\"\n\nif platform.system() == \"Windows\":\n def Archive(name):\n a = zipfile.ZipFile(name, \"w\")\n a.add = a.write\n return a\nelse:\n def Archive(name):\n return tarfile.open(name, \"w:gz\")\n\nRELEASE_DIR = join(os.path.dirname(os.path.realpath(__file__)))\nDIST_DIR = join(RELEASE_DIR, \"dist\")\nROOT_DIR = os.path.normpath(join(RELEASE_DIR, \"..\"))\nRELEASE_SPEC_DIR = join(RELEASE_DIR, \"specs\")\nVERSION_FILE = join(ROOT_DIR, \"netlib/version.py\")\n\nBUILD_DIR = join(RELEASE_DIR, \"build\")\nPYINSTALLER_TEMP = join(BUILD_DIR, \"pyinstaller\")\nPYINSTALLER_DIST = join(BUILD_DIR, \"binaries\")\n\nVENV_DIR = join(BUILD_DIR, \"venv\")\nVENV_PIP = join(VENV_DIR, VENV_BIN, \"pip\")\nVENV_PYINSTALLER = join(VENV_DIR, VENV_BIN, \"pyinstaller\")\n\nproject = {\n \"name\": \"mitmproxy\",\n \"tools\": [\"pathod\", \"pathoc\", \"mitmproxy\", \"mitmdump\", \"mitmweb\"],\n \"bdists\": {\n \"mitmproxy\": [\"mitmproxy\", \"mitmdump\", \"mitmweb\"],\n \"pathod\": [\"pathoc\", \"pathod\"]\n },\n \"dir\": ROOT_DIR,\n \"python_version\": \"py2.py3\",\n}\nif platform.system() == \"Windows\":\n project[\"tools\"].remove(\"mitmproxy\")\n project[\"bdists\"][\"mitmproxy\"].remove(\"mitmproxy\")\n\n\ndef get_version():\n return runpy.run_path(VERSION_FILE)[\"VERSION\"]\n\n\ndef get_snapshot_version():\n last_tag, tag_dist, commit = git(\"describe --tags --long\").strip().rsplit(b\"-\", 2)\n tag_dist = int(tag_dist)\n if tag_dist == 0:\n return get_version()\n else:\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n return \"{version}dev{tag_dist:04}-0x{commit}\".format(\n version=get_version(), # this should already be the next version\n tag_dist=tag_dist,\n commit=commit.decode()\n )\n\n\ndef archive_name(project):\n platform_tag = {\n \"Darwin\": \"osx\",\n \"Windows\": \"win32\",\n \"Linux\": \"linux\"\n }.get(platform.system(), platform.system())\n if platform.system() == \"Windows\":\n ext = \"zip\"\n else:\n ext = \"tar.gz\"\n return \"{project}-{version}-{platform}.{ext}\".format(\n project=project,\n version=get_version(),\n platform=platform_tag,\n ext=ext\n )\n\n\ndef wheel_name():\n return \"{project}-{version}-{py_version}-none-any.whl\".format(\n project=project[\"name\"],\n version=get_version(),\n py_version=project[\"python_version\"]\n )\n\n\[email protected]\ndef empty_pythonpath():\n \"\"\"\n Make sure that the regular python installation is not on the python path,\n which would give us access to modules installed outside of our virtualenv.\n \"\"\"\n pythonpath = os.environ.get(\"PYTHONPATH\", \"\")\n os.environ[\"PYTHONPATH\"] = \"\"\n yield\n os.environ[\"PYTHONPATH\"] = pythonpath\n\n\[email protected]\ndef chdir(path):\n old_dir = os.getcwd()\n os.chdir(path)\n yield\n os.chdir(old_dir)\n\n\ndef git(args):\n with chdir(ROOT_DIR):\n return subprocess.check_output([\"git\"] + shlex.split(args))\n\n\[email protected](chain=True)\ndef cli():\n \"\"\"\n mitmproxy build tool\n 
\"\"\"\n pass\n\n\[email protected](\"contributors\")\ndef contributors():\n \"\"\"\n Update CONTRIBUTORS.md\n \"\"\"\n with chdir(ROOT_DIR):\n print(\"Updating CONTRIBUTORS...\")\n contributors_data = git(\"shortlog -n -s\")\n with open(\"CONTRIBUTORS\", \"w\") as f:\n f.write(contributors_data)\n\n\[email protected](\"set-version\")\[email protected]('version')\ndef set_version(version):\n \"\"\"\n Update version information\n \"\"\"\n print(\"Update versions...\")\n version = \", \".join(version.split(\".\"))\n print(\"Update %s...\" % VERSION_FILE)\n with open(VERSION_FILE, \"rb\") as f:\n content = f.read()\n new_content = re.sub(\n r\"IVERSION\\s*=\\s*\\([\\d,\\s]+\\)\", \"IVERSION = (%s)\" % version,\n content\n )\n with open(VERSION_FILE, \"wb\") as f:\n f.write(new_content)\n\n\[email protected](\"wheels\")\ndef wheels():\n \"\"\"\n Build wheels\n \"\"\"\n with empty_pythonpath():\n print(\"Building release...\")\n if os.path.exists(DIST_DIR):\n shutil.rmtree(DIST_DIR)\n\n print(\"Creating wheel for %s ...\" % project[\"name\"])\n subprocess.check_call(\n [\n \"python\", \"./setup.py\", \"-q\",\n \"bdist_wheel\", \"--dist-dir\", DIST_DIR, \"--universal\"\n ],\n cwd=project[\"dir\"]\n )\n\n print(\"Creating virtualenv for test install...\")\n if os.path.exists(VENV_DIR):\n shutil.rmtree(VENV_DIR)\n subprocess.check_call([\"virtualenv\", \"-q\", VENV_DIR])\n\n with chdir(DIST_DIR):\n print(\"Installing %s...\" % project[\"name\"])\n # lxml...\n if platform.system() == \"Windows\" and sys.version_info[0] == 3:\n subprocess.check_call([VENV_PIP, \"install\", \"-q\", \"https://snapshots.mitmproxy.org/misc/lxml-3.6.0-cp35-cp35m-win32.whl\"])\n subprocess.check_call([VENV_PIP, \"install\", \"-q\", wheel_name()])\n\n print(\"Running binaries...\")\n for tool in project[\"tools\"]:\n tool = join(VENV_DIR, VENV_BIN, tool)\n print(\"> %s --version\" % tool)\n print(subprocess.check_output([tool, \"--version\"]))\n\n print(\"Virtualenv available for further testing:\")\n print(\"source %s\" % os.path.normpath(join(VENV_DIR, VENV_BIN, \"activate\")))\n\n\[email protected](\"bdist\")\[email protected](\"--use-existing-wheels/--no-use-existing-wheels\", default=False)\[email protected](\"pyinstaller_version\", envvar=\"PYINSTALLER_VERSION\", default=\"PyInstaller~=3.1.1\")\[email protected]_context\ndef bdist(ctx, use_existing_wheels, pyinstaller_version):\n \"\"\"\n Build a binary distribution\n \"\"\"\n if os.path.exists(PYINSTALLER_TEMP):\n shutil.rmtree(PYINSTALLER_TEMP)\n if os.path.exists(PYINSTALLER_DIST):\n shutil.rmtree(PYINSTALLER_DIST)\n\n if not use_existing_wheels:\n ctx.invoke(wheels)\n\n print(\"Installing PyInstaller...\")\n subprocess.check_call([VENV_PIP, \"install\", \"-q\", pyinstaller_version])\n\n for bdist_project, tools in project[\"bdists\"].items():\n with Archive(join(DIST_DIR, archive_name(bdist_project))) as archive:\n for tool in tools:\n # This is PyInstaller, so it messes up paths.\n # We need to make sure that we are in the spec folder.\n with chdir(RELEASE_SPEC_DIR):\n print(\"Building %s binary...\" % tool)\n subprocess.check_call(\n [\n VENV_PYINSTALLER,\n \"--clean\",\n \"--workpath\", PYINSTALLER_TEMP,\n \"--distpath\", PYINSTALLER_DIST,\n # This is PyInstaller, so setting a\n # different log level obviously breaks it :-)\n # \"--log-level\", \"WARN\",\n \"%s.spec\" % tool\n ]\n )\n\n # Test if it works at all O:-)\n executable = join(PYINSTALLER_DIST, tool)\n if platform.system() == \"Windows\":\n executable += \".exe\"\n print(\"> %s --version\" % 
executable)\n subprocess.check_call([executable, \"--version\"])\n\n archive.add(executable, os.path.basename(executable))\n print(\"Packed {}.\".format(archive_name(bdist_project)))\n\n\[email protected](\"upload-release\")\[email protected]('--username', prompt=True)\[email protected]_option(confirmation_prompt=False)\[email protected]('--repository', default=\"pypi\")\ndef upload_release(username, password, repository):\n \"\"\"\n Upload wheels to PyPI\n \"\"\"\n filename = wheel_name()\n print(\"Uploading {} to {}...\".format(filename, repository))\n subprocess.check_call([\n \"twine\",\n \"upload\",\n \"-u\", username,\n \"-p\", password,\n \"-r\", repository,\n join(DIST_DIR, filename)\n ])\n\n\[email protected](\"upload-snapshot\")\[email protected](\"--host\", envvar=\"SNAPSHOT_HOST\", prompt=True)\[email protected](\"--port\", envvar=\"SNAPSHOT_PORT\", type=int, default=22)\[email protected](\"--user\", envvar=\"SNAPSHOT_USER\", prompt=True)\[email protected](\"--private-key\", default=join(RELEASE_DIR, \"rtool.pem\"))\[email protected](\"--private-key-password\", envvar=\"SNAPSHOT_PASS\", prompt=True, hide_input=True)\[email protected](\"--wheel/--no-wheel\", default=False)\[email protected](\"--bdist/--no-bdist\", default=False)\ndef upload_snapshot(host, port, user, private_key, private_key_password, wheel, bdist):\n \"\"\"\n Upload snapshot to snapshot server\n \"\"\"\n with pysftp.Connection(host=host,\n port=port,\n username=user,\n private_key=private_key,\n private_key_pass=private_key_password) as sftp:\n\n dir_name = \"snapshots/v{}\".format(get_version())\n sftp.makedirs(dir_name)\n with sftp.cd(dir_name):\n files = []\n if wheel:\n files.append(wheel_name())\n for bdist in project[\"bdists\"].keys():\n files.append(archive_name(bdist))\n\n for f in files:\n local_path = join(DIST_DIR, f)\n remote_filename = f.replace(get_version(), get_snapshot_version())\n symlink_path = \"../{}\".format(f.replace(get_version(), \"latest\"))\n\n # Delete old versions\n old_version = f.replace(get_version(), \"*\")\n for f_old in sftp.listdir():\n if fnmatch.fnmatch(f_old, old_version):\n print(\"Removing {}...\".format(f_old))\n sftp.remove(f_old)\n\n # Upload new version\n print(\"Uploading {} as {}...\".format(f, remote_filename))\n with click.progressbar(length=os.stat(local_path).st_size) as bar:\n sftp.put(\n local_path,\n \".\" + remote_filename,\n callback=lambda done, total: bar.update(done - bar.pos)\n )\n # We hide the file during upload.\n sftp.rename(\".\" + remote_filename, remote_filename)\n\n # update symlink for the latest release\n if sftp.lexists(symlink_path):\n print(\"Removing {}...\".format(symlink_path))\n sftp.remove(symlink_path)\n sftp.symlink(\"v{}/{}\".format(get_version(), remote_filename), symlink_path)\n\n\[email protected](\"wizard\")\[email protected]('--next-version', prompt=True)\[email protected]('--username', prompt=\"PyPI Username\")\[email protected]_option(confirmation_prompt=False, prompt=\"PyPI Password\")\[email protected]('--repository', default=\"pypi\")\[email protected]_context\ndef wizard(ctx, next_version, username, password, repository):\n \"\"\"\n Interactive Release Wizard\n \"\"\"\n is_dirty = git(\"status --porcelain\")\n if is_dirty:\n raise RuntimeError(\"Repository is not clean.\")\n\n # update contributors file\n ctx.invoke(contributors)\n\n # Build test release\n ctx.invoke(bdist)\n\n try:\n click.confirm(\"Please test the release now. 
Is it ok?\", abort=True)\n except click.Abort:\n # undo changes\n git(\"checkout CONTRIBUTORS\")\n raise\n\n # Everything ok - let's ship it!\n git(\"tag v{}\".format(get_version()))\n git(\"push --tags\")\n ctx.invoke(\n upload_release,\n username=username, password=password, repository=repository\n )\n\n click.confirm(\"Now please wait until CI has built binaries. Finished?\")\n\n # version bump commit\n ctx.invoke(set_version, version=next_version)\n git(\"commit -a -m \\\"bump version\\\"\")\n git(\"push\")\n\n click.echo(\"All done!\")\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "release/rtool.py" } ]
diff --git a/release/rtool.py b/release/rtool.py index 04e1249d07..4e43eaefd2 100755 --- a/release/rtool.py +++ b/release/rtool.py @@ -76,7 +76,7 @@ def get_snapshot_version(): return "{version}dev{tag_dist:04}-0x{commit}".format( version=get_version(), # this should already be the next version tag_dist=tag_dist, - commit=commit + commit=commit.decode() )
cloud-custodian__cloud-custodian-3597
cli metrics subcommand and azure - throws errors @kapilt what should the expected behavior be here? ``` (cloud-custodian) $ custodian metrics policies/policy.yml 2019-02-20 11:19:18,346: custodian.azure.session:INFO Creating session with Azure CLI Authentication 2019-02-20 11:19:18,347: custodian.azure.session:INFO Session using Subscription ID: <my sub redacted> 2019-02-20 11:19:18,347: custodian.commands:INFO Getting <Policy resource: azure.resourcegroup name: delete-empty-resource-groups region: > metrics Traceback (most recent call last): File "/Users/andyluong/Projects/forks/cloud-custodian/bin/custodian", line 11, in <module> load_entry_point('c7n', 'console_scripts', 'custodian')() File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/cli.py", line 368, in main command(config) File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/commands.py", line 136, in _load_policies return f(options, list(policies)) File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/commands.py", line 491, in metrics_cmd data[p.name] = p.get_metrics(start, end, options.period) File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/policy.py", line 912, in get_metrics return mode.get_metrics(start, end, period) File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/policy.py", line 170, in get_metrics client = session.client('cloudwatch') File "/Users/andyluong/Projects/forks/cloud-custodian/tools/c7n_azure/c7n_azure/session.py", line 148, in client service_name, client_name = client.rsplit('.', 1) ValueError: not enough values to unpack (expected 2, got 1) ```
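The failure in the traceback is easy to reproduce in isolation: the generic `metrics` command asks the session for the AWS-style client name `'cloudwatch'`, while the `c7n_azure` session's `client()` splits its argument on the last dot into two names. With no dot present, `rsplit` returns a single element and the two-name unpack raises. A standalone sketch follows — not c7n code; the Azure-style dotted name is only a representative example.

```python
# Standalone illustration of the ValueError in the traceback above.
azure_style = "azure.mgmt.monitor.MonitorManagementClient"  # representative dotted name
aws_style = "cloudwatch"                                     # what the metrics command passes

print(azure_style.rsplit(".", 1))  # ['azure.mgmt.monitor', 'MonitorManagementClient']
print(aws_style.rsplit(".", 1))    # ['cloudwatch'] -- only one element

try:
    # Mirrors the line in c7n_azure/session.py shown in the traceback.
    service_name, client_name = aws_style.rsplit(".", 1)
except ValueError as e:
    print(e)  # not enough values to unpack (expected 2, got 1)
```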
[ { "content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Counter, defaultdict\nfrom datetime import timedelta, datetime\nfrom functools import wraps\nimport inspect\nimport json\nimport logging\nimport os\nimport pprint\nimport sys\nimport time\n\nimport six\nimport yaml\n\nfrom c7n.exceptions import ClientError\nfrom c7n.provider import clouds\nfrom c7n.policy import Policy, PolicyCollection, load as policy_load\nfrom c7n.utils import dumps, load_file, local_session\nfrom c7n.config import Bag, Config\nfrom c7n import provider\nfrom c7n.resources import load_resources\n\n\nlog = logging.getLogger('custodian.commands')\n\n\ndef policy_command(f):\n\n @wraps(f)\n def _load_policies(options):\n\n validate = True\n if 'skip_validation' in options:\n validate = not options.skip_validation\n\n if not validate:\n log.debug('Policy validation disabled')\n\n load_resources()\n vars = _load_vars(options)\n\n errors = 0\n all_policies = PolicyCollection.from_data({}, options)\n\n # for a default region for policy loading, we'll expand regions later.\n options.region = \"\"\n for fp in options.configs:\n try:\n collection = policy_load(options, fp, validate=validate, vars=vars)\n except IOError:\n log.error('policy file does not exist ({})'.format(fp))\n errors += 1\n continue\n except yaml.YAMLError as e:\n log.error(\n \"yaml syntax error loading policy file ({}) error:\\n {}\".format(\n fp, e))\n errors += 1\n continue\n except ValueError as e:\n log.error('problem loading policy file ({}) error: {}'.format(\n fp, str(e)))\n errors += 1\n continue\n\n if collection is None:\n log.debug('Loaded file {}. Contained no policies.'.format(fp))\n else:\n log.debug(\n 'Loaded file {}. Contains {} policies'.format(\n fp, len(collection)))\n all_policies = all_policies + collection\n\n if errors > 0:\n log.error('Found {} errors. Exiting.'.format(errors))\n sys.exit(1)\n\n # filter by name and resource type\n policies = all_policies.filter(\n getattr(options, 'policy_filter', None),\n getattr(options, 'resource_type', None))\n\n # provider initialization\n provider_policies = {}\n for p in policies:\n provider_policies.setdefault(p.provider_name, []).append(p)\n\n policies = PolicyCollection.from_data({}, options)\n for provider_name in provider_policies:\n provider = clouds[provider_name]()\n p_options = provider.initialize(options)\n policies += provider.initialize_policies(\n PolicyCollection(provider_policies[provider_name], p_options),\n p_options)\n\n if len(policies) == 0:\n _print_no_policies_warning(options, all_policies)\n # If we filtered out all the policies we want to exit with a\n # non-zero status. 
But if the policy file is empty then continue\n # on to the specific command to determine the exit status.\n if len(all_policies) > 0:\n sys.exit(1)\n\n # Do not allow multiple policies in a region with the same name,\n # even across files\n policies_by_region = defaultdict(list)\n for p in policies:\n policies_by_region[p.options.region].append(p)\n for region in policies_by_region.keys():\n counts = Counter([p.name for p in policies_by_region[region]])\n for policy, count in six.iteritems(counts):\n if count > 1:\n log.error(\"duplicate policy name '{}'\".format(policy))\n sys.exit(1)\n\n # Variable expansion and non schema validation (not optional)\n for p in policies:\n p.expand_variables(p.get_variables())\n p.validate()\n\n return f(options, list(policies))\n\n return _load_policies\n\n\ndef _load_vars(options):\n vars = None\n if options.vars:\n try:\n vars = load_file(options.vars)\n except IOError as e:\n log.error('Problem loading vars file \"{}\": {}'.format(options.vars, e.strerror))\n sys.exit(1)\n\n # TODO - provide builtin vars here (such as account)\n\n return vars\n\n\ndef _print_no_policies_warning(options, policies):\n if options.policy_filter or options.resource_type:\n log.warning(\"Warning: no policies matched the filters provided.\")\n\n log.warning(\"Filters:\")\n if options.policy_filter:\n log.warning(\" Policy name filter (-p): \" + options.policy_filter)\n if options.resource_type:\n log.warning(\" Resource type filter (-t): \" + options.resource_type)\n\n log.warning(\"Available policies:\")\n for policy in policies:\n log.warning(\" - {} ({})\".format(policy.name, policy.resource_type))\n if not policies:\n log.warning(\" (none)\")\n else:\n log.warning('Empty policy file(s). Nothing to do.')\n\n\ndef validate(options):\n from c7n import schema\n load_resources()\n if len(options.configs) < 1:\n log.error('no config files specified')\n sys.exit(1)\n\n used_policy_names = set()\n schm = schema.generate()\n errors = []\n\n for config_file in options.configs:\n config_file = os.path.expanduser(config_file)\n if not os.path.exists(config_file):\n raise ValueError(\"Invalid path for config %r\" % config_file)\n\n options.dryrun = True\n fmt = config_file.rsplit('.', 1)[-1]\n with open(config_file) as fh:\n if fmt in ('yml', 'yaml'):\n data = yaml.safe_load(fh.read())\n elif fmt in ('json',):\n data = json.load(fh)\n else:\n log.error(\"The config file must end in .json, .yml or .yaml.\")\n raise ValueError(\"The config file must end in .json, .yml or .yaml.\")\n\n errors += schema.validate(data, schm)\n conf_policy_names = {\n p.get('name', 'unknown') for p in data.get('policies', ())}\n dupes = conf_policy_names.intersection(used_policy_names)\n if len(dupes) >= 1:\n errors.append(ValueError(\n \"Only one policy with a given name allowed, duplicates: %s\" % (\n \", \".join(dupes)\n )\n ))\n used_policy_names = used_policy_names.union(conf_policy_names)\n if not errors:\n null_config = Config.empty(dryrun=True, account_id='na', region='na')\n for p in data.get('policies', ()):\n try:\n policy = Policy(p, null_config, Bag())\n policy.validate()\n except Exception as e:\n msg = \"Policy: %s is invalid: %s\" % (\n p.get('name', 'unknown'), e)\n errors.append(msg)\n if not errors:\n log.info(\"Configuration valid: {}\".format(config_file))\n continue\n\n log.error(\"Configuration invalid: {}\".format(config_file))\n for e in errors:\n log.error(\"%s\" % e)\n if errors:\n sys.exit(1)\n\n\n@policy_command\ndef run(options, policies):\n exit_code = 0\n\n # AWS - Sanity 
check that we have an assumable role before executing policies\n # Todo - move this behind provider interface\n if options.assume_role and [p for p in policies if p.provider_name == 'aws']:\n try:\n local_session(clouds['aws']().get_session_factory(options))\n except ClientError:\n log.exception(\"Unable to assume role %s\", options.assume_role)\n sys.exit(1)\n\n for policy in policies:\n try:\n policy()\n except Exception:\n exit_code = 2\n if options.debug:\n raise\n log.exception(\n \"Error while executing policy %s, continuing\" % (\n policy.name))\n if exit_code != 0:\n sys.exit(exit_code)\n\n\n@policy_command\ndef report(options, policies):\n from c7n.reports import report as do_report\n if len(policies) == 0:\n log.error('Error: must supply at least one policy')\n sys.exit(1)\n\n resources = set([p.resource_type for p in policies])\n if len(resources) > 1:\n log.error('Error: Report subcommand can accept multiple policies, '\n 'but they must all be for the same resource.')\n sys.exit(1)\n\n delta = timedelta(days=options.days)\n begin_date = datetime.now() - delta\n do_report(\n policies, begin_date, options, sys.stdout, raw_output_fh=options.raw)\n\n\n@policy_command\ndef logs(options, policies):\n if len(policies) != 1:\n log.error(\"Log subcommand requires exactly one policy\")\n sys.exit(1)\n\n policy = policies.pop()\n # initialize policy execution context for access to outputs\n policy.ctx.initialize()\n\n for e in policy.get_logs(options.start, options.end):\n print(\"%s: %s\" % (\n time.strftime(\n \"%Y-%m-%d %H:%M:%S\", time.localtime(e['timestamp'] / 1000)),\n e['message']))\n\n\ndef _schema_get_docstring(starting_class):\n \"\"\" Given a class, return its docstring.\n\n If no docstring is present for the class, search base classes in MRO for a\n docstring.\n \"\"\"\n for cls in inspect.getmro(starting_class):\n if inspect.getdoc(cls):\n return inspect.getdoc(cls)\n\n\ndef schema_completer(prefix):\n \"\"\" For tab-completion via argcomplete, return completion options.\n\n For the given prefix so far, return the possible options. Note that\n filtering via startswith happens after this list is returned.\n \"\"\"\n from c7n import schema\n load_resources()\n components = prefix.split('.')\n\n if components[0] in provider.clouds.keys():\n cloud_provider = components.pop(0)\n provider_resources = provider.resources(cloud_provider)\n else:\n cloud_provider = 'aws'\n provider_resources = provider.resources('aws')\n components[0] = \"aws.%s\" % components[0]\n\n # Completions for resource\n if len(components) == 1:\n choices = [r for r in provider.resources().keys()\n if r.startswith(components[0])]\n if len(choices) == 1:\n choices += ['{}{}'.format(choices[0], '.')]\n return choices\n\n if components[0] not in provider_resources.keys():\n return []\n\n # Completions for category\n if len(components) == 2:\n choices = ['{}.{}'.format(components[0], x)\n for x in ('actions', 'filters') if x.startswith(components[1])]\n if len(choices) == 1:\n choices += ['{}{}'.format(choices[0], '.')]\n return choices\n\n # Completions for item\n elif len(components) == 3:\n resource_mapping = schema.resource_vocabulary(cloud_provider)\n return ['{}.{}.{}'.format(components[0], components[1], x)\n for x in resource_mapping[components[0]][components[1]]]\n\n return []\n\n\ndef schema_cmd(options):\n \"\"\" Print info about the resources, actions and filters available. 
\"\"\"\n from c7n import schema\n if options.json:\n schema.json_dump(options.resource)\n return\n\n load_resources()\n resource_mapping = schema.resource_vocabulary()\n if options.summary:\n schema.summary(resource_mapping)\n return\n\n # Here are the formats for what we accept:\n # - No argument\n # - List all available RESOURCES\n # - PROVIDER\n # - List all available RESOURCES for supplied PROVIDER\n # - RESOURCE\n # - List all available actions and filters for supplied RESOURCE\n # - RESOURCE.actions\n # - List all available actions for supplied RESOURCE\n # - RESOURCE.actions.ACTION\n # - Show class doc string and schema for supplied action\n # - RESOURCE.filters\n # - List all available filters for supplied RESOURCE\n # - RESOURCE.filters.FILTER\n # - Show class doc string and schema for supplied filter\n\n if not options.resource:\n resource_list = {'resources': sorted(provider.resources().keys())}\n print(yaml.safe_dump(resource_list, default_flow_style=False))\n return\n\n # Format is [PROVIDER].RESOURCE.CATEGORY.ITEM\n # optional provider defaults to aws for compatibility\n components = options.resource.lower().split('.')\n if len(components) == 1 and components[0] in provider.clouds.keys():\n resource_list = {'resources': sorted(\n provider.resources(cloud_provider=components[0]).keys())}\n print(yaml.safe_dump(resource_list, default_flow_style=False))\n return\n if components[0] in provider.clouds.keys():\n cloud_provider = components.pop(0)\n resource_mapping = schema.resource_vocabulary(\n cloud_provider)\n components[0] = '%s.%s' % (cloud_provider, components[0])\n else:\n resource_mapping = schema.resource_vocabulary('aws')\n components[0] = 'aws.%s' % components[0]\n #\n # Handle resource\n #\n resource = components[0]\n if resource not in resource_mapping:\n log.error('{} is not a valid resource'.format(resource))\n sys.exit(1)\n\n if len(components) == 1:\n del(resource_mapping[resource]['classes'])\n output = {resource: resource_mapping[resource]}\n print(yaml.safe_dump(output))\n return\n\n #\n # Handle category\n #\n category = components[1]\n if category not in ('actions', 'filters'):\n log.error(\"Valid choices are 'actions' and 'filters'. You supplied '{}'\".format(category))\n sys.exit(1)\n\n if len(components) == 2:\n output = \"No {} available for resource {}.\".format(category, resource)\n if category in resource_mapping[resource]:\n output = {resource: {\n category: resource_mapping[resource][category]}}\n print(yaml.safe_dump(output))\n return\n\n #\n # Handle item\n #\n item = components[2]\n if item not in resource_mapping[resource][category]:\n log.error('{} is not in the {} list for resource {}'.format(item, category, resource))\n sys.exit(1)\n\n if len(components) == 3:\n cls = resource_mapping[resource]['classes'][category][item]\n\n # Print docstring\n docstring = _schema_get_docstring(cls)\n print(\"\\nHelp\\n----\\n\")\n if docstring:\n print(docstring)\n else:\n # Shouldn't ever hit this, so exclude from cover\n print(\"No help is available for this item.\") # pragma: no cover\n\n # Print schema\n print(\"\\nSchema\\n------\\n\")\n if hasattr(cls, 'schema'):\n component_schema = dict(cls.schema)\n component_schema.pop('additionalProperties', None)\n component_schema.pop('type', None)\n print(yaml.safe_dump(component_schema))\n else:\n # Shouldn't ever hit this, so exclude from cover\n print(\"No schema is available for this item.\", file=sys.sterr) # pragma: no cover\n print('')\n return\n\n # We received too much (e.g. 
s3.actions.foo.bar)\n log.error(\"Invalid selector '{}'. Max of 3 components in the \"\n \"format RESOURCE.CATEGORY.ITEM\".format(options.resource))\n sys.exit(1)\n\n\ndef _metrics_get_endpoints(options):\n \"\"\" Determine the start and end dates based on user-supplied options. \"\"\"\n if bool(options.start) ^ bool(options.end):\n log.error('--start and --end must be specified together')\n sys.exit(1)\n\n if options.start and options.end:\n start = options.start\n end = options.end\n else:\n end = datetime.utcnow()\n start = end - timedelta(options.days)\n\n return start, end\n\n\n@policy_command\ndef metrics_cmd(options, policies):\n start, end = _metrics_get_endpoints(options)\n data = {}\n for p in policies:\n log.info('Getting %s metrics', p)\n data[p.name] = p.get_metrics(start, end, options.period)\n print(dumps(data, indent=2))\n\n\ndef version_cmd(options):\n from c7n.version import version\n\n if not options.debug:\n print(version)\n return\n\n indent = 13\n pp = pprint.PrettyPrinter(indent=indent)\n\n print(\"\\nPlease copy/paste the following info along with any bug reports:\\n\")\n print(\"Custodian: \", version)\n pyversion = sys.version.replace('\\n', '\\n' + ' ' * indent) # For readability\n print(\"Python: \", pyversion)\n # os.uname is only available on recent versions of Unix\n try:\n print(\"Platform: \", os.uname())\n except Exception: # pragma: no cover\n print(\"Platform: \", sys.platform)\n print(\"Using venv: \", hasattr(sys, 'real_prefix'))\n print(\"PYTHONPATH: \")\n pp.pprint(sys.path)\n", "path": "c7n/commands.py" } ]
[ { "content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Counter, defaultdict\nfrom datetime import timedelta, datetime\nfrom functools import wraps\nimport inspect\nimport json\nimport logging\nimport os\nimport pprint\nimport sys\nimport time\n\nimport six\nimport yaml\n\nfrom c7n.exceptions import ClientError\nfrom c7n.provider import clouds\nfrom c7n.policy import Policy, PolicyCollection, load as policy_load\nfrom c7n.utils import dumps, load_file, local_session\nfrom c7n.config import Bag, Config\nfrom c7n import provider\nfrom c7n.resources import load_resources\n\n\nlog = logging.getLogger('custodian.commands')\n\n\ndef policy_command(f):\n\n @wraps(f)\n def _load_policies(options):\n\n validate = True\n if 'skip_validation' in options:\n validate = not options.skip_validation\n\n if not validate:\n log.debug('Policy validation disabled')\n\n load_resources()\n vars = _load_vars(options)\n\n errors = 0\n all_policies = PolicyCollection.from_data({}, options)\n\n # for a default region for policy loading, we'll expand regions later.\n options.region = \"\"\n for fp in options.configs:\n try:\n collection = policy_load(options, fp, validate=validate, vars=vars)\n except IOError:\n log.error('policy file does not exist ({})'.format(fp))\n errors += 1\n continue\n except yaml.YAMLError as e:\n log.error(\n \"yaml syntax error loading policy file ({}) error:\\n {}\".format(\n fp, e))\n errors += 1\n continue\n except ValueError as e:\n log.error('problem loading policy file ({}) error: {}'.format(\n fp, str(e)))\n errors += 1\n continue\n\n if collection is None:\n log.debug('Loaded file {}. Contained no policies.'.format(fp))\n else:\n log.debug(\n 'Loaded file {}. Contains {} policies'.format(\n fp, len(collection)))\n all_policies = all_policies + collection\n\n if errors > 0:\n log.error('Found {} errors. Exiting.'.format(errors))\n sys.exit(1)\n\n # filter by name and resource type\n policies = all_policies.filter(\n getattr(options, 'policy_filter', None),\n getattr(options, 'resource_type', None))\n\n # provider initialization\n provider_policies = {}\n for p in policies:\n provider_policies.setdefault(p.provider_name, []).append(p)\n\n policies = PolicyCollection.from_data({}, options)\n for provider_name in provider_policies:\n provider = clouds[provider_name]()\n p_options = provider.initialize(options)\n policies += provider.initialize_policies(\n PolicyCollection(provider_policies[provider_name], p_options),\n p_options)\n\n if len(policies) == 0:\n _print_no_policies_warning(options, all_policies)\n # If we filtered out all the policies we want to exit with a\n # non-zero status. 
But if the policy file is empty then continue\n # on to the specific command to determine the exit status.\n if len(all_policies) > 0:\n sys.exit(1)\n\n # Do not allow multiple policies in a region with the same name,\n # even across files\n policies_by_region = defaultdict(list)\n for p in policies:\n policies_by_region[p.options.region].append(p)\n for region in policies_by_region.keys():\n counts = Counter([p.name for p in policies_by_region[region]])\n for policy, count in six.iteritems(counts):\n if count > 1:\n log.error(\"duplicate policy name '{}'\".format(policy))\n sys.exit(1)\n\n # Variable expansion and non schema validation (not optional)\n for p in policies:\n p.expand_variables(p.get_variables())\n p.validate()\n\n return f(options, list(policies))\n\n return _load_policies\n\n\ndef _load_vars(options):\n vars = None\n if options.vars:\n try:\n vars = load_file(options.vars)\n except IOError as e:\n log.error('Problem loading vars file \"{}\": {}'.format(options.vars, e.strerror))\n sys.exit(1)\n\n # TODO - provide builtin vars here (such as account)\n\n return vars\n\n\ndef _print_no_policies_warning(options, policies):\n if options.policy_filter or options.resource_type:\n log.warning(\"Warning: no policies matched the filters provided.\")\n\n log.warning(\"Filters:\")\n if options.policy_filter:\n log.warning(\" Policy name filter (-p): \" + options.policy_filter)\n if options.resource_type:\n log.warning(\" Resource type filter (-t): \" + options.resource_type)\n\n log.warning(\"Available policies:\")\n for policy in policies:\n log.warning(\" - {} ({})\".format(policy.name, policy.resource_type))\n if not policies:\n log.warning(\" (none)\")\n else:\n log.warning('Empty policy file(s). Nothing to do.')\n\n\ndef validate(options):\n from c7n import schema\n load_resources()\n if len(options.configs) < 1:\n log.error('no config files specified')\n sys.exit(1)\n\n used_policy_names = set()\n schm = schema.generate()\n errors = []\n\n for config_file in options.configs:\n config_file = os.path.expanduser(config_file)\n if not os.path.exists(config_file):\n raise ValueError(\"Invalid path for config %r\" % config_file)\n\n options.dryrun = True\n fmt = config_file.rsplit('.', 1)[-1]\n with open(config_file) as fh:\n if fmt in ('yml', 'yaml'):\n data = yaml.safe_load(fh.read())\n elif fmt in ('json',):\n data = json.load(fh)\n else:\n log.error(\"The config file must end in .json, .yml or .yaml.\")\n raise ValueError(\"The config file must end in .json, .yml or .yaml.\")\n\n errors += schema.validate(data, schm)\n conf_policy_names = {\n p.get('name', 'unknown') for p in data.get('policies', ())}\n dupes = conf_policy_names.intersection(used_policy_names)\n if len(dupes) >= 1:\n errors.append(ValueError(\n \"Only one policy with a given name allowed, duplicates: %s\" % (\n \", \".join(dupes)\n )\n ))\n used_policy_names = used_policy_names.union(conf_policy_names)\n if not errors:\n null_config = Config.empty(dryrun=True, account_id='na', region='na')\n for p in data.get('policies', ()):\n try:\n policy = Policy(p, null_config, Bag())\n policy.validate()\n except Exception as e:\n msg = \"Policy: %s is invalid: %s\" % (\n p.get('name', 'unknown'), e)\n errors.append(msg)\n if not errors:\n log.info(\"Configuration valid: {}\".format(config_file))\n continue\n\n log.error(\"Configuration invalid: {}\".format(config_file))\n for e in errors:\n log.error(\"%s\" % e)\n if errors:\n sys.exit(1)\n\n\n@policy_command\ndef run(options, policies):\n exit_code = 0\n\n # AWS - Sanity 
check that we have an assumable role before executing policies\n # Todo - move this behind provider interface\n if options.assume_role and [p for p in policies if p.provider_name == 'aws']:\n try:\n local_session(clouds['aws']().get_session_factory(options))\n except ClientError:\n log.exception(\"Unable to assume role %s\", options.assume_role)\n sys.exit(1)\n\n for policy in policies:\n try:\n policy()\n except Exception:\n exit_code = 2\n if options.debug:\n raise\n log.exception(\n \"Error while executing policy %s, continuing\" % (\n policy.name))\n if exit_code != 0:\n sys.exit(exit_code)\n\n\n@policy_command\ndef report(options, policies):\n from c7n.reports import report as do_report\n if len(policies) == 0:\n log.error('Error: must supply at least one policy')\n sys.exit(1)\n\n resources = set([p.resource_type for p in policies])\n if len(resources) > 1:\n log.error('Error: Report subcommand can accept multiple policies, '\n 'but they must all be for the same resource.')\n sys.exit(1)\n\n delta = timedelta(days=options.days)\n begin_date = datetime.now() - delta\n do_report(\n policies, begin_date, options, sys.stdout, raw_output_fh=options.raw)\n\n\n@policy_command\ndef logs(options, policies):\n if len(policies) != 1:\n log.error(\"Log subcommand requires exactly one policy\")\n sys.exit(1)\n\n policy = policies.pop()\n # initialize policy execution context for access to outputs\n policy.ctx.initialize()\n\n for e in policy.get_logs(options.start, options.end):\n print(\"%s: %s\" % (\n time.strftime(\n \"%Y-%m-%d %H:%M:%S\", time.localtime(e['timestamp'] / 1000)),\n e['message']))\n\n\ndef _schema_get_docstring(starting_class):\n \"\"\" Given a class, return its docstring.\n\n If no docstring is present for the class, search base classes in MRO for a\n docstring.\n \"\"\"\n for cls in inspect.getmro(starting_class):\n if inspect.getdoc(cls):\n return inspect.getdoc(cls)\n\n\ndef schema_completer(prefix):\n \"\"\" For tab-completion via argcomplete, return completion options.\n\n For the given prefix so far, return the possible options. Note that\n filtering via startswith happens after this list is returned.\n \"\"\"\n from c7n import schema\n load_resources()\n components = prefix.split('.')\n\n if components[0] in provider.clouds.keys():\n cloud_provider = components.pop(0)\n provider_resources = provider.resources(cloud_provider)\n else:\n cloud_provider = 'aws'\n provider_resources = provider.resources('aws')\n components[0] = \"aws.%s\" % components[0]\n\n # Completions for resource\n if len(components) == 1:\n choices = [r for r in provider.resources().keys()\n if r.startswith(components[0])]\n if len(choices) == 1:\n choices += ['{}{}'.format(choices[0], '.')]\n return choices\n\n if components[0] not in provider_resources.keys():\n return []\n\n # Completions for category\n if len(components) == 2:\n choices = ['{}.{}'.format(components[0], x)\n for x in ('actions', 'filters') if x.startswith(components[1])]\n if len(choices) == 1:\n choices += ['{}{}'.format(choices[0], '.')]\n return choices\n\n # Completions for item\n elif len(components) == 3:\n resource_mapping = schema.resource_vocabulary(cloud_provider)\n return ['{}.{}.{}'.format(components[0], components[1], x)\n for x in resource_mapping[components[0]][components[1]]]\n\n return []\n\n\ndef schema_cmd(options):\n \"\"\" Print info about the resources, actions and filters available. 
\"\"\"\n from c7n import schema\n if options.json:\n schema.json_dump(options.resource)\n return\n\n load_resources()\n resource_mapping = schema.resource_vocabulary()\n if options.summary:\n schema.summary(resource_mapping)\n return\n\n # Here are the formats for what we accept:\n # - No argument\n # - List all available RESOURCES\n # - PROVIDER\n # - List all available RESOURCES for supplied PROVIDER\n # - RESOURCE\n # - List all available actions and filters for supplied RESOURCE\n # - RESOURCE.actions\n # - List all available actions for supplied RESOURCE\n # - RESOURCE.actions.ACTION\n # - Show class doc string and schema for supplied action\n # - RESOURCE.filters\n # - List all available filters for supplied RESOURCE\n # - RESOURCE.filters.FILTER\n # - Show class doc string and schema for supplied filter\n\n if not options.resource:\n resource_list = {'resources': sorted(provider.resources().keys())}\n print(yaml.safe_dump(resource_list, default_flow_style=False))\n return\n\n # Format is [PROVIDER].RESOURCE.CATEGORY.ITEM\n # optional provider defaults to aws for compatibility\n components = options.resource.lower().split('.')\n if len(components) == 1 and components[0] in provider.clouds.keys():\n resource_list = {'resources': sorted(\n provider.resources(cloud_provider=components[0]).keys())}\n print(yaml.safe_dump(resource_list, default_flow_style=False))\n return\n if components[0] in provider.clouds.keys():\n cloud_provider = components.pop(0)\n resource_mapping = schema.resource_vocabulary(\n cloud_provider)\n components[0] = '%s.%s' % (cloud_provider, components[0])\n else:\n resource_mapping = schema.resource_vocabulary('aws')\n components[0] = 'aws.%s' % components[0]\n #\n # Handle resource\n #\n resource = components[0]\n if resource not in resource_mapping:\n log.error('{} is not a valid resource'.format(resource))\n sys.exit(1)\n\n if len(components) == 1:\n del(resource_mapping[resource]['classes'])\n output = {resource: resource_mapping[resource]}\n print(yaml.safe_dump(output))\n return\n\n #\n # Handle category\n #\n category = components[1]\n if category not in ('actions', 'filters'):\n log.error(\"Valid choices are 'actions' and 'filters'. You supplied '{}'\".format(category))\n sys.exit(1)\n\n if len(components) == 2:\n output = \"No {} available for resource {}.\".format(category, resource)\n if category in resource_mapping[resource]:\n output = {resource: {\n category: resource_mapping[resource][category]}}\n print(yaml.safe_dump(output))\n return\n\n #\n # Handle item\n #\n item = components[2]\n if item not in resource_mapping[resource][category]:\n log.error('{} is not in the {} list for resource {}'.format(item, category, resource))\n sys.exit(1)\n\n if len(components) == 3:\n cls = resource_mapping[resource]['classes'][category][item]\n\n # Print docstring\n docstring = _schema_get_docstring(cls)\n print(\"\\nHelp\\n----\\n\")\n if docstring:\n print(docstring)\n else:\n # Shouldn't ever hit this, so exclude from cover\n print(\"No help is available for this item.\") # pragma: no cover\n\n # Print schema\n print(\"\\nSchema\\n------\\n\")\n if hasattr(cls, 'schema'):\n component_schema = dict(cls.schema)\n component_schema.pop('additionalProperties', None)\n component_schema.pop('type', None)\n print(yaml.safe_dump(component_schema))\n else:\n # Shouldn't ever hit this, so exclude from cover\n print(\"No schema is available for this item.\", file=sys.sterr) # pragma: no cover\n print('')\n return\n\n # We received too much (e.g. 
s3.actions.foo.bar)\n log.error(\"Invalid selector '{}'. Max of 3 components in the \"\n \"format RESOURCE.CATEGORY.ITEM\".format(options.resource))\n sys.exit(1)\n\n\ndef _metrics_get_endpoints(options):\n \"\"\" Determine the start and end dates based on user-supplied options. \"\"\"\n if bool(options.start) ^ bool(options.end):\n log.error('--start and --end must be specified together')\n sys.exit(1)\n\n if options.start and options.end:\n start = options.start\n end = options.end\n else:\n end = datetime.utcnow()\n start = end - timedelta(options.days)\n\n return start, end\n\n\n@policy_command\ndef metrics_cmd(options, policies):\n log.warning(\"metrics command is deprecated, and will be removed in future\")\n policies = [p for p in policies if p.provider_name == 'aws']\n start, end = _metrics_get_endpoints(options)\n data = {}\n for p in policies:\n log.info('Getting %s metrics', p)\n data[p.name] = p.get_metrics(start, end, options.period)\n print(dumps(data, indent=2))\n\n\ndef version_cmd(options):\n from c7n.version import version\n\n if not options.debug:\n print(version)\n return\n\n indent = 13\n pp = pprint.PrettyPrinter(indent=indent)\n\n print(\"\\nPlease copy/paste the following info along with any bug reports:\\n\")\n print(\"Custodian: \", version)\n pyversion = sys.version.replace('\\n', '\\n' + ' ' * indent) # For readability\n print(\"Python: \", pyversion)\n # os.uname is only available on recent versions of Unix\n try:\n print(\"Platform: \", os.uname())\n except Exception: # pragma: no cover\n print(\"Platform: \", sys.platform)\n print(\"Using venv: \", hasattr(sys, 'real_prefix'))\n print(\"PYTHONPATH: \")\n pp.pprint(sys.path)\n", "path": "c7n/commands.py" } ]
diff --git a/c7n/commands.py b/c7n/commands.py
index 58cd81041dc..5140d5f2ee8 100644
--- a/c7n/commands.py
+++ b/c7n/commands.py
@@ -487,6 +487,8 @@ def _metrics_get_endpoints(options):
 
 @policy_command
 def metrics_cmd(options, policies):
+    log.warning("metrics command is deprecated, and will be removed in future")
+    policies = [p for p in policies if p.provider_name == 'aws']
     start, end = _metrics_get_endpoints(options)
     data = {}
     for p in policies:
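For illustration only, a small standalone sketch of the selector normalization that `schema_cmd` performs on its `RESOURCE.CATEGORY.ITEM` argument; the helper name and the `clouds` tuple are hypothetical stand-ins for c7n's provider registry, not c7n API:

```python
# Hypothetical helper, not c7n API: mirrors the component handling in schema_cmd.
def normalize_selector(selector, clouds=("aws", "azure", "gcp")):
    """Split a [PROVIDER.]RESOURCE[.CATEGORY[.ITEM]] selector.

    A leading provider is folded into the resource name, and a missing
    provider defaults to aws, e.g. "s3" -> "aws.s3".
    """
    components = selector.lower().split('.')
    if components[0] in clouds:
        if len(components) == 1:
            return components  # bare provider: list its resources
        provider = components.pop(0)
        components[0] = '%s.%s' % (provider, components[0])
    else:
        components[0] = 'aws.%s' % components[0]
    if len(components) > 3:
        raise ValueError(
            "Invalid selector %r: max of 3 components in the "
            "format RESOURCE.CATEGORY.ITEM" % selector)
    return components


print(normalize_selector("s3.actions.tag"))  # ['aws.s3', 'actions', 'tag']
print(normalize_selector("azure.vm"))        # ['azure.vm']
print(normalize_selector("gcp"))             # ['gcp']
```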
google__mobly-518
The `kill_signal` param in `stop_standing_subprocess` is never used: https://github.com/google/mobly/blob/master/mobly/utils.py#L318
[ { "content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport concurrent.futures\nimport datetime\nimport io\nimport logging\nimport os\nimport platform\nimport portpicker\nimport random\nimport re\nimport signal\nimport string\nimport subprocess\nimport time\nimport traceback\n\n# File name length is limited to 255 chars on some OS, so we need to make sure\n# the file names we output fits within the limit.\nMAX_FILENAME_LEN = 255\n# Number of times to retry to get available port\nMAX_PORT_ALLOCATION_RETRY = 50\n\nascii_letters_and_digits = string.ascii_letters + string.digits\nvalid_filename_chars = \"-_.\" + ascii_letters_and_digits\n\nGMT_to_olson = {\n \"GMT-9\": \"America/Anchorage\",\n \"GMT-8\": \"US/Pacific\",\n \"GMT-7\": \"US/Mountain\",\n \"GMT-6\": \"US/Central\",\n \"GMT-5\": \"US/Eastern\",\n \"GMT-4\": \"America/Barbados\",\n \"GMT-3\": \"America/Buenos_Aires\",\n \"GMT-2\": \"Atlantic/South_Georgia\",\n \"GMT-1\": \"Atlantic/Azores\",\n \"GMT+0\": \"Africa/Casablanca\",\n \"GMT+1\": \"Europe/Amsterdam\",\n \"GMT+2\": \"Europe/Athens\",\n \"GMT+3\": \"Europe/Moscow\",\n \"GMT+4\": \"Asia/Baku\",\n \"GMT+5\": \"Asia/Oral\",\n \"GMT+6\": \"Asia/Almaty\",\n \"GMT+7\": \"Asia/Bangkok\",\n \"GMT+8\": \"Asia/Hong_Kong\",\n \"GMT+9\": \"Asia/Tokyo\",\n \"GMT+10\": \"Pacific/Guam\",\n \"GMT+11\": \"Pacific/Noumea\",\n \"GMT+12\": \"Pacific/Fiji\",\n \"GMT+13\": \"Pacific/Tongatapu\",\n \"GMT-11\": \"Pacific/Midway\",\n \"GMT-10\": \"Pacific/Honolulu\"\n}\n\n\nclass Error(Exception):\n \"\"\"Raised when an error occurs in a util\"\"\"\n\n\ndef abs_path(path):\n \"\"\"Resolve the '.' and '~' in a path to get the absolute path.\n\n Args:\n path: The path to expand.\n\n Returns:\n The absolute path of the input path.\n \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\ndef create_dir(path):\n \"\"\"Creates a directory if it does not exist already.\n\n Args:\n path: The path of the directory to create.\n \"\"\"\n full_path = abs_path(path)\n if not os.path.exists(full_path):\n try:\n os.makedirs(full_path)\n except OSError as e:\n # ignore the error for dir already exist.\n if e.errno != os.errno.EEXIST:\n raise\n\n\ndef create_alias(target_path, alias_path):\n \"\"\"Creates an alias at 'alias_path' pointing to the file 'target_path'.\n\n On Unix, this is implemented via symlink. 
On Windows, this is done by\n creating a Windows shortcut file.\n\n Args:\n target_path: Destination path that the alias should point to.\n alias_path: Path at which to create the new alias.\n \"\"\"\n if platform.system() == 'Windows' and not alias_path.endswith('.lnk'):\n alias_path += '.lnk'\n if os.path.lexists(alias_path):\n os.remove(alias_path)\n if platform.system() == 'Windows':\n from win32com import client\n shell = client.Dispatch('WScript.Shell')\n shortcut = shell.CreateShortCut(alias_path)\n shortcut.Targetpath = target_path\n shortcut.save()\n else:\n os.symlink(target_path, alias_path)\n\n\ndef get_current_epoch_time():\n \"\"\"Current epoch time in milliseconds.\n\n Returns:\n An integer representing the current epoch time in milliseconds.\n \"\"\"\n return int(round(time.time() * 1000))\n\n\ndef get_current_human_time():\n \"\"\"Returns the current time in human readable format.\n\n Returns:\n The current time stamp in Month-Day-Year Hour:Min:Sec format.\n \"\"\"\n return time.strftime(\"%m-%d-%Y %H:%M:%S \")\n\n\ndef epoch_to_human_time(epoch_time):\n \"\"\"Converts an epoch timestamp to human readable time.\n\n This essentially converts an output of get_current_epoch_time to an output\n of get_current_human_time\n\n Args:\n epoch_time: An integer representing an epoch timestamp in milliseconds.\n\n Returns:\n A time string representing the input time.\n None if input param is invalid.\n \"\"\"\n if isinstance(epoch_time, int):\n try:\n d = datetime.datetime.fromtimestamp(epoch_time / 1000)\n return d.strftime(\"%m-%d-%Y %H:%M:%S \")\n except ValueError:\n return None\n\n\ndef get_timezone_olson_id():\n \"\"\"Return the Olson ID of the local (non-DST) timezone.\n\n Returns:\n A string representing one of the Olson IDs of the local (non-DST)\n timezone.\n \"\"\"\n tzoffset = int(time.timezone / 3600)\n gmt = None\n if tzoffset <= 0:\n gmt = \"GMT+{}\".format(-tzoffset)\n else:\n gmt = \"GMT-{}\".format(tzoffset)\n return GMT_to_olson[gmt]\n\n\ndef find_files(paths, file_predicate):\n \"\"\"Locate files whose names and extensions match the given predicate in\n the specified directories.\n\n Args:\n paths: A list of directory paths where to find the files.\n file_predicate: A function that returns True if the file name and\n extension are desired.\n\n Returns:\n A list of files that match the predicate.\n \"\"\"\n file_list = []\n for path in paths:\n p = abs_path(path)\n for dirPath, _, fileList in os.walk(p):\n for fname in fileList:\n name, ext = os.path.splitext(fname)\n if file_predicate(name, ext):\n file_list.append((dirPath, name, ext))\n return file_list\n\n\ndef load_file_to_base64_str(f_path):\n \"\"\"Loads the content of a file into a base64 string.\n\n Args:\n f_path: full path to the file including the file name.\n\n Returns:\n A base64 string representing the content of the file in utf-8 encoding.\n \"\"\"\n path = abs_path(f_path)\n with io.open(path, 'rb') as f:\n f_bytes = f.read()\n base64_str = base64.b64encode(f_bytes).decode(\"utf-8\")\n return base64_str\n\n\ndef find_field(item_list, cond, comparator, target_field):\n \"\"\"Finds the value of a field in a dict object that satisfies certain\n conditions.\n\n Args:\n item_list: A list of dict objects.\n cond: A param that defines the condition.\n comparator: A function that checks if an dict satisfies the condition.\n target_field: Name of the field whose value to be returned if an item\n satisfies the condition.\n\n Returns:\n Target value or None if no item satisfies the condition.\n \"\"\"\n for 
item in item_list:\n if comparator(item, cond) and target_field in item:\n return item[target_field]\n return None\n\n\ndef rand_ascii_str(length):\n \"\"\"Generates a random string of specified length, composed of ascii letters\n and digits.\n\n Args:\n length: The number of characters in the string.\n\n Returns:\n The random string generated.\n \"\"\"\n letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]\n return ''.join(letters)\n\n\n# Thead/Process related functions.\ndef concurrent_exec(func, param_list):\n \"\"\"Executes a function with different parameters pseudo-concurrently.\n\n This is basically a map function. Each element (should be an iterable) in\n the param_list is unpacked and passed into the function. Due to Python's\n GIL, there's no true concurrency. This is suited for IO-bound tasks.\n\n Args:\n func: The function that parforms a task.\n param_list: A list of iterables, each being a set of params to be\n passed into the function.\n\n Returns:\n A list of return values from each function execution. If an execution\n caused an exception, the exception object will be the corresponding\n result.\n \"\"\"\n with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:\n # Start the load operations and mark each future with its params\n future_to_params = {executor.submit(func, *p): p for p in param_list}\n return_vals = []\n for future in concurrent.futures.as_completed(future_to_params):\n params = future_to_params[future]\n try:\n return_vals.append(future.result())\n except Exception as exc:\n logging.exception(\"{} generated an exception: {}\".format(\n params, traceback.format_exc()))\n return_vals.append(exc)\n return return_vals\n\n\ndef start_standing_subprocess(cmd, shell=False):\n \"\"\"Starts a long-running subprocess.\n\n This is not a blocking call and the subprocess started by it should be\n explicitly terminated with stop_standing_subprocess.\n\n For short-running commands, you should use subprocess.check_call, which\n blocks.\n\n Args:\n cmd: string, the command to start the subprocess with.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n\n Returns:\n The subprocess that was started.\n \"\"\"\n logging.debug('Starting standing subprocess with: %s', cmd)\n proc = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell)\n # Leaving stdin open causes problems for input, e.g. breaking the\n # code.inspect() shell (http://stackoverflow.com/a/25512460/1612937), so\n # explicitly close it assuming it is not needed for standing subprocesses.\n proc.stdin.close()\n proc.stdin = None\n logging.debug('Started standing subprocess %d', proc.pid)\n return proc\n\n\ndef stop_standing_subprocess(proc, kill_signal=signal.SIGTERM):\n \"\"\"Stops a subprocess started by start_standing_subprocess.\n\n Before killing the process, we check if the process is running, if it has\n terminated, Error is raised.\n\n Catches and ignores the PermissionError which only happens on Macs.\n\n Args:\n proc: Subprocess to terminate.\n\n Raises:\n Error: if the subprocess could not be stopped.\n \"\"\"\n # Only import psutil when actually needed.\n # psutil may cause import error in certain env. 
This way the utils module\n # doesn't crash upon import.\n import psutil\n pid = proc.pid\n logging.debug('Stopping standing subprocess %d', pid)\n process = psutil.Process(pid)\n failed = []\n try:\n children = process.children(recursive=True)\n except AttributeError:\n # Handle versions <3.0.0 of psutil.\n children = process.get_children(recursive=True)\n for child in children:\n try:\n child.kill()\n child.wait(timeout=10)\n except psutil.NoSuchProcess:\n # Ignore if the child process has already terminated.\n pass\n except:\n failed.append(child.pid)\n logging.exception('Failed to kill standing subprocess %d',\n child.pid)\n try:\n process.kill()\n process.wait(timeout=10)\n except psutil.NoSuchProcess:\n # Ignore if the process has already terminated.\n pass\n except:\n failed.append(pid)\n logging.exception('Failed to kill standing subprocess %d', pid)\n if failed:\n raise Error('Failed to kill standing subprocesses: %s' % failed)\n # Call wait and close pipes on the original Python object so we don't get\n # runtime warnings.\n if proc.stdout:\n proc.stdout.close()\n if proc.stderr:\n proc.stderr.close()\n proc.wait()\n logging.debug('Stopped standing subprocess %d', pid)\n\n\ndef wait_for_standing_subprocess(proc, timeout=None):\n \"\"\"Waits for a subprocess started by start_standing_subprocess to finish\n or times out.\n\n Propagates the exception raised by the subprocess.wait(.) function.\n The subprocess.TimeoutExpired exception is raised if the process timed-out\n rather then terminating.\n\n If no exception is raised: the subprocess terminated on its own. No need\n to call stop_standing_subprocess() to kill it.\n\n If an exception is raised: the subprocess is still alive - it did not\n terminate. Either call stop_standing_subprocess() to kill it, or call\n wait_for_standing_subprocess() to keep waiting for it to terminate on its\n own.\n\n Args:\n p: Subprocess to wait for.\n timeout: An integer number of seconds to wait before timing out.\n \"\"\"\n proc.wait(timeout)\n\n\ndef get_available_host_port():\n \"\"\"Gets a host port number available for adb forward.\n\n Returns:\n An integer representing a port number on the host available for adb\n forward.\n\n Raises:\n Error: when no port is found after MAX_PORT_ALLOCATION_RETRY times.\n \"\"\"\n # Only import adb module if needed.\n from mobly.controllers.android_device_lib import adb\n for _ in range(MAX_PORT_ALLOCATION_RETRY):\n port = portpicker.PickUnusedPort()\n # Make sure adb is not using this port so we don't accidentally\n # interrupt ongoing runs by trying to bind to the port.\n if port not in adb.list_occupied_adb_ports():\n return port\n raise Error('Failed to find available port after {} retries'.format(\n MAX_PORT_ALLOCATION_RETRY))\n\n\ndef grep(regex, output):\n \"\"\"Similar to linux's `grep`, this returns the line in an output stream\n that matches a given regex pattern.\n\n It does not rely on the `grep` binary and is not sensitive to line endings,\n so it can be used cross-platform.\n\n Args:\n regex: string, a regex that matches the expected pattern.\n output: byte string, the raw output of the adb cmd.\n\n Returns:\n A list of strings, all of which are output lines that matches the\n regex pattern.\n \"\"\"\n lines = output.decode('utf-8').strip().splitlines()\n results = []\n for line in lines:\n if re.search(regex, line):\n results.append(line.strip())\n return results\n", "path": "mobly/utils.py" } ]
[ { "content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport concurrent.futures\nimport datetime\nimport io\nimport logging\nimport os\nimport platform\nimport portpicker\nimport random\nimport re\nimport signal\nimport string\nimport subprocess\nimport time\nimport traceback\n\n# File name length is limited to 255 chars on some OS, so we need to make sure\n# the file names we output fits within the limit.\nMAX_FILENAME_LEN = 255\n# Number of times to retry to get available port\nMAX_PORT_ALLOCATION_RETRY = 50\n\nascii_letters_and_digits = string.ascii_letters + string.digits\nvalid_filename_chars = \"-_.\" + ascii_letters_and_digits\n\nGMT_to_olson = {\n \"GMT-9\": \"America/Anchorage\",\n \"GMT-8\": \"US/Pacific\",\n \"GMT-7\": \"US/Mountain\",\n \"GMT-6\": \"US/Central\",\n \"GMT-5\": \"US/Eastern\",\n \"GMT-4\": \"America/Barbados\",\n \"GMT-3\": \"America/Buenos_Aires\",\n \"GMT-2\": \"Atlantic/South_Georgia\",\n \"GMT-1\": \"Atlantic/Azores\",\n \"GMT+0\": \"Africa/Casablanca\",\n \"GMT+1\": \"Europe/Amsterdam\",\n \"GMT+2\": \"Europe/Athens\",\n \"GMT+3\": \"Europe/Moscow\",\n \"GMT+4\": \"Asia/Baku\",\n \"GMT+5\": \"Asia/Oral\",\n \"GMT+6\": \"Asia/Almaty\",\n \"GMT+7\": \"Asia/Bangkok\",\n \"GMT+8\": \"Asia/Hong_Kong\",\n \"GMT+9\": \"Asia/Tokyo\",\n \"GMT+10\": \"Pacific/Guam\",\n \"GMT+11\": \"Pacific/Noumea\",\n \"GMT+12\": \"Pacific/Fiji\",\n \"GMT+13\": \"Pacific/Tongatapu\",\n \"GMT-11\": \"Pacific/Midway\",\n \"GMT-10\": \"Pacific/Honolulu\"\n}\n\n\nclass Error(Exception):\n \"\"\"Raised when an error occurs in a util\"\"\"\n\n\ndef abs_path(path):\n \"\"\"Resolve the '.' and '~' in a path to get the absolute path.\n\n Args:\n path: The path to expand.\n\n Returns:\n The absolute path of the input path.\n \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\ndef create_dir(path):\n \"\"\"Creates a directory if it does not exist already.\n\n Args:\n path: The path of the directory to create.\n \"\"\"\n full_path = abs_path(path)\n if not os.path.exists(full_path):\n try:\n os.makedirs(full_path)\n except OSError as e:\n # ignore the error for dir already exist.\n if e.errno != os.errno.EEXIST:\n raise\n\n\ndef create_alias(target_path, alias_path):\n \"\"\"Creates an alias at 'alias_path' pointing to the file 'target_path'.\n\n On Unix, this is implemented via symlink. 
On Windows, this is done by\n creating a Windows shortcut file.\n\n Args:\n target_path: Destination path that the alias should point to.\n alias_path: Path at which to create the new alias.\n \"\"\"\n if platform.system() == 'Windows' and not alias_path.endswith('.lnk'):\n alias_path += '.lnk'\n if os.path.lexists(alias_path):\n os.remove(alias_path)\n if platform.system() == 'Windows':\n from win32com import client\n shell = client.Dispatch('WScript.Shell')\n shortcut = shell.CreateShortCut(alias_path)\n shortcut.Targetpath = target_path\n shortcut.save()\n else:\n os.symlink(target_path, alias_path)\n\n\ndef get_current_epoch_time():\n \"\"\"Current epoch time in milliseconds.\n\n Returns:\n An integer representing the current epoch time in milliseconds.\n \"\"\"\n return int(round(time.time() * 1000))\n\n\ndef get_current_human_time():\n \"\"\"Returns the current time in human readable format.\n\n Returns:\n The current time stamp in Month-Day-Year Hour:Min:Sec format.\n \"\"\"\n return time.strftime(\"%m-%d-%Y %H:%M:%S \")\n\n\ndef epoch_to_human_time(epoch_time):\n \"\"\"Converts an epoch timestamp to human readable time.\n\n This essentially converts an output of get_current_epoch_time to an output\n of get_current_human_time\n\n Args:\n epoch_time: An integer representing an epoch timestamp in milliseconds.\n\n Returns:\n A time string representing the input time.\n None if input param is invalid.\n \"\"\"\n if isinstance(epoch_time, int):\n try:\n d = datetime.datetime.fromtimestamp(epoch_time / 1000)\n return d.strftime(\"%m-%d-%Y %H:%M:%S \")\n except ValueError:\n return None\n\n\ndef get_timezone_olson_id():\n \"\"\"Return the Olson ID of the local (non-DST) timezone.\n\n Returns:\n A string representing one of the Olson IDs of the local (non-DST)\n timezone.\n \"\"\"\n tzoffset = int(time.timezone / 3600)\n gmt = None\n if tzoffset <= 0:\n gmt = \"GMT+{}\".format(-tzoffset)\n else:\n gmt = \"GMT-{}\".format(tzoffset)\n return GMT_to_olson[gmt]\n\n\ndef find_files(paths, file_predicate):\n \"\"\"Locate files whose names and extensions match the given predicate in\n the specified directories.\n\n Args:\n paths: A list of directory paths where to find the files.\n file_predicate: A function that returns True if the file name and\n extension are desired.\n\n Returns:\n A list of files that match the predicate.\n \"\"\"\n file_list = []\n for path in paths:\n p = abs_path(path)\n for dirPath, _, fileList in os.walk(p):\n for fname in fileList:\n name, ext = os.path.splitext(fname)\n if file_predicate(name, ext):\n file_list.append((dirPath, name, ext))\n return file_list\n\n\ndef load_file_to_base64_str(f_path):\n \"\"\"Loads the content of a file into a base64 string.\n\n Args:\n f_path: full path to the file including the file name.\n\n Returns:\n A base64 string representing the content of the file in utf-8 encoding.\n \"\"\"\n path = abs_path(f_path)\n with io.open(path, 'rb') as f:\n f_bytes = f.read()\n base64_str = base64.b64encode(f_bytes).decode(\"utf-8\")\n return base64_str\n\n\ndef find_field(item_list, cond, comparator, target_field):\n \"\"\"Finds the value of a field in a dict object that satisfies certain\n conditions.\n\n Args:\n item_list: A list of dict objects.\n cond: A param that defines the condition.\n comparator: A function that checks if an dict satisfies the condition.\n target_field: Name of the field whose value to be returned if an item\n satisfies the condition.\n\n Returns:\n Target value or None if no item satisfies the condition.\n \"\"\"\n for 
item in item_list:\n if comparator(item, cond) and target_field in item:\n return item[target_field]\n return None\n\n\ndef rand_ascii_str(length):\n \"\"\"Generates a random string of specified length, composed of ascii letters\n and digits.\n\n Args:\n length: The number of characters in the string.\n\n Returns:\n The random string generated.\n \"\"\"\n letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]\n return ''.join(letters)\n\n\n# Thead/Process related functions.\ndef concurrent_exec(func, param_list):\n \"\"\"Executes a function with different parameters pseudo-concurrently.\n\n This is basically a map function. Each element (should be an iterable) in\n the param_list is unpacked and passed into the function. Due to Python's\n GIL, there's no true concurrency. This is suited for IO-bound tasks.\n\n Args:\n func: The function that parforms a task.\n param_list: A list of iterables, each being a set of params to be\n passed into the function.\n\n Returns:\n A list of return values from each function execution. If an execution\n caused an exception, the exception object will be the corresponding\n result.\n \"\"\"\n with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:\n # Start the load operations and mark each future with its params\n future_to_params = {executor.submit(func, *p): p for p in param_list}\n return_vals = []\n for future in concurrent.futures.as_completed(future_to_params):\n params = future_to_params[future]\n try:\n return_vals.append(future.result())\n except Exception as exc:\n logging.exception(\"{} generated an exception: {}\".format(\n params, traceback.format_exc()))\n return_vals.append(exc)\n return return_vals\n\n\ndef start_standing_subprocess(cmd, shell=False):\n \"\"\"Starts a long-running subprocess.\n\n This is not a blocking call and the subprocess started by it should be\n explicitly terminated with stop_standing_subprocess.\n\n For short-running commands, you should use subprocess.check_call, which\n blocks.\n\n Args:\n cmd: string, the command to start the subprocess with.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n\n Returns:\n The subprocess that was started.\n \"\"\"\n logging.debug('Starting standing subprocess with: %s', cmd)\n proc = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell)\n # Leaving stdin open causes problems for input, e.g. breaking the\n # code.inspect() shell (http://stackoverflow.com/a/25512460/1612937), so\n # explicitly close it assuming it is not needed for standing subprocesses.\n proc.stdin.close()\n proc.stdin = None\n logging.debug('Started standing subprocess %d', proc.pid)\n return proc\n\n\ndef stop_standing_subprocess(proc):\n \"\"\"Stops a subprocess started by start_standing_subprocess.\n\n Before killing the process, we check if the process is running, if it has\n terminated, Error is raised.\n\n Catches and ignores the PermissionError which only happens on Macs.\n\n Args:\n proc: Subprocess to terminate.\n\n Raises:\n Error: if the subprocess could not be stopped.\n \"\"\"\n # Only import psutil when actually needed.\n # psutil may cause import error in certain env. 
This way the utils module\n # doesn't crash upon import.\n import psutil\n pid = proc.pid\n logging.debug('Stopping standing subprocess %d', pid)\n process = psutil.Process(pid)\n failed = []\n try:\n children = process.children(recursive=True)\n except AttributeError:\n # Handle versions <3.0.0 of psutil.\n children = process.get_children(recursive=True)\n for child in children:\n try:\n child.kill()\n child.wait(timeout=10)\n except psutil.NoSuchProcess:\n # Ignore if the child process has already terminated.\n pass\n except:\n failed.append(child.pid)\n logging.exception('Failed to kill standing subprocess %d',\n child.pid)\n try:\n process.kill()\n process.wait(timeout=10)\n except psutil.NoSuchProcess:\n # Ignore if the process has already terminated.\n pass\n except:\n failed.append(pid)\n logging.exception('Failed to kill standing subprocess %d', pid)\n if failed:\n raise Error('Failed to kill standing subprocesses: %s' % failed)\n # Call wait and close pipes on the original Python object so we don't get\n # runtime warnings.\n if proc.stdout:\n proc.stdout.close()\n if proc.stderr:\n proc.stderr.close()\n proc.wait()\n logging.debug('Stopped standing subprocess %d', pid)\n\n\ndef wait_for_standing_subprocess(proc, timeout=None):\n \"\"\"Waits for a subprocess started by start_standing_subprocess to finish\n or times out.\n\n Propagates the exception raised by the subprocess.wait(.) function.\n The subprocess.TimeoutExpired exception is raised if the process timed-out\n rather then terminating.\n\n If no exception is raised: the subprocess terminated on its own. No need\n to call stop_standing_subprocess() to kill it.\n\n If an exception is raised: the subprocess is still alive - it did not\n terminate. Either call stop_standing_subprocess() to kill it, or call\n wait_for_standing_subprocess() to keep waiting for it to terminate on its\n own.\n\n Args:\n p: Subprocess to wait for.\n timeout: An integer number of seconds to wait before timing out.\n \"\"\"\n proc.wait(timeout)\n\n\ndef get_available_host_port():\n \"\"\"Gets a host port number available for adb forward.\n\n Returns:\n An integer representing a port number on the host available for adb\n forward.\n\n Raises:\n Error: when no port is found after MAX_PORT_ALLOCATION_RETRY times.\n \"\"\"\n # Only import adb module if needed.\n from mobly.controllers.android_device_lib import adb\n for _ in range(MAX_PORT_ALLOCATION_RETRY):\n port = portpicker.PickUnusedPort()\n # Make sure adb is not using this port so we don't accidentally\n # interrupt ongoing runs by trying to bind to the port.\n if port not in adb.list_occupied_adb_ports():\n return port\n raise Error('Failed to find available port after {} retries'.format(\n MAX_PORT_ALLOCATION_RETRY))\n\n\ndef grep(regex, output):\n \"\"\"Similar to linux's `grep`, this returns the line in an output stream\n that matches a given regex pattern.\n\n It does not rely on the `grep` binary and is not sensitive to line endings,\n so it can be used cross-platform.\n\n Args:\n regex: string, a regex that matches the expected pattern.\n output: byte string, the raw output of the adb cmd.\n\n Returns:\n A list of strings, all of which are output lines that matches the\n regex pattern.\n \"\"\"\n lines = output.decode('utf-8').strip().splitlines()\n results = []\n for line in lines:\n if re.search(regex, line):\n results.append(line.strip())\n return results\n", "path": "mobly/utils.py" } ]
diff --git a/mobly/utils.py b/mobly/utils.py
index b4f85362..a9f065cd 100644
--- a/mobly/utils.py
+++ b/mobly/utils.py
@@ -316,7 +316,7 @@ def start_standing_subprocess(cmd, shell=False):
     return proc
 
 
-def stop_standing_subprocess(proc, kill_signal=signal.SIGTERM):
+def stop_standing_subprocess(proc):
     """Stops a subprocess started by start_standing_subprocess.
 
     Before killing the process, we check if the process is running, if it has
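For context, a minimal usage sketch of the corrected API after this change; the command string and surrounding flow are hypothetical, not taken from Mobly's tests:

```python
from mobly import utils

# Start a long-running process; the helper closes stdin itself.
proc = utils.start_standing_subprocess('ping 127.0.0.1', shell=True)

# ... interact with the process via proc.stdout / proc.stderr ...

# kill_signal is no longer accepted; the helper always kills the process
# and its children via psutil, then waits on the original Popen object.
utils.stop_standing_subprocess(proc)
```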
DDMAL__CantusDB-1182
Source create/edit: Provenance autocompletes should be icontains rather than istartswith. "Berne" should find "Abdij van Berne", etc.
[ { "content": "import csv\nfrom typing import Optional, Union\nfrom django.http.response import JsonResponse\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import render, redirect\nfrom django.urls.base import reverse\nfrom articles.models import Article\nfrom main_app.models import (\n Century,\n Chant,\n Differentia,\n Feast,\n Genre,\n Notation,\n Office,\n Provenance,\n RismSiglum,\n Segment,\n Sequence,\n Source,\n)\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom main_app.models.base_model import BaseModel\nfrom next_chants import next_chants\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.core.exceptions import PermissionDenied\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\nfrom typing import List\nfrom django.core.paginator import Paginator\nfrom django.templatetags.static import static\nfrom django.contrib.flatpages.models import FlatPage\nfrom dal import autocomplete\nfrom django.db.models import Q\n\n\n@login_required\ndef items_count(request):\n \"\"\"\n Function-based view for the ``items count`` page, accessed with ``content-statistics``\n\n Update 2022-01-05:\n This page has been changed on the original Cantus. It is now in the private domain\n\n Args:\n request (request): The request\n\n Returns:\n HttpResponse: Render the page\n \"\"\"\n # in items count, the number on old cantus shows the total count of a type of object (chant, seq)\n # no matter published or not\n # but for the count of sources, it only shows the count of published sources\n chant_count = Chant.objects.count()\n sequence_count = Sequence.objects.count()\n source_count = Source.objects.filter(published=True).count()\n\n context = {\n \"chant_count\": chant_count,\n \"sequence_count\": sequence_count,\n \"source_count\": source_count,\n }\n return render(request, \"items_count.html\", context)\n\n\ndef ajax_concordance_list(request, cantus_id):\n \"\"\"\n Function-based view responding to the AJAX call for concordance list on the chant detail page,\n accessed with ``chants/<int:pk>``, click on \"Display concordances of this chant\"\n\n Args:\n cantus_id (str): The Cantus ID of the requested concordances group\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by the frontend js code\n \"\"\"\n chants = Chant.objects.filter(cantus_id=cantus_id)\n seqs = Sequence.objects.filter(cantus_id=cantus_id)\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = chants.filter(source__published=True)\n seqs = seqs.filter(source__published=True)\n\n if seqs:\n chants = chants.union(seqs).order_by(\"siglum\", \"folio\")\n else:\n chants = chants.order_by(\"siglum\", \"folio\")\n # queryset(list of dictionaries)\n concordance_values = chants.values(\n \"siglum\",\n \"folio\",\n \"incipit\",\n \"office__name\",\n \"genre__name\",\n \"position\",\n \"feast__name\",\n \"mode\",\n \"image_link\",\n )\n\n concordances = list(concordance_values)\n for i, concordance in enumerate(concordances):\n # some chants do not have a source\n # for those chants, do not return source link\n if chants[i].source:\n concordance[\"source_link\"] = chants[i].source.get_absolute_url()\n if chants[i].search_vector:\n concordance[\"chant_link\"] = chants[i].get_absolute_url()\n else:\n concordance[\"chant_link\"] = 
reverse(\"sequence-detail\", args=[chants[i].id])\n concordance[\"db\"] = \"CD\"\n\n concordance_count = len(concordances)\n return JsonResponse(\n {\"concordances\": concordances, \"concordance_count\": concordance_count},\n safe=True,\n )\n\n\ndef ajax_melody_list(request, cantus_id):\n \"\"\"\n Function-based view responding to the AJAX call for melody list on the chant detail page,\n accessed with ``chants/<int:pk>``, click on \"Display melodies connected with this chant\"\n\n Args:\n cantus_id (str): The Cantus ID of the requested concordances group\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by the frontend js code\n \"\"\"\n chants = (\n Chant.objects.filter(cantus_id=cantus_id).exclude(volpiano=None).order_by(\"id\")\n )\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = chants.filter(source__published=True)\n\n # queryset(list of dictionaries)\n concordance_values = chants.values(\n \"siglum\",\n \"folio\",\n \"office__name\",\n \"genre__name\",\n \"position\",\n \"feast__name\",\n \"cantus_id\",\n \"volpiano\",\n \"mode\",\n # OldCantus seems to use whichever is present: ms spelling, std spelling, incipit (in that order)\n \"manuscript_full_text_std_spelling\",\n )\n\n concordances = list(concordance_values)\n for i, concordance in enumerate(concordances):\n # some chants do not have a source\n # for those chants, do not return source link\n if chants[i].source:\n concordance[\"source_link\"] = chants[i].source.get_absolute_url()\n concordance[\"ci_link\"] = chants[i].get_ci_url()\n concordance[\"chant_link\"] = chants[i].get_absolute_url()\n concordance[\"db\"] = \"CD\"\n\n concordance_count = len(concordances)\n return JsonResponse(\n {\"concordances\": concordances, \"concordance_count\": concordance_count},\n safe=True,\n )\n\n\ndef csv_export(request, source_id):\n \"\"\"\n Function-based view for the CSV export page, accessed with ``csv/<str:source_id>``\n\n Args:\n source_id (str): The ID of the source to export\n\n Returns:\n HttpResponse: The CSV response\n \"\"\"\n try:\n source = Source.objects.get(id=source_id)\n except:\n raise Http404(\"This source does not exist\")\n\n display_unpublished = request.user.is_authenticated\n\n if (not source.published) and (not display_unpublished):\n raise PermissionDenied\n\n # \"4064\" is the segment id of the sequence DB, sources in that segment have sequences instead of chants\n if source.segment and source.segment.id == 4064:\n entries = source.sequence_set.order_by(\"id\")\n else:\n entries = source.chant_set.order_by(\"id\").select_related(\n \"feast\", \"office\", \"genre\"\n )\n\n response = HttpResponse(content_type=\"text/csv\")\n # response[\"Content-Disposition\"] = 'attachment; filename=\"somefilename.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(\n [\n \"siglum\",\n \"marginalia\",\n \"folio\",\n \"sequence\",\n \"incipit\",\n \"feast\",\n \"office\",\n \"genre\",\n \"position\",\n \"cantus_id\",\n \"mode\",\n \"finalis\",\n \"differentia\",\n \"differentiae_database\",\n \"fulltext_standardized\",\n \"fulltext_ms\",\n \"volpiano\",\n \"image_link\",\n \"melody_id\",\n \"addendum\",\n \"extra\",\n \"node_id\",\n ]\n )\n for entry in entries:\n feast = entry.feast.name if entry.feast else \"\"\n office = entry.office.name if entry.office else \"\"\n genre = entry.genre.name if entry.genre else \"\"\n diff_db = entry.diff_db.id if entry.diff_db else \"\"\n\n writer.writerow(\n [\n entry.siglum,\n entry.marginalia,\n entry.folio,\n # 
if entry has a c_sequence, it's a Chant. If it doesn't, it's a Sequence, so write its s_sequence\n entry.c_sequence if entry.c_sequence is not None else entry.s_sequence,\n entry.incipit,\n feast,\n office,\n genre,\n entry.position,\n entry.cantus_id,\n entry.mode,\n entry.finalis,\n entry.differentia,\n entry.diff_db,\n entry.manuscript_full_text_std_spelling,\n entry.manuscript_full_text,\n entry.volpiano,\n entry.image_link,\n entry.melody_id,\n entry.addendum,\n entry.extra,\n entry.id,\n ]\n )\n\n return response\n\n\ndef csv_export_redirect_from_old_path(request, source_id):\n return redirect(reverse(\"csv-export\", args=[source_id]))\n\n\ndef contact(request):\n \"\"\"\n Function-based view that renders the contact page ``contact``\n\n Args:\n request (request): The request\n\n Returns:\n HttpResponse: Render the contact page\n \"\"\"\n return render(request, \"contact.html\")\n\n\ndef ajax_melody_search(request):\n \"\"\"\n Function-based view responding to melody search AJAX calls, accessed with ``melody``\n\n The queryset is filtered according to the ``GET`` parameters\n\n ``GET`` parameters:\n ``notes``: Note sequence drawn on the canvas by the user\n ``anywhere``: Bool value indicating either \"search anywhere\" or \"search beginning\"\n ``transpose``: Bool value indicating either \"search exact matches\" or \"search transpositions\"\n ``siglum``: Filters by the siglum\n ``text``: Filters by the chant text\n ``genre_name``: Filters by genre of chant\n ``feast_name``: Filters by feast of chant\n ``mode``: Filters by mode of Chant\n ``source``: Search in a specific source\n\n Args:\n request (request): The request\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by frontend js code\n \"\"\"\n # all search parameters are passed in as GET params, without using the url conf\n notes = request.GET.get(\"notes\")\n anywhere = request.GET.get(\"anywhere\")\n transpose = request.GET.get(\"transpose\")\n siglum = request.GET.get(\"siglum\")\n text = request.GET.get(\"text\")\n genre_name = request.GET.get(\"genre\")\n feast_name = request.GET.get(\"feast\")\n mode = request.GET.get(\"mode\")\n source = request.GET.get(\"source\")\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = Chant.objects.filter(source__published=True)\n else:\n chants = Chant.objects\n\n # if \"search exact matches + transpositions\"\n if transpose == \"true\":\n # calculate intervals\n # replace '9' (the note G) with the char corresponding to (ASCII(a) - 1), because 'a' denotes the note A\n notes_copy = list(notes.replace(\"9\", chr(ord(\"a\") - 1)))\n # we model the interval between notes using the difference between the ASCII codes of corresponding letters\n # the letter for the note B is \"j\" (106), note A is \"h\" (104), the letter \"i\" (105) is skipped\n # move all notes above A down by one letter\n for j, note in enumerate(notes_copy):\n if ord(note) >= 106:\n notes_copy[j] = chr(ord(note) - 1)\n # `intervals` records the difference between two adjacent notes\n intervals = \"\".join(\n [\n str(ord(notes_copy[j]) - ord(notes_copy[j - 1]))\n for j in range(1, len(notes_copy))\n ]\n )\n # if \"search anywhere in the melody\"\n if anywhere == \"true\":\n chants = chants.filter(volpiano_intervals__contains=intervals)\n # if \"search the beginning of melody\"\n else:\n chants = chants.filter(volpiano_intervals__startswith=intervals)\n # if \"search exact matches\"\n else:\n # if \"search anywhere in the melody\"\n if anywhere == \"true\":\n 
chants = chants.filter(volpiano_notes__contains=notes)\n # if \"search the beginning of melody\"\n else:\n chants = chants.filter(volpiano_notes__startswith=notes)\n\n # filter the queryset with search params\n\n # source id and siglum are duplicate information, they both uniquely identify a source\n # if searching in a specific source, use `source`\n # if searching across all sources, use `siglum`\n if source:\n chants = chants.filter(source__id=source)\n elif siglum:\n chants = chants.filter(siglum__icontains=siglum)\n\n if text:\n chants = chants.filter(manuscript_full_text_std_spelling__icontains=text)\n if genre_name:\n chants = chants.filter(genre__name__icontains=genre_name)\n if feast_name:\n chants = chants.filter(feast__name__icontains=feast_name)\n if mode:\n chants = chants.filter(mode__icontains=mode)\n\n result_values = chants.order_by(\"id\").values(\n \"id\",\n \"siglum\",\n \"folio\",\n \"incipit\",\n \"genre__name\",\n \"feast__name\",\n \"mode\",\n \"volpiano\",\n )\n # convert queryset to a list of dicts because QuerySet is not JSON serializable\n # the above constructed queryset will be evaluated here\n results = list(result_values)\n for result in results:\n # construct the url for chant detail page and add it to the result\n result[\"chant_link\"] = reverse(\"chant-detail\", args=[result[\"id\"]])\n\n result_count = result_values.count()\n return JsonResponse({\"results\": results, \"result_count\": result_count}, safe=True)\n\n\ndef ajax_search_bar(request, search_term):\n \"\"\"\n Function-based view responding to global search bar AJAX calls,\n accessed with the search bar on the top-right corner of almost every page.\n\n Args:\n search_term (str): The search term input\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by frontend js code\n \"\"\"\n # load only the first seven chants\n CHANT_CNT = 7\n\n if not search_term.replace(\" \", \"\").isalpha():\n # if the search term contains at least one digit, assume user is searching by Cantus ID\n chants = Chant.objects.filter(cantus_id__istartswith=search_term).order_by(\"id\")\n else:\n # if the search term does not contain any digits, assume user is searching by incipit\n chants = Chant.objects.filter(incipit__istartswith=search_term).order_by(\"id\")\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = chants.filter(source__published=True)\n\n chants = chants[:CHANT_CNT]\n\n returned_values = chants.values(\n \"incipit\",\n \"genre__name\",\n \"feast__name\",\n \"cantus_id\",\n \"mode\",\n \"siglum\",\n \"office__name\",\n \"folio\",\n \"c_sequence\",\n )\n returned_values = list(returned_values)\n for i in range(chants.count()):\n chant_link = chants[i].get_absolute_url()\n returned_values[i][\"chant_link\"] = chant_link\n return JsonResponse({\"chants\": returned_values}, safe=True)\n\n\ndef json_melody_export(request, cantus_id: str) -> JsonResponse:\n chants = Chant.objects.filter(\n cantus_id=cantus_id, volpiano__isnull=False, source__published=True\n )\n\n db_keys = [\n \"melody_id\",\n \"id\",\n \"cantus_id\",\n \"siglum\",\n \"source__id\", # don't fetch the entire Source object, just the id of\n # the source. __id is removed in standardize_for_api below\n \"folio\",\n \"incipit\",\n \"manuscript_full_text\",\n \"volpiano\",\n \"mode\",\n \"feast__id\",\n \"office__id\",\n \"genre__id\",\n \"position\",\n ]\n\n chants_values = list(chants.values(*db_keys)) # a list of dictionaries. 
Each\n # dictionary represents metadata on one chant\n\n standardized_chants_values = [\n standardize_dict_for_json_melody_export(cv, request) for cv in chants_values\n ]\n\n return JsonResponse(standardized_chants_values, safe=False)\n\n\ndef standardize_dict_for_json_melody_export(\n chant_values: List[dict], request\n) -> List[dict]:\n \"\"\"Take a list of dictionaries, and in each dictionary, change several\n of the keys to match their values in OldCantus\n\n Args:\n chant_values (List[dict]): a list of dictionaries, each containing\n information on a single chant in the database\n request: passed when this is called in json_melody_export. Used to get the domain\n while building the chant links\n\n Returns:\n List[dict]: a list of dictionaries, with updated keys\n \"\"\"\n keymap = { # map attribute names from Chant model (i.e. db_keys\n # in json_melody_export) to corresponding attribute names\n # in old API, and remove artifacts of query process (i.e. __id suffixes)\n \"melody_id\": \"mid\", # <-\n \"id\": \"nid\", # <-\n \"cantus_id\": \"cid\", # <-\n \"siglum\": \"siglum\",\n \"source__id\": \"srcnid\", # <-\n \"folio\": \"folio\",\n \"incipit\": \"incipit\",\n \"manuscript_full_text\": \"fulltext\", # <-\n \"volpiano\": \"volpiano\",\n \"mode\": \"mode\",\n \"feast__id\": \"feast\", # <-\n \"office__id\": \"office\", # <-\n \"genre__id\": \"genre\", # <-\n \"position\": \"position\",\n }\n\n standardized_chant_values = {keymap[key]: chant_values[key] for key in chant_values}\n\n # manually build a couple of last fields that aren't represented in Chant object\n chant_uri = request.build_absolute_uri(\n reverse(\"chant-detail\", args=[chant_values[\"id\"]])\n )\n standardized_chant_values[\"chantlink\"] = chant_uri\n src_uri = request.build_absolute_uri(\n reverse(\"source-detail\", args=[chant_values[\"source__id\"]])\n )\n standardized_chant_values[\"srclink\"] = src_uri\n\n return standardized_chant_values\n\n\ndef json_sources_export(request) -> JsonResponse:\n \"\"\"\n Generate a json object of published sources with their IDs and CSV links\n \"\"\"\n cantus_segment = Segment.objects.get(id=4063)\n sources = cantus_segment.source_set.filter(published=True)\n ids = [source.id for source in sources]\n\n csv_links = {id: build_json_sources_export_dictionary(id, request) for id in ids}\n\n return JsonResponse(csv_links)\n\n\ndef build_json_sources_export_dictionary(id: int, request) -> dict:\n \"\"\"Return a dictionary containing a link to the csv-export page for a source\n\n Args:\n id (int): the pk of the source\n request: passed when this is called in json_sources_export. 
Used to get the domain\n while building the CSV link\n\n Returns:\n dict: a dictionary with a single key, \"csv\", and a link to the source's csv-export\n page\n \"\"\"\n return {\"csv\": request.build_absolute_uri(reverse(\"csv-export\", args=[id]))}\n\n\ndef json_nextchants(request, cantus_id):\n ids_and_counts = next_chants(cantus_id, display_unpublished=False)\n suggested_chants_dict = {id: count for (id, count) in ids_and_counts}\n return JsonResponse(suggested_chants_dict)\n\n\ndef json_cid_export(request, cantus_id: str) -> JsonResponse:\n \"\"\"Return a JsonResponse containing information on all chants with a given\n Cantus ID, in the following format:\n {\n \"chants\": [\n {\n \"chant\": {\n a bunch of keys and values, created in build_json_cid_dictionary\n },\n },\n {\n \"chant\": {\n etc.\n },\n },\n ]\n }\n We believe Cantus Index uses this API in building its list of concordances\n for a given Cantus ID across the databases in the Cantus Network\n\n Args:\n request: the incoming request\n cantus_id (string): A Cantus ID\n \"\"\"\n\n # the API in OldCantus appears to only return chants, and no sequences.\n chants = Chant.objects.filter(cantus_id=cantus_id).filter(source__published=True)\n chant_dicts = [{\"chant\": build_json_cid_dictionary(c, request)} for c in chants]\n response = {\"chants\": chant_dicts}\n return JsonResponse(response)\n\n\ndef build_json_cid_dictionary(chant, request) -> dict:\n \"\"\"Return a dictionary with information on a given chant in the database\n\n Args:\n chant: a Chant\n request: passed when this is called in json_cid_export. Used to get the domain\n while building the chant link\n\n Returns:\n dict: a dictionary with information about the chant and its source, including\n absolute URLs for the chant and source detail pages\n \"\"\"\n source_relative_url = reverse(\"source-detail\", args=[chant.source.id])\n source_absolute_url = request.build_absolute_uri(source_relative_url)\n chant_relative_url = reverse(\"chant-detail\", args=[chant.id])\n chant_absolute_url = request.build_absolute_uri(chant_relative_url)\n dictionary = {\n \"siglum\": chant.source.siglum,\n \"srclink\": source_absolute_url,\n \"chantlink\": chant_absolute_url,\n # \"chantlinkOLD\": # OldCantus included a URL using http:// here,\n # # whereas \"chantlink\" had a URL with https://\n \"folio\": chant.folio if chant.folio else \"\",\n \"incipit\": chant.incipit if chant.incipit else \"\",\n \"feast\": chant.feast.name if chant.feast else \"\",\n \"genre\": chant.genre.name if chant.genre else \"\",\n \"office\": chant.office.name if chant.office else \"\",\n \"position\": chant.position if chant.position else \"\",\n \"mode\": chant.mode if chant.mode else \"\",\n \"image\": chant.image_link if chant.image_link else \"\",\n \"melody\": chant.volpiano if chant.volpiano else \"\",\n \"fulltext\": (\n chant.manuscript_full_text_std_spelling\n if chant.manuscript_full_text_std_spelling\n else \"\"\n ),\n \"db\": \"CD\",\n }\n return dictionary\n\n\ndef record_exists(rec_type: BaseModel, pk: int) -> bool:\n \"\"\"Determines whether record of specific type (chant, source, sequence, article) exists for a given pk\n\n Args:\n rec_type (BaseModel): Which model to check to see if an object of that type exists\n pk (int): The ID of the object being checked for.\n\n Returns:\n bool: True if an object of the specified model with the specified ID exists, False otherwise.\n \"\"\"\n try:\n rec_type.objects.get(id=pk)\n return True\n except rec_type.DoesNotExist:\n return False\n\n\ndef 
get_user_id_from_old_indexer_id(pk: int) -> Optional[int]:\n \"\"\"\n Finds the matching User ID in NewCantus for an Indexer ID in OldCantus.\n This is stored in the User table's old_indexer_id column.\n This is necessary because indexers were originally stored in the general Node\n table in OldCantus, but are now represented as users in NewCantus.\n\n Args:\n pk (int): the ID of an indexer in OldCantus\n\n Returns:\n Optional int: the ID of the corresponding User in NewCantus\n \"\"\"\n User = get_user_model()\n try:\n result = User.objects.get(old_indexer_id=pk)\n return result.id\n except User.DoesNotExist:\n return None\n\n\ndef check_for_unpublished(item: Union[Chant, Sequence, Source]) -> None:\n \"\"\"Raises an Http404 exception if item is unpublished\n\n Args:\n item (Chant, Sequence, or Source): An item to check whether it is published or not\n\n Raises:\n Http404 if the item is a source and it's unpublished,\n or if it's a chant/sequence and its source is unpublished\n\n Returns:\n None\n \"\"\"\n if isinstance(item, Source):\n if not item.published:\n raise Http404()\n if isinstance(item, Chant) or isinstance(item, Sequence):\n if not item.source.published:\n raise Http404()\n\n\nNODE_TYPES_AND_VIEWS = [\n (Chant, \"chant-detail\"),\n (Source, \"source-detail\"),\n (Sequence, \"sequence-detail\"),\n (Article, \"article-detail\"),\n]\n\n\n# all IDs above this value are created in NewCantus and thus could have conflicts between types.\n# when data is migrated from OldCantus to NewCantus, (unpublished) dummy objects are created\n# in the database to ensure that all newly created objects have IDs above this number.\nNODE_ID_CUTOFF = 1_000_000\n\n\ndef json_node_export(request, id: int) -> JsonResponse:\n \"\"\"\n returns all fields of the chant/sequence/source/indexer with the specified `id`\n \"\"\"\n\n # all IDs above this value are created in NewCantus and thus could have conflicts between types.\n # when data is migrated from OldCantus to NewCantus, (unpublished) dummy objects are created\n # in the database to ensure that all newly created objects have IDs above this number.\n if id >= NODE_ID_CUTOFF:\n raise Http404()\n\n user_id = get_user_id_from_old_indexer_id(id)\n if get_user_id_from_old_indexer_id(id) is not None:\n User = get_user_model()\n user = User.objects.filter(id=user_id)\n # in order to easily unpack the object's properties in `vals` below, `user` needs to be\n # a queryset rather than an individual object.\n vals = dict(*user.values())\n return JsonResponse(vals)\n\n for rec_type, _ in NODE_TYPES_AND_VIEWS:\n if record_exists(rec_type, id):\n requested_item = rec_type.objects.filter(id=id)\n # in order to easily unpack the object's properties in `vals` below, `requested_item`\n # needs to be a queryset rather than an individual object. 
But in order to\n # `check_for_unpublished`, we need a single object rather than a queryset, hence\n # `.first()`\n check_for_unpublished(\n requested_item.first()\n ) # raises a 404 if item is unpublished\n vals = dict(*requested_item.values())\n return JsonResponse(vals)\n\n return HttpResponseNotFound()\n\n\ndef articles_list_export(request) -> HttpResponse:\n \"\"\"Returns a list of URLs of all articles on the site\n\n Args:\n request: the incoming request\n\n Returns:\n HttpResponse: A list of URLs, separated by newline characters\n \"\"\"\n articles = Article.objects.all()\n article_urls = [\n request.build_absolute_uri(reverse(\"article-detail\", args=[article.id]))\n for article in articles\n ]\n return HttpResponse(\" \".join(article_urls), content_type=\"text/plain\")\n\n\ndef flatpages_list_export(request) -> HttpResponse:\n \"\"\"Returns a list of URLs of all articles on the site\n\n Args:\n request: the incoming request\n\n Returns:\n HttpResponse: A list of URLs, separated by newline characters\n \"\"\"\n\n flatpages = FlatPage.objects.all()\n flatpage_urls = [\n request.build_absolute_uri(flatpage.get_absolute_url())\n for flatpage in flatpages\n ]\n return HttpResponse(\" \".join(flatpage_urls), content_type=\"text/plain\")\n\n\ndef redirect_node_url(request, pk: int) -> HttpResponse:\n \"\"\"\n A function that will redirect /node/ URLs from OldCantus to their corresponding page in NewCantus.\n This makes NewCantus links backwards compatible for users who may have bookmarked these types of URLs in OldCantus.\n In addition, this function (paired with get_user_id() below) account for the different numbering systems in both versions of CantusDB, notably for /indexer/ paths which are now at /user/.\n\n Takes in a request and the primary key (ID following /node/ in the URL) as arguments.\n Returns the matching page in NewCantus if it exists and a 404 otherwise.\n \"\"\"\n\n if pk >= NODE_ID_CUTOFF:\n raise Http404(\"Invalid ID for /node/ path.\")\n\n user_id = get_user_id_from_old_indexer_id(pk)\n if get_user_id_from_old_indexer_id(pk) is not None:\n return redirect(\"user-detail\", user_id)\n\n for rec_type, view in NODE_TYPES_AND_VIEWS:\n if record_exists(rec_type, pk):\n # if an object is found, a redirect() call to the appropriate view is returned\n return redirect(view, pk)\n\n # if it reaches the end of the types with finding an existing object, a 404 will be returned\n raise Http404(\"No record found matching the /node/ query.\")\n\n\ndef handle404(request, exception):\n return render(request, \"404.html\")\n\n\n@login_required\ndef change_password(request):\n if request.method == \"POST\":\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, \"Your password was successfully updated!\")\n else:\n form = PasswordChangeForm(request.user)\n return render(request, \"registration/change_password.html\", {\"form\": form})\n\n\ndef project_manager_check(user):\n \"\"\"\n A callback function that will be called by the user_passes_test decorator of content_overview.\n\n Takes in a logged-in user as an argument.\n Returns True if they are in a \"project manager\" group, raises PermissionDenied otherwise.\n \"\"\"\n if user.groups.filter(name=\"project manager\").exists():\n return True\n raise PermissionDenied\n\n\n# first give the user a chance to login\n@login_required\n# if they're logged in but they're not a project manager, raise 
403\n@user_passes_test(project_manager_check)\ndef content_overview(request):\n objects = []\n models = [\n Source,\n Chant,\n Feast,\n Sequence,\n Office,\n Provenance,\n Genre,\n Notation,\n Century,\n RismSiglum,\n ]\n\n model_names = [model._meta.verbose_name_plural for model in models]\n selected_model_name = request.GET.get(\"model\", None)\n selected_model = None\n if selected_model_name in model_names:\n selected_model = models[model_names.index(selected_model_name)]\n\n objects = []\n if selected_model:\n objects = selected_model.objects.all().order_by(\"-date_updated\")\n\n paginator = Paginator(objects, 100)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n context = {\n \"models\": model_names,\n \"selected_model_name\": selected_model_name,\n \"page_obj\": page_obj,\n }\n\n return render(request, \"content_overview.html\", context)\n\n\ndef redirect_indexer(request, pk: int) -> HttpResponse:\n \"\"\"\n A function that will redirect /indexer/ URLs from OldCantus to their corresponding /user/ page in NewCantus.\n This makes NewCantus links backwards compatible for users who may have bookmarked these types of URLs in OldCantus.\n\n Takes in a request and the Indexer ID as arguments.\n Returns the matching User page in NewCantus if it exists and a 404 otherwise.\n \"\"\"\n user_id = get_user_id_from_old_indexer_id(pk)\n if get_user_id_from_old_indexer_id(pk) is not None:\n return redirect(\"user-detail\", user_id)\n\n raise Http404(\"No indexer found matching the query.\")\n\n\ndef redirect_office(request) -> HttpResponse:\n \"\"\"\n Redirects from office/ (à la OldCantus) to offices/ (à la NewCantus)\n\n Args:\n request\n\n Returns:\n HttpResponse\n \"\"\"\n return redirect(\"office-list\")\n\n\ndef redirect_genre(request) -> HttpResponse:\n \"\"\"\n Redirects from genre/ (à la OldCantus) to genres/ (à la NewCantus)\n\n Args:\n request\n\n Returns:\n HttpResponse\n \"\"\"\n return redirect(\"genre-list\")\n\n\ndef redirect_documents(request) -> HttpResponse:\n \"\"\"Handle requests to old paths for various\n documents on OldCantus, returning an HTTP Response\n redirecting the user to the updated path\n\n Args:\n request: the request to the old path\n\n Returns:\n HttpResponse: response redirecting to the new path\n \"\"\"\n mapping = {\n \"/sites/default/files/documents/1. Quick Guide to Liturgy.pdf\": static(\n \"documents/1. Quick Guide to Liturgy.pdf\"\n ),\n \"/sites/default/files/documents/2. Volpiano Protocols.pdf\": static(\n \"documents/2. Volpiano Protocols.pdf\"\n ),\n \"/sites/default/files/documents/3. Volpiano Neumes for Review.docx\": static(\n \"documents/3. Volpiano Neumes for Review.docx\"\n ),\n \"/sites/default/files/documents/4. Volpiano Neume Protocols.pdf\": static(\n \"documents/4. Volpiano Neume Protocols.pdf\"\n ),\n \"/sites/default/files/documents/5. Volpiano Editing Guidelines.pdf\": static(\n \"documents/5. Volpiano Editing Guidelines.pdf\"\n ),\n \"/sites/default/files/documents/7. Guide to Graduals.pdf\": static(\n \"documents/7. 
Guide to Graduals.pdf\"\n ),\n \"/sites/default/files/HOW TO - manuscript descriptions-Nov6-20.pdf\": static(\n \"documents/HOW TO - manuscript descriptions-Nov6-20.pdf\"\n ),\n }\n old_path = request.path\n try:\n new_path = mapping[old_path]\n except KeyError:\n raise Http404\n return redirect(new_path)\n\n\nclass CurrentEditorsAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return get_user_model().objects.none()\n qs = (\n get_user_model()\n .objects.filter(\n Q(groups__name=\"project manager\")\n | Q(groups__name=\"editor\")\n | Q(groups__name=\"contributor\")\n )\n .order_by(\"full_name\")\n )\n if self.q:\n qs = qs.filter(\n Q(full_name__istartswith=self.q) | Q(email__istartswith=self.q)\n )\n return qs\n\n\nclass AllUsersAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return get_user_model().objects.none()\n qs = get_user_model().objects.all().order_by(\"full_name\")\n if self.q:\n qs = qs.filter(\n Q(full_name__istartswith=self.q) | Q(email__istartswith=self.q)\n )\n return qs\n\n\nclass CenturyAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Century.objects.none()\n qs = Century.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs\n\n\nclass RismSiglumAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return RismSiglum.objects.none()\n qs = RismSiglum.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs\n\n\nclass FeastAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Feast.objects.none()\n qs = Feast.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n return qs\n\n\nclass OfficeAutocomplete(autocomplete.Select2QuerySetView):\n def get_result_label(self, office):\n return f\"{office.name} - {office.description}\"\n\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Office.objects.none()\n qs = Office.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(\n Q(name__istartswith=self.q) | Q(description__icontains=self.q)\n )\n return qs\n\n\nclass GenreAutocomplete(autocomplete.Select2QuerySetView):\n def get_result_label(self, genre):\n return f\"{genre.name} - {genre.description}\"\n\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Genre.objects.none()\n qs = Genre.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(\n Q(name__istartswith=self.q) | Q(description__icontains=self.q)\n )\n return qs\n\n\nclass DifferentiaAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Differentia.objects.none()\n qs = Differentia.objects.all().order_by(\"differentia_id\")\n if self.q:\n qs = qs.filter(differentia_id__istartswith=self.q)\n return qs\n\n\nclass ProvenanceAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Provenance.objects.none()\n qs = Provenance.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs\n\n\nclass ProofreadByAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not 
self.request.user.is_authenticated:\n return get_user_model().objects.none()\n qs = (\n get_user_model()\n .objects.filter(\n Q(groups__name=\"project manager\") | Q(groups__name=\"editor\")\n )\n .distinct()\n .order_by(\"full_name\")\n )\n if self.q:\n qs = qs.filter(\n Q(full_name__istartswith=self.q) | Q(email__istartswith=self.q)\n )\n return qs\n", "path": "django/cantusdb_project/main_app/views/views.py" } ]
[ { "content": "import csv\nfrom typing import Optional, Union\nfrom django.http.response import JsonResponse\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import render, redirect\nfrom django.urls.base import reverse\nfrom articles.models import Article\nfrom main_app.models import (\n Century,\n Chant,\n Differentia,\n Feast,\n Genre,\n Notation,\n Office,\n Provenance,\n RismSiglum,\n Segment,\n Sequence,\n Source,\n)\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom main_app.models.base_model import BaseModel\nfrom next_chants import next_chants\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.core.exceptions import PermissionDenied\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\nfrom typing import List\nfrom django.core.paginator import Paginator\nfrom django.templatetags.static import static\nfrom django.contrib.flatpages.models import FlatPage\nfrom dal import autocomplete\nfrom django.db.models import Q\n\n\n@login_required\ndef items_count(request):\n \"\"\"\n Function-based view for the ``items count`` page, accessed with ``content-statistics``\n\n Update 2022-01-05:\n This page has been changed on the original Cantus. It is now in the private domain\n\n Args:\n request (request): The request\n\n Returns:\n HttpResponse: Render the page\n \"\"\"\n # in items count, the number on old cantus shows the total count of a type of object (chant, seq)\n # no matter published or not\n # but for the count of sources, it only shows the count of published sources\n chant_count = Chant.objects.count()\n sequence_count = Sequence.objects.count()\n source_count = Source.objects.filter(published=True).count()\n\n context = {\n \"chant_count\": chant_count,\n \"sequence_count\": sequence_count,\n \"source_count\": source_count,\n }\n return render(request, \"items_count.html\", context)\n\n\ndef ajax_concordance_list(request, cantus_id):\n \"\"\"\n Function-based view responding to the AJAX call for concordance list on the chant detail page,\n accessed with ``chants/<int:pk>``, click on \"Display concordances of this chant\"\n\n Args:\n cantus_id (str): The Cantus ID of the requested concordances group\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by the frontend js code\n \"\"\"\n chants = Chant.objects.filter(cantus_id=cantus_id)\n seqs = Sequence.objects.filter(cantus_id=cantus_id)\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = chants.filter(source__published=True)\n seqs = seqs.filter(source__published=True)\n\n if seqs:\n chants = chants.union(seqs).order_by(\"siglum\", \"folio\")\n else:\n chants = chants.order_by(\"siglum\", \"folio\")\n # queryset(list of dictionaries)\n concordance_values = chants.values(\n \"siglum\",\n \"folio\",\n \"incipit\",\n \"office__name\",\n \"genre__name\",\n \"position\",\n \"feast__name\",\n \"mode\",\n \"image_link\",\n )\n\n concordances = list(concordance_values)\n for i, concordance in enumerate(concordances):\n # some chants do not have a source\n # for those chants, do not return source link\n if chants[i].source:\n concordance[\"source_link\"] = chants[i].source.get_absolute_url()\n if chants[i].search_vector:\n concordance[\"chant_link\"] = chants[i].get_absolute_url()\n else:\n concordance[\"chant_link\"] = 
reverse(\"sequence-detail\", args=[chants[i].id])\n concordance[\"db\"] = \"CD\"\n\n concordance_count = len(concordances)\n return JsonResponse(\n {\"concordances\": concordances, \"concordance_count\": concordance_count},\n safe=True,\n )\n\n\ndef ajax_melody_list(request, cantus_id):\n \"\"\"\n Function-based view responding to the AJAX call for melody list on the chant detail page,\n accessed with ``chants/<int:pk>``, click on \"Display melodies connected with this chant\"\n\n Args:\n cantus_id (str): The Cantus ID of the requested concordances group\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by the frontend js code\n \"\"\"\n chants = (\n Chant.objects.filter(cantus_id=cantus_id).exclude(volpiano=None).order_by(\"id\")\n )\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = chants.filter(source__published=True)\n\n # queryset(list of dictionaries)\n concordance_values = chants.values(\n \"siglum\",\n \"folio\",\n \"office__name\",\n \"genre__name\",\n \"position\",\n \"feast__name\",\n \"cantus_id\",\n \"volpiano\",\n \"mode\",\n # OldCantus seems to use whichever is present: ms spelling, std spelling, incipit (in that order)\n \"manuscript_full_text_std_spelling\",\n )\n\n concordances = list(concordance_values)\n for i, concordance in enumerate(concordances):\n # some chants do not have a source\n # for those chants, do not return source link\n if chants[i].source:\n concordance[\"source_link\"] = chants[i].source.get_absolute_url()\n concordance[\"ci_link\"] = chants[i].get_ci_url()\n concordance[\"chant_link\"] = chants[i].get_absolute_url()\n concordance[\"db\"] = \"CD\"\n\n concordance_count = len(concordances)\n return JsonResponse(\n {\"concordances\": concordances, \"concordance_count\": concordance_count},\n safe=True,\n )\n\n\ndef csv_export(request, source_id):\n \"\"\"\n Function-based view for the CSV export page, accessed with ``csv/<str:source_id>``\n\n Args:\n source_id (str): The ID of the source to export\n\n Returns:\n HttpResponse: The CSV response\n \"\"\"\n try:\n source = Source.objects.get(id=source_id)\n except:\n raise Http404(\"This source does not exist\")\n\n display_unpublished = request.user.is_authenticated\n\n if (not source.published) and (not display_unpublished):\n raise PermissionDenied\n\n # \"4064\" is the segment id of the sequence DB, sources in that segment have sequences instead of chants\n if source.segment and source.segment.id == 4064:\n entries = source.sequence_set.order_by(\"id\")\n else:\n entries = source.chant_set.order_by(\"id\").select_related(\n \"feast\", \"office\", \"genre\"\n )\n\n response = HttpResponse(content_type=\"text/csv\")\n # response[\"Content-Disposition\"] = 'attachment; filename=\"somefilename.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(\n [\n \"siglum\",\n \"marginalia\",\n \"folio\",\n \"sequence\",\n \"incipit\",\n \"feast\",\n \"office\",\n \"genre\",\n \"position\",\n \"cantus_id\",\n \"mode\",\n \"finalis\",\n \"differentia\",\n \"differentiae_database\",\n \"fulltext_standardized\",\n \"fulltext_ms\",\n \"volpiano\",\n \"image_link\",\n \"melody_id\",\n \"addendum\",\n \"extra\",\n \"node_id\",\n ]\n )\n for entry in entries:\n feast = entry.feast.name if entry.feast else \"\"\n office = entry.office.name if entry.office else \"\"\n genre = entry.genre.name if entry.genre else \"\"\n diff_db = entry.diff_db.id if entry.diff_db else \"\"\n\n writer.writerow(\n [\n entry.siglum,\n entry.marginalia,\n entry.folio,\n # 
if entry has a c_sequence, it's a Chant. If it doesn't, it's a Sequence, so write its s_sequence\n entry.c_sequence if entry.c_sequence is not None else entry.s_sequence,\n entry.incipit,\n feast,\n office,\n genre,\n entry.position,\n entry.cantus_id,\n entry.mode,\n entry.finalis,\n entry.differentia,\n entry.diff_db,\n entry.manuscript_full_text_std_spelling,\n entry.manuscript_full_text,\n entry.volpiano,\n entry.image_link,\n entry.melody_id,\n entry.addendum,\n entry.extra,\n entry.id,\n ]\n )\n\n return response\n\n\ndef csv_export_redirect_from_old_path(request, source_id):\n return redirect(reverse(\"csv-export\", args=[source_id]))\n\n\ndef contact(request):\n \"\"\"\n Function-based view that renders the contact page ``contact``\n\n Args:\n request (request): The request\n\n Returns:\n HttpResponse: Render the contact page\n \"\"\"\n return render(request, \"contact.html\")\n\n\ndef ajax_melody_search(request):\n \"\"\"\n Function-based view responding to melody search AJAX calls, accessed with ``melody``\n\n The queryset is filtered according to the ``GET`` parameters\n\n ``GET`` parameters:\n ``notes``: Note sequence drawn on the canvas by the user\n ``anywhere``: Bool value indicating either \"search anywhere\" or \"search beginning\"\n ``transpose``: Bool value indicating either \"search exact matches\" or \"search transpositions\"\n ``siglum``: Filters by the siglum\n ``text``: Filters by the chant text\n ``genre_name``: Filters by genre of chant\n ``feast_name``: Filters by feast of chant\n ``mode``: Filters by mode of Chant\n ``source``: Search in a specific source\n\n Args:\n request (request): The request\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by frontend js code\n \"\"\"\n # all search parameters are passed in as GET params, without using the url conf\n notes = request.GET.get(\"notes\")\n anywhere = request.GET.get(\"anywhere\")\n transpose = request.GET.get(\"transpose\")\n siglum = request.GET.get(\"siglum\")\n text = request.GET.get(\"text\")\n genre_name = request.GET.get(\"genre\")\n feast_name = request.GET.get(\"feast\")\n mode = request.GET.get(\"mode\")\n source = request.GET.get(\"source\")\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = Chant.objects.filter(source__published=True)\n else:\n chants = Chant.objects\n\n # if \"search exact matches + transpositions\"\n if transpose == \"true\":\n # calculate intervals\n # replace '9' (the note G) with the char corresponding to (ASCII(a) - 1), because 'a' denotes the note A\n notes_copy = list(notes.replace(\"9\", chr(ord(\"a\") - 1)))\n # we model the interval between notes using the difference between the ASCII codes of corresponding letters\n # the letter for the note B is \"j\" (106), note A is \"h\" (104), the letter \"i\" (105) is skipped\n # move all notes above A down by one letter\n for j, note in enumerate(notes_copy):\n if ord(note) >= 106:\n notes_copy[j] = chr(ord(note) - 1)\n # `intervals` records the difference between two adjacent notes\n intervals = \"\".join(\n [\n str(ord(notes_copy[j]) - ord(notes_copy[j - 1]))\n for j in range(1, len(notes_copy))\n ]\n )\n # if \"search anywhere in the melody\"\n if anywhere == \"true\":\n chants = chants.filter(volpiano_intervals__contains=intervals)\n # if \"search the beginning of melody\"\n else:\n chants = chants.filter(volpiano_intervals__startswith=intervals)\n # if \"search exact matches\"\n else:\n # if \"search anywhere in the melody\"\n if anywhere == \"true\":\n 
chants = chants.filter(volpiano_notes__contains=notes)\n # if \"search the beginning of melody\"\n else:\n chants = chants.filter(volpiano_notes__startswith=notes)\n\n # filter the queryset with search params\n\n # source id and siglum are duplicate information, they both uniquely identify a source\n # if searching in a specific source, use `source`\n # if searching across all sources, use `siglum`\n if source:\n chants = chants.filter(source__id=source)\n elif siglum:\n chants = chants.filter(siglum__icontains=siglum)\n\n if text:\n chants = chants.filter(manuscript_full_text_std_spelling__icontains=text)\n if genre_name:\n chants = chants.filter(genre__name__icontains=genre_name)\n if feast_name:\n chants = chants.filter(feast__name__icontains=feast_name)\n if mode:\n chants = chants.filter(mode__icontains=mode)\n\n result_values = chants.order_by(\"id\").values(\n \"id\",\n \"siglum\",\n \"folio\",\n \"incipit\",\n \"genre__name\",\n \"feast__name\",\n \"mode\",\n \"volpiano\",\n )\n # convert queryset to a list of dicts because QuerySet is not JSON serializable\n # the above constructed queryset will be evaluated here\n results = list(result_values)\n for result in results:\n # construct the url for chant detail page and add it to the result\n result[\"chant_link\"] = reverse(\"chant-detail\", args=[result[\"id\"]])\n\n result_count = result_values.count()\n return JsonResponse({\"results\": results, \"result_count\": result_count}, safe=True)\n\n\ndef ajax_search_bar(request, search_term):\n \"\"\"\n Function-based view responding to global search bar AJAX calls,\n accessed with the search bar on the top-right corner of almost every page.\n\n Args:\n search_term (str): The search term input\n\n Returns:\n JsonResponse: A response to the AJAX call, to be unpacked by frontend js code\n \"\"\"\n # load only the first seven chants\n CHANT_CNT = 7\n\n if not search_term.replace(\" \", \"\").isalpha():\n # if the search term contains at least one digit, assume user is searching by Cantus ID\n chants = Chant.objects.filter(cantus_id__istartswith=search_term).order_by(\"id\")\n else:\n # if the search term does not contain any digits, assume user is searching by incipit\n chants = Chant.objects.filter(incipit__istartswith=search_term).order_by(\"id\")\n\n display_unpublished = request.user.is_authenticated\n if not display_unpublished:\n chants = chants.filter(source__published=True)\n\n chants = chants[:CHANT_CNT]\n\n returned_values = chants.values(\n \"incipit\",\n \"genre__name\",\n \"feast__name\",\n \"cantus_id\",\n \"mode\",\n \"siglum\",\n \"office__name\",\n \"folio\",\n \"c_sequence\",\n )\n returned_values = list(returned_values)\n for i in range(chants.count()):\n chant_link = chants[i].get_absolute_url()\n returned_values[i][\"chant_link\"] = chant_link\n return JsonResponse({\"chants\": returned_values}, safe=True)\n\n\ndef json_melody_export(request, cantus_id: str) -> JsonResponse:\n chants = Chant.objects.filter(\n cantus_id=cantus_id, volpiano__isnull=False, source__published=True\n )\n\n db_keys = [\n \"melody_id\",\n \"id\",\n \"cantus_id\",\n \"siglum\",\n \"source__id\", # don't fetch the entire Source object, just the id of\n # the source. __id is removed in standardize_for_api below\n \"folio\",\n \"incipit\",\n \"manuscript_full_text\",\n \"volpiano\",\n \"mode\",\n \"feast__id\",\n \"office__id\",\n \"genre__id\",\n \"position\",\n ]\n\n chants_values = list(chants.values(*db_keys)) # a list of dictionaries. 
Each\n # dictionary represents metadata on one chant\n\n standardized_chants_values = [\n standardize_dict_for_json_melody_export(cv, request) for cv in chants_values\n ]\n\n return JsonResponse(standardized_chants_values, safe=False)\n\n\ndef standardize_dict_for_json_melody_export(\n chant_values: List[dict], request\n) -> List[dict]:\n \"\"\"Take a list of dictionaries, and in each dictionary, change several\n of the keys to match their values in OldCantus\n\n Args:\n chant_values (List[dict]): a list of dictionaries, each containing\n information on a single chant in the database\n request: passed when this is called in json_melody_export. Used to get the domain\n while building the chant links\n\n Returns:\n List[dict]: a list of dictionaries, with updated keys\n \"\"\"\n keymap = { # map attribute names from Chant model (i.e. db_keys\n # in json_melody_export) to corresponding attribute names\n # in old API, and remove artifacts of query process (i.e. __id suffixes)\n \"melody_id\": \"mid\", # <-\n \"id\": \"nid\", # <-\n \"cantus_id\": \"cid\", # <-\n \"siglum\": \"siglum\",\n \"source__id\": \"srcnid\", # <-\n \"folio\": \"folio\",\n \"incipit\": \"incipit\",\n \"manuscript_full_text\": \"fulltext\", # <-\n \"volpiano\": \"volpiano\",\n \"mode\": \"mode\",\n \"feast__id\": \"feast\", # <-\n \"office__id\": \"office\", # <-\n \"genre__id\": \"genre\", # <-\n \"position\": \"position\",\n }\n\n standardized_chant_values = {keymap[key]: chant_values[key] for key in chant_values}\n\n # manually build a couple of last fields that aren't represented in Chant object\n chant_uri = request.build_absolute_uri(\n reverse(\"chant-detail\", args=[chant_values[\"id\"]])\n )\n standardized_chant_values[\"chantlink\"] = chant_uri\n src_uri = request.build_absolute_uri(\n reverse(\"source-detail\", args=[chant_values[\"source__id\"]])\n )\n standardized_chant_values[\"srclink\"] = src_uri\n\n return standardized_chant_values\n\n\ndef json_sources_export(request) -> JsonResponse:\n \"\"\"\n Generate a json object of published sources with their IDs and CSV links\n \"\"\"\n cantus_segment = Segment.objects.get(id=4063)\n sources = cantus_segment.source_set.filter(published=True)\n ids = [source.id for source in sources]\n\n csv_links = {id: build_json_sources_export_dictionary(id, request) for id in ids}\n\n return JsonResponse(csv_links)\n\n\ndef build_json_sources_export_dictionary(id: int, request) -> dict:\n \"\"\"Return a dictionary containing a link to the csv-export page for a source\n\n Args:\n id (int): the pk of the source\n request: passed when this is called in json_sources_export. 
Used to get the domain\n while building the CSV link\n\n Returns:\n dict: a dictionary with a single key, \"csv\", and a link to the source's csv-export\n page\n \"\"\"\n return {\"csv\": request.build_absolute_uri(reverse(\"csv-export\", args=[id]))}\n\n\ndef json_nextchants(request, cantus_id):\n ids_and_counts = next_chants(cantus_id, display_unpublished=False)\n suggested_chants_dict = {id: count for (id, count) in ids_and_counts}\n return JsonResponse(suggested_chants_dict)\n\n\ndef json_cid_export(request, cantus_id: str) -> JsonResponse:\n \"\"\"Return a JsonResponse containing information on all chants with a given\n Cantus ID, in the following format:\n {\n \"chants\": [\n {\n \"chant\": {\n a bunch of keys and values, created in build_json_cid_dictionary\n },\n },\n {\n \"chant\": {\n etc.\n },\n },\n ]\n }\n We believe Cantus Index uses this API in building its list of concordances\n for a given Cantus ID across the databases in the Cantus Network\n\n Args:\n request: the incoming request\n cantus_id (string): A Cantus ID\n \"\"\"\n\n # the API in OldCantus appears to only return chants, and no sequences.\n chants = Chant.objects.filter(cantus_id=cantus_id).filter(source__published=True)\n chant_dicts = [{\"chant\": build_json_cid_dictionary(c, request)} for c in chants]\n response = {\"chants\": chant_dicts}\n return JsonResponse(response)\n\n\ndef build_json_cid_dictionary(chant, request) -> dict:\n \"\"\"Return a dictionary with information on a given chant in the database\n\n Args:\n chant: a Chant\n request: passed when this is called in json_cid_export. Used to get the domain\n while building the chant link\n\n Returns:\n dict: a dictionary with information about the chant and its source, including\n absolute URLs for the chant and source detail pages\n \"\"\"\n source_relative_url = reverse(\"source-detail\", args=[chant.source.id])\n source_absolute_url = request.build_absolute_uri(source_relative_url)\n chant_relative_url = reverse(\"chant-detail\", args=[chant.id])\n chant_absolute_url = request.build_absolute_uri(chant_relative_url)\n dictionary = {\n \"siglum\": chant.source.siglum,\n \"srclink\": source_absolute_url,\n \"chantlink\": chant_absolute_url,\n # \"chantlinkOLD\": # OldCantus included a URL using http:// here,\n # # whereas \"chantlink\" had a URL with https://\n \"folio\": chant.folio if chant.folio else \"\",\n \"incipit\": chant.incipit if chant.incipit else \"\",\n \"feast\": chant.feast.name if chant.feast else \"\",\n \"genre\": chant.genre.name if chant.genre else \"\",\n \"office\": chant.office.name if chant.office else \"\",\n \"position\": chant.position if chant.position else \"\",\n \"mode\": chant.mode if chant.mode else \"\",\n \"image\": chant.image_link if chant.image_link else \"\",\n \"melody\": chant.volpiano if chant.volpiano else \"\",\n \"fulltext\": (\n chant.manuscript_full_text_std_spelling\n if chant.manuscript_full_text_std_spelling\n else \"\"\n ),\n \"db\": \"CD\",\n }\n return dictionary\n\n\ndef record_exists(rec_type: BaseModel, pk: int) -> bool:\n \"\"\"Determines whether record of specific type (chant, source, sequence, article) exists for a given pk\n\n Args:\n rec_type (BaseModel): Which model to check to see if an object of that type exists\n pk (int): The ID of the object being checked for.\n\n Returns:\n bool: True if an object of the specified model with the specified ID exists, False otherwise.\n \"\"\"\n try:\n rec_type.objects.get(id=pk)\n return True\n except rec_type.DoesNotExist:\n return False\n\n\ndef 
get_user_id_from_old_indexer_id(pk: int) -> Optional[int]:\n \"\"\"\n Finds the matching User ID in NewCantus for an Indexer ID in OldCantus.\n This is stored in the User table's old_indexer_id column.\n This is necessary because indexers were originally stored in the general Node\n table in OldCantus, but are now represented as users in NewCantus.\n\n Args:\n pk (int): the ID of an indexer in OldCantus\n\n Returns:\n Optional int: the ID of the corresponding User in NewCantus\n \"\"\"\n User = get_user_model()\n try:\n result = User.objects.get(old_indexer_id=pk)\n return result.id\n except User.DoesNotExist:\n return None\n\n\ndef check_for_unpublished(item: Union[Chant, Sequence, Source]) -> None:\n \"\"\"Raises an Http404 exception if item is unpublished\n\n Args:\n item (Chant, Sequence, or Source): An item to check whether it is published or not\n\n Raises:\n Http404 if the item is a source and it's unpublished,\n or if it's a chant/sequence and its source is unpublished\n\n Returns:\n None\n \"\"\"\n if isinstance(item, Source):\n if not item.published:\n raise Http404()\n if isinstance(item, Chant) or isinstance(item, Sequence):\n if not item.source.published:\n raise Http404()\n\n\nNODE_TYPES_AND_VIEWS = [\n (Chant, \"chant-detail\"),\n (Source, \"source-detail\"),\n (Sequence, \"sequence-detail\"),\n (Article, \"article-detail\"),\n]\n\n\n# all IDs above this value are created in NewCantus and thus could have conflicts between types.\n# when data is migrated from OldCantus to NewCantus, (unpublished) dummy objects are created\n# in the database to ensure that all newly created objects have IDs above this number.\nNODE_ID_CUTOFF = 1_000_000\n\n\ndef json_node_export(request, id: int) -> JsonResponse:\n \"\"\"\n returns all fields of the chant/sequence/source/indexer with the specified `id`\n \"\"\"\n\n # all IDs above this value are created in NewCantus and thus could have conflicts between types.\n # when data is migrated from OldCantus to NewCantus, (unpublished) dummy objects are created\n # in the database to ensure that all newly created objects have IDs above this number.\n if id >= NODE_ID_CUTOFF:\n raise Http404()\n\n user_id = get_user_id_from_old_indexer_id(id)\n if get_user_id_from_old_indexer_id(id) is not None:\n User = get_user_model()\n user = User.objects.filter(id=user_id)\n # in order to easily unpack the object's properties in `vals` below, `user` needs to be\n # a queryset rather than an individual object.\n vals = dict(*user.values())\n return JsonResponse(vals)\n\n for rec_type, _ in NODE_TYPES_AND_VIEWS:\n if record_exists(rec_type, id):\n requested_item = rec_type.objects.filter(id=id)\n # in order to easily unpack the object's properties in `vals` below, `requested_item`\n # needs to be a queryset rather than an individual object. 
But in order to\n # `check_for_unpublished`, we need a single object rather than a queryset, hence\n # `.first()`\n check_for_unpublished(\n requested_item.first()\n ) # raises a 404 if item is unpublished\n vals = dict(*requested_item.values())\n return JsonResponse(vals)\n\n return HttpResponseNotFound()\n\n\ndef articles_list_export(request) -> HttpResponse:\n \"\"\"Returns a list of URLs of all articles on the site\n\n Args:\n request: the incoming request\n\n Returns:\n HttpResponse: A list of URLs, separated by newline characters\n \"\"\"\n articles = Article.objects.all()\n article_urls = [\n request.build_absolute_uri(reverse(\"article-detail\", args=[article.id]))\n for article in articles\n ]\n return HttpResponse(\" \".join(article_urls), content_type=\"text/plain\")\n\n\ndef flatpages_list_export(request) -> HttpResponse:\n \"\"\"Returns a list of URLs of all articles on the site\n\n Args:\n request: the incoming request\n\n Returns:\n HttpResponse: A list of URLs, separated by newline characters\n \"\"\"\n\n flatpages = FlatPage.objects.all()\n flatpage_urls = [\n request.build_absolute_uri(flatpage.get_absolute_url())\n for flatpage in flatpages\n ]\n return HttpResponse(\" \".join(flatpage_urls), content_type=\"text/plain\")\n\n\ndef redirect_node_url(request, pk: int) -> HttpResponse:\n \"\"\"\n A function that will redirect /node/ URLs from OldCantus to their corresponding page in NewCantus.\n This makes NewCantus links backwards compatible for users who may have bookmarked these types of URLs in OldCantus.\n In addition, this function (paired with get_user_id() below) account for the different numbering systems in both versions of CantusDB, notably for /indexer/ paths which are now at /user/.\n\n Takes in a request and the primary key (ID following /node/ in the URL) as arguments.\n Returns the matching page in NewCantus if it exists and a 404 otherwise.\n \"\"\"\n\n if pk >= NODE_ID_CUTOFF:\n raise Http404(\"Invalid ID for /node/ path.\")\n\n user_id = get_user_id_from_old_indexer_id(pk)\n if get_user_id_from_old_indexer_id(pk) is not None:\n return redirect(\"user-detail\", user_id)\n\n for rec_type, view in NODE_TYPES_AND_VIEWS:\n if record_exists(rec_type, pk):\n # if an object is found, a redirect() call to the appropriate view is returned\n return redirect(view, pk)\n\n # if it reaches the end of the types with finding an existing object, a 404 will be returned\n raise Http404(\"No record found matching the /node/ query.\")\n\n\ndef handle404(request, exception):\n return render(request, \"404.html\")\n\n\n@login_required\ndef change_password(request):\n if request.method == \"POST\":\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, \"Your password was successfully updated!\")\n else:\n form = PasswordChangeForm(request.user)\n return render(request, \"registration/change_password.html\", {\"form\": form})\n\n\ndef project_manager_check(user):\n \"\"\"\n A callback function that will be called by the user_passes_test decorator of content_overview.\n\n Takes in a logged-in user as an argument.\n Returns True if they are in a \"project manager\" group, raises PermissionDenied otherwise.\n \"\"\"\n if user.groups.filter(name=\"project manager\").exists():\n return True\n raise PermissionDenied\n\n\n# first give the user a chance to login\n@login_required\n# if they're logged in but they're not a project manager, raise 
403\n@user_passes_test(project_manager_check)\ndef content_overview(request):\n objects = []\n models = [\n Source,\n Chant,\n Feast,\n Sequence,\n Office,\n Provenance,\n Genre,\n Notation,\n Century,\n RismSiglum,\n ]\n\n model_names = [model._meta.verbose_name_plural for model in models]\n selected_model_name = request.GET.get(\"model\", None)\n selected_model = None\n if selected_model_name in model_names:\n selected_model = models[model_names.index(selected_model_name)]\n\n objects = []\n if selected_model:\n objects = selected_model.objects.all().order_by(\"-date_updated\")\n\n paginator = Paginator(objects, 100)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n context = {\n \"models\": model_names,\n \"selected_model_name\": selected_model_name,\n \"page_obj\": page_obj,\n }\n\n return render(request, \"content_overview.html\", context)\n\n\ndef redirect_indexer(request, pk: int) -> HttpResponse:\n \"\"\"\n A function that will redirect /indexer/ URLs from OldCantus to their corresponding /user/ page in NewCantus.\n This makes NewCantus links backwards compatible for users who may have bookmarked these types of URLs in OldCantus.\n\n Takes in a request and the Indexer ID as arguments.\n Returns the matching User page in NewCantus if it exists and a 404 otherwise.\n \"\"\"\n user_id = get_user_id_from_old_indexer_id(pk)\n if get_user_id_from_old_indexer_id(pk) is not None:\n return redirect(\"user-detail\", user_id)\n\n raise Http404(\"No indexer found matching the query.\")\n\n\ndef redirect_office(request) -> HttpResponse:\n \"\"\"\n Redirects from office/ (à la OldCantus) to offices/ (à la NewCantus)\n\n Args:\n request\n\n Returns:\n HttpResponse\n \"\"\"\n return redirect(\"office-list\")\n\n\ndef redirect_genre(request) -> HttpResponse:\n \"\"\"\n Redirects from genre/ (à la OldCantus) to genres/ (à la NewCantus)\n\n Args:\n request\n\n Returns:\n HttpResponse\n \"\"\"\n return redirect(\"genre-list\")\n\n\ndef redirect_documents(request) -> HttpResponse:\n \"\"\"Handle requests to old paths for various\n documents on OldCantus, returning an HTTP Response\n redirecting the user to the updated path\n\n Args:\n request: the request to the old path\n\n Returns:\n HttpResponse: response redirecting to the new path\n \"\"\"\n mapping = {\n \"/sites/default/files/documents/1. Quick Guide to Liturgy.pdf\": static(\n \"documents/1. Quick Guide to Liturgy.pdf\"\n ),\n \"/sites/default/files/documents/2. Volpiano Protocols.pdf\": static(\n \"documents/2. Volpiano Protocols.pdf\"\n ),\n \"/sites/default/files/documents/3. Volpiano Neumes for Review.docx\": static(\n \"documents/3. Volpiano Neumes for Review.docx\"\n ),\n \"/sites/default/files/documents/4. Volpiano Neume Protocols.pdf\": static(\n \"documents/4. Volpiano Neume Protocols.pdf\"\n ),\n \"/sites/default/files/documents/5. Volpiano Editing Guidelines.pdf\": static(\n \"documents/5. Volpiano Editing Guidelines.pdf\"\n ),\n \"/sites/default/files/documents/7. Guide to Graduals.pdf\": static(\n \"documents/7. 
Guide to Graduals.pdf\"\n ),\n \"/sites/default/files/HOW TO - manuscript descriptions-Nov6-20.pdf\": static(\n \"documents/HOW TO - manuscript descriptions-Nov6-20.pdf\"\n ),\n }\n old_path = request.path\n try:\n new_path = mapping[old_path]\n except KeyError:\n raise Http404\n return redirect(new_path)\n\n\nclass CurrentEditorsAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return get_user_model().objects.none()\n qs = (\n get_user_model()\n .objects.filter(\n Q(groups__name=\"project manager\")\n | Q(groups__name=\"editor\")\n | Q(groups__name=\"contributor\")\n )\n .order_by(\"full_name\")\n )\n if self.q:\n qs = qs.filter(\n Q(full_name__istartswith=self.q) | Q(email__istartswith=self.q)\n )\n return qs\n\n\nclass AllUsersAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return get_user_model().objects.none()\n qs = get_user_model().objects.all().order_by(\"full_name\")\n if self.q:\n qs = qs.filter(\n Q(full_name__istartswith=self.q) | Q(email__istartswith=self.q)\n )\n return qs\n\n\nclass CenturyAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Century.objects.none()\n qs = Century.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs\n\n\nclass RismSiglumAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return RismSiglum.objects.none()\n qs = RismSiglum.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs\n\n\nclass FeastAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Feast.objects.none()\n qs = Feast.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n return qs\n\n\nclass OfficeAutocomplete(autocomplete.Select2QuerySetView):\n def get_result_label(self, office):\n return f\"{office.name} - {office.description}\"\n\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Office.objects.none()\n qs = Office.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(\n Q(name__istartswith=self.q) | Q(description__icontains=self.q)\n )\n return qs\n\n\nclass GenreAutocomplete(autocomplete.Select2QuerySetView):\n def get_result_label(self, genre):\n return f\"{genre.name} - {genre.description}\"\n\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Genre.objects.none()\n qs = Genre.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(\n Q(name__istartswith=self.q) | Q(description__icontains=self.q)\n )\n return qs\n\n\nclass DifferentiaAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Differentia.objects.none()\n qs = Differentia.objects.all().order_by(\"differentia_id\")\n if self.q:\n qs = qs.filter(differentia_id__istartswith=self.q)\n return qs\n\n\nclass ProvenanceAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not self.request.user.is_authenticated:\n return Provenance.objects.none()\n qs = Provenance.objects.all().order_by(\"name\")\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n return qs\n\n\nclass ProofreadByAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n if not 
self.request.user.is_authenticated:\n return get_user_model().objects.none()\n qs = (\n get_user_model()\n .objects.filter(\n Q(groups__name=\"project manager\") | Q(groups__name=\"editor\")\n )\n .distinct()\n .order_by(\"full_name\")\n )\n if self.q:\n qs = qs.filter(\n Q(full_name__istartswith=self.q) | Q(email__istartswith=self.q)\n )\n return qs\n", "path": "django/cantusdb_project/main_app/views/views.py" } ]
diff --git a/django/cantusdb_project/main_app/views/views.py b/django/cantusdb_project/main_app/views/views.py index 61d06a15c..cbcba9c17 100644 --- a/django/cantusdb_project/main_app/views/views.py +++ b/django/cantusdb_project/main_app/views/views.py @@ -1063,7 +1063,7 @@ def get_queryset(self): return Provenance.objects.none() qs = Provenance.objects.all().order_by("name") if self.q: - qs = qs.filter(name__istartswith=self.q) + qs = qs.filter(name__icontains=self.q) return qs
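For reference, the change above swaps a case-insensitive prefix match for a case-insensitive substring match in the Django ORM. A minimal sketch of the difference, using the `Provenance` model imported in the file above (the search term `"klos"` is illustrative, not from the PR):

```python
from main_app.models import Provenance

# Prefix match: only provenance names that *begin* with the term (case-insensitive).
starts = Provenance.objects.filter(name__istartswith="klos")

# Substring match (the behaviour after this change): the term may appear anywhere in the name.
contains = Provenance.objects.filter(name__icontains="klos")
```

Every result of the first queryset is also in the second, so after the change the autocomplete simply returns more candidates for the same query.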
microsoft__ptvsd-362
PTVSD fails to run on windows
```
Traceback (most recent call last):
  File "C:\Users\karth\.vscode\extensions\ms-python.python-2018.3.1\pythonFiles\experimental\ptvsd_launcher.py", line 96, in <module>
    vspd.debug(filename, port_num, debug_id, debug_options, run_as)
  File "c:\git\ptvsd\ptvsd\debugger.py", line 36, in debug
    run(address, filename, *args, **kwargs)
  File "c:\git\ptvsd\ptvsd\__main__.py", line 37, in run_file
    run(argv, addr, **kwargs)
  File "c:\git\ptvsd\ptvsd\__main__.py", line 85, in _run
    daemon = _install(_pydevd, addr, **kwargs)
  File "c:\git\ptvsd\ptvsd\pydevd_hooks.py", line 52, in install
    daemon = Daemon(**kwargs)
  File "c:\git\ptvsd\ptvsd\daemon.py", line 53, in __init__
    self.install_exit_handlers()
  File "c:\git\ptvsd\ptvsd\daemon.py", line 91, in install_exit_handlers
    signal.SIGHUP: [],
AttributeError: module 'signal' has no attribute 'SIGHUP'
```
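For context, `SIGHUP` is a POSIX-only signal: the Windows build of Python does not define `signal.SIGHUP` at all, so the attribute lookup in the handler table (the `daemon.py` line 91 shown in the traceback) raises `AttributeError` before any handler is registered. A quick standalone check of which exit-related signals exist on the current platform:

```python
import signal

# On Linux/macOS this prints True for SIGHUP; on Windows it prints False,
# and referencing signal.SIGHUP directly would raise AttributeError.
for name in ("SIGHUP", "SIGTERM", "SIGINT", "SIGBREAK"):
    print(name, hasattr(signal, name))
```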
[ { "content": "import atexit\nimport os\nimport platform\nimport signal\nimport sys\n\nfrom ptvsd import wrapper\nfrom ptvsd.socket import close_socket\n\n\ndef _wait_on_exit():\n if sys.__stdout__ is not None:\n try:\n import msvcrt\n except ImportError:\n sys.__stdout__.write('Press Enter to continue . . . ')\n sys.__stdout__.flush()\n sys.__stdin__.read(1)\n else:\n sys.__stdout__.write('Press any key to continue . . . ')\n sys.__stdout__.flush()\n msvcrt.getch()\n\n\nclass DaemonClosedError(RuntimeError):\n \"\"\"Indicates that a Daemon was unexpectedly closed.\"\"\"\n def __init__(self, msg='closed'):\n super(DaemonClosedError, self).__init__(msg)\n\n\nclass Daemon(object):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n exitcode = 0\n\n def __init__(self, wait_on_exit=_wait_on_exit,\n addhandlers=True, killonclose=True):\n self.wait_on_exit = wait_on_exit\n self.killonclose = killonclose\n\n self._closed = False\n self._exiting_via_atexit_handler = False\n\n self._pydevd = None\n self._server = None\n self._client = None\n self._adapter = None\n\n self._signal_handlers = None\n self._atexit_handlers = None\n self._handlers_installed = False\n if addhandlers:\n self.install_exit_handlers()\n\n @property\n def pydevd(self):\n return self._pydevd\n\n @property\n def server(self):\n return self._server\n\n @property\n def client(self):\n return self._client\n\n @property\n def adapter(self):\n return self._adapter\n\n def start(self, server=None):\n \"\"\"Return the \"socket\" to use for pydevd after setting it up.\"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is not None:\n raise RuntimeError('already started')\n self._pydevd = wrapper.PydevdSocket(\n self._handle_pydevd_message,\n self._handle_pydevd_close,\n self._getpeername,\n self._getsockname,\n )\n self._server = server\n return self._pydevd\n\n def install_exit_handlers(self):\n \"\"\"Set the placeholder handlers.\"\"\"\n if self._signal_handlers is not None:\n raise RuntimeError('exit handlers already installed')\n self._signal_handlers = {\n signal.SIGHUP: [],\n }\n self._atexit_handlers = []\n\n if platform.system() != 'Windows':\n try:\n for sig in self._signal_handlers:\n signal.signal(sig, self._signal_handler)\n except ValueError:\n # Wasn't called in main thread!\n raise\n atexit.register(self._atexit_handler)\n\n def set_connection(self, client):\n \"\"\"Set the client socket to use for the debug adapter.\n\n A VSC message loop is started for the client.\n \"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is None:\n raise RuntimeError('not started yet')\n if self._client is not None:\n raise RuntimeError('connection already set')\n self._client = client\n\n self._adapter = wrapper.VSCodeMessageProcessor(\n client,\n self._pydevd.pydevd_notify,\n self._pydevd.pydevd_request,\n self._handle_vsc_disconnect,\n self._handle_vsc_close,\n )\n name = 'ptvsd.Client' if self._server is None else 'ptvsd.Server'\n self._adapter.start(name)\n if self._signal_handlers is not None:\n self._add_signal_handlers()\n self._add_atexit_handler()\n return self._adapter\n\n def close(self):\n \"\"\"Stop all loops and release all resources.\"\"\"\n if self._closed:\n raise DaemonClosedError('already closed')\n self._closed = True\n\n if self._adapter is not None:\n normal, abnormal = self._adapter._wait_options()\n if (normal and not self.exitcode) or (abnormal and self.exitcode):\n self.wait_on_exit()\n\n if self._pydevd is not None:\n close_socket(self._pydevd)\n 
if self._client is not None:\n self._release_connection()\n\n def re_build_breakpoints(self):\n self.adapter.re_build_breakpoints()\n\n # internal methods\n\n def _signal_handler(self, signum, frame):\n for handle_signal in self._signal_handlers.get(signum, ()):\n handle_signal(signum, frame)\n\n def _atexit_handler(self):\n for handle_atexit in self._atexit_handlers:\n handle_atexit()\n\n def _add_atexit_handler(self):\n def handler():\n self._exiting_via_atexit_handler = True\n if not self._closed:\n self.close()\n if self._adapter is not None:\n # TODO: Do this in VSCodeMessageProcessor.close()?\n self._adapter._wait_for_server_thread()\n self._atexit_handlers.append(handler)\n\n def _add_signal_handlers(self):\n def handler(signum, frame):\n if not self._closed:\n self.close()\n sys.exit(0)\n self._signal_handlers[signal.SIGHUP].append(handler)\n\n def _release_connection(self):\n if self._adapter is not None:\n # TODO: This is not correct in the \"attach\" case.\n self._adapter.handle_pydevd_stopped(self.exitcode)\n self._adapter.close()\n close_socket(self._client)\n\n # internal methods for PyDevdSocket().\n\n def _handle_pydevd_message(self, cmdid, seq, text):\n if self._adapter is not None:\n self._adapter.on_pydevd_event(cmdid, seq, text)\n\n def _handle_pydevd_close(self):\n if self._closed:\n return\n self.close()\n\n def _getpeername(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getpeername()\n\n def _getsockname(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getsockname()\n\n # internal methods for VSCodeMessageProcessor\n\n def _handle_vsc_disconnect(self, kill=False):\n if not self._closed:\n self.close()\n if kill and self.killonclose and not self._exiting_via_atexit_handler:\n os.kill(os.getpid(), signal.SIGTERM)\n\n def _handle_vsc_close(self):\n if self._closed:\n return\n self.close()\n", "path": "ptvsd/daemon.py" } ]
[ { "content": "import atexit\nimport os\nimport platform\nimport signal\nimport sys\n\nfrom ptvsd import wrapper\nfrom ptvsd.socket import close_socket\n\n\ndef _wait_on_exit():\n if sys.__stdout__ is not None:\n try:\n import msvcrt\n except ImportError:\n sys.__stdout__.write('Press Enter to continue . . . ')\n sys.__stdout__.flush()\n sys.__stdin__.read(1)\n else:\n sys.__stdout__.write('Press any key to continue . . . ')\n sys.__stdout__.flush()\n msvcrt.getch()\n\n\nclass DaemonClosedError(RuntimeError):\n \"\"\"Indicates that a Daemon was unexpectedly closed.\"\"\"\n def __init__(self, msg='closed'):\n super(DaemonClosedError, self).__init__(msg)\n\n\nclass Daemon(object):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n exitcode = 0\n\n def __init__(self, wait_on_exit=_wait_on_exit,\n addhandlers=True, killonclose=True):\n self.wait_on_exit = wait_on_exit\n self.killonclose = killonclose\n\n self._closed = False\n self._exiting_via_atexit_handler = False\n\n self._pydevd = None\n self._server = None\n self._client = None\n self._adapter = None\n\n self._signal_handlers = None\n self._atexit_handlers = None\n self._handlers_installed = False\n if addhandlers:\n self.install_exit_handlers()\n\n @property\n def pydevd(self):\n return self._pydevd\n\n @property\n def server(self):\n return self._server\n\n @property\n def client(self):\n return self._client\n\n @property\n def adapter(self):\n return self._adapter\n\n def start(self, server=None):\n \"\"\"Return the \"socket\" to use for pydevd after setting it up.\"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is not None:\n raise RuntimeError('already started')\n self._pydevd = wrapper.PydevdSocket(\n self._handle_pydevd_message,\n self._handle_pydevd_close,\n self._getpeername,\n self._getsockname,\n )\n self._server = server\n return self._pydevd\n\n def install_exit_handlers(self):\n \"\"\"Set the placeholder handlers.\"\"\"\n if self._signal_handlers is not None:\n raise RuntimeError('exit handlers already installed')\n self._signal_handlers = {\n signal.SIGHUP: [],\n }\n self._atexit_handlers = []\n\n if platform.system() != 'Windows':\n try:\n for sig in self._signal_handlers:\n signal.signal(sig, self._signal_handler)\n except ValueError:\n # Wasn't called in main thread!\n raise\n atexit.register(self._atexit_handler)\n\n def set_connection(self, client):\n \"\"\"Set the client socket to use for the debug adapter.\n\n A VSC message loop is started for the client.\n \"\"\"\n if self._closed:\n raise DaemonClosedError()\n if self._pydevd is None:\n raise RuntimeError('not started yet')\n if self._client is not None:\n raise RuntimeError('connection already set')\n self._client = client\n\n self._adapter = wrapper.VSCodeMessageProcessor(\n client,\n self._pydevd.pydevd_notify,\n self._pydevd.pydevd_request,\n self._handle_vsc_disconnect,\n self._handle_vsc_close,\n )\n name = 'ptvsd.Client' if self._server is None else 'ptvsd.Server'\n self._adapter.start(name)\n if self._signal_handlers is not None:\n self._add_signal_handlers()\n self._add_atexit_handler()\n return self._adapter\n\n def close(self):\n \"\"\"Stop all loops and release all resources.\"\"\"\n if self._closed:\n raise DaemonClosedError('already closed')\n self._closed = True\n\n if self._adapter is not None:\n normal, abnormal = self._adapter._wait_options()\n if (normal and not self.exitcode) or (abnormal and self.exitcode):\n self.wait_on_exit()\n\n if self._pydevd is not None:\n close_socket(self._pydevd)\n 
if self._client is not None:\n self._release_connection()\n\n def re_build_breakpoints(self):\n self.adapter.re_build_breakpoints()\n\n # internal methods\n\n def _signal_handler(self, signum, frame):\n for handle_signal in self._signal_handlers.get(signum, ()):\n handle_signal(signum, frame)\n\n def _atexit_handler(self):\n for handle_atexit in self._atexit_handlers:\n handle_atexit()\n\n def _add_atexit_handler(self):\n def handler():\n self._exiting_via_atexit_handler = True\n if not self._closed:\n self.close()\n if self._adapter is not None:\n # TODO: Do this in VSCodeMessageProcessor.close()?\n self._adapter._wait_for_server_thread()\n self._atexit_handlers.append(handler)\n\n def _add_signal_handlers(self):\n if platform.system() == 'Windows':\n return\n\n def handler(signum, frame):\n if not self._closed:\n self.close()\n sys.exit(0)\n self._signal_handlers[signal.SIGHUP].append(handler)\n\n def _release_connection(self):\n if self._adapter is not None:\n # TODO: This is not correct in the \"attach\" case.\n self._adapter.handle_pydevd_stopped(self.exitcode)\n self._adapter.close()\n close_socket(self._client)\n\n # internal methods for PyDevdSocket().\n\n def _handle_pydevd_message(self, cmdid, seq, text):\n if self._adapter is not None:\n self._adapter.on_pydevd_event(cmdid, seq, text)\n\n def _handle_pydevd_close(self):\n if self._closed:\n return\n self.close()\n\n def _getpeername(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getpeername()\n\n def _getsockname(self):\n if self._client is None:\n raise NotImplementedError\n return self._client.getsockname()\n\n # internal methods for VSCodeMessageProcessor\n\n def _handle_vsc_disconnect(self, kill=False):\n if not self._closed:\n self.close()\n if kill and self.killonclose and not self._exiting_via_atexit_handler:\n os.kill(os.getpid(), signal.SIGTERM)\n\n def _handle_vsc_close(self):\n if self._closed:\n return\n self.close()\n", "path": "ptvsd/daemon.py" } ]
diff --git a/ptvsd/daemon.py b/ptvsd/daemon.py index 7ab4622d5..6dc4681fc 100644 --- a/ptvsd/daemon.py +++ b/ptvsd/daemon.py @@ -168,6 +168,9 @@ def handler(): self._atexit_handlers.append(handler) def _add_signal_handlers(self): + if platform.system() == 'Windows': + return + def handler(signum, frame): if not self._closed: self.close()
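The patch follows the approach already used in `install_exit_handlers()`, where the `signal.signal()` registration loop is wrapped in a `platform.system() != 'Windows'` check: on Windows the POSIX-only handler plumbing is skipped rather than adapted. A hypothetical alternative (not what the patch does) would be to build the handler table only from signals the platform actually defines:

```python
import signal

# Keep only the signals that exist on this platform; SIGHUP is simply dropped on Windows.
wanted = ("SIGHUP",)
signal_handlers = {getattr(signal, name): [] for name in wanted if hasattr(signal, name)}
```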
opendatacube__datacube-core-262
Error reading rainfall grids ### Expected behaviour Return an xarray Dataset like the following: ```python <xarray.Dataset> Dimensions: (latitude: 1, longitude: 1, time: 366) Coordinates: * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ... * latitude (latitude) float64 -27.52 * longitude (longitude) float64 132.1 Data variables: rainfall (time, latitude, longitude) float32 0.0 0.0 7.44684e-13 0.0 ... Attributes: crs: EPSG:4326 ``` Data Cube, version 1.3.2 GDAL 2.1.3, released 2017/20/01 rasterio, version 1.0a8 And the conda environment at NCI is: /g/data/v10/public/modules/agdc-py3-env/20170427 ### Actual behaviour Fails hard with the first file: ```python Error opening source dataset: NETCDF:/g/data/rr5/agcd/0_05/rainfall/daily/2000/rr.2000010120000101.grid.nc:rain_day ``` And then continuing on in trying to assess the crs of the object which is None. ```python /g/data/v10/public/modules/agdc-py3/1.5.0/lib/python3.6/site-packages/datacube/storage/storage.py in _rasterio_crs_wkt(src) 62 if str(rasterio.__version__) >= '0.36.0': 63 def _rasterio_crs_wkt(src): ---> 64 return str(src.crs.wkt) 65 else: 66 def _rasterio_crs_wkt(src): AttributeError: 'NoneType' object has no attribute 'wkt' ``` ### Steps to reproduce the behaviour ```python import datacube dc = datacube.Datacube() rain = dc.load(product='bom_rainfall_grids', longitude=132.1, latitude=-27.5, time=('2000-1-1', '2001-1-1')) ``` ### Environment information * Which ``datacube --version`` are you using? Open Data Cube core, version 1.5.0 * What datacube deployment/environment are you running against? GDAL 2.2.1, released 2017/06/23 rasterio, version 1.0a9 The conda environment being used at NCI is: /g/data/v10/public/modules/agdc-py3-env/20170710
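The traceback suggests that rasterio opened the NetCDF subdataset but reported no CRS (`src.crs` is `None`), so `src.crs.wkt` fails with `AttributeError` rather than a CRS-parsing error that the fallback path could catch. A minimal, hypothetical guard for inspecting such files (the helper name is an assumption, not part of datacube):

```python
import rasterio

def crs_wkt_or_none(path):
    # Returns the CRS WKT of a raster, or None when rasterio reports no CRS,
    # as happens for some NetCDF subdatasets like the rainfall grids above.
    with rasterio.open(path) as src:
        return None if src.crs is None else str(src.crs.wkt)
```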
[ { "content": "# coding=utf-8\n\"\"\"\nCreate/store dataset data into storage units based on the provided storage mappings.\n\nImportant functions are:\n\n* :func:`reproject_and_fuse`\n* :func:`read_from_source`\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport math\nfrom contextlib import contextmanager\nfrom pathlib import Path\n\nfrom datacube.compat import urlparse, urljoin, url_parse_module\nfrom datacube.config import OPTIONS\nfrom datacube.model import Dataset\nfrom datacube.storage import netcdf_writer\nfrom datacube.utils import clamp, datetime_to_seconds_since_1970, DatacubeException, ignore_exceptions_if\nfrom datacube.utils import geometry\nfrom datacube.utils import is_url, uri_to_local_path\n\ntry:\n from yaml import CSafeDumper as SafeDumper\nexcept ImportError:\n from yaml import SafeDumper\nimport numpy\n\nfrom affine import Affine\nfrom datacube.compat import integer_types\nimport rasterio\n\ntry:\n from rasterio.warp import Resampling\nexcept ImportError:\n from rasterio.warp import RESAMPLING as Resampling\n\n_LOG = logging.getLogger(__name__)\n\nRESAMPLING_METHODS = {\n 'nearest': Resampling.nearest,\n 'cubic': Resampling.cubic,\n 'bilinear': Resampling.bilinear,\n 'cubic_spline': Resampling.cubic_spline,\n 'lanczos': Resampling.lanczos,\n 'average': Resampling.average,\n}\n\nassert str(rasterio.__version__) >= '0.34.0', \"rasterio version 0.34.0 or higher is required\"\nGDAL_NETCDF_DIM = ('NETCDF_DIM_'\n if str(rasterio.__gdal_version__) >= '1.10.0' else\n 'NETCDF_DIMENSION_')\n\n\ndef _rasterio_resampling_method(resampling):\n return RESAMPLING_METHODS[resampling.lower()]\n\n\nif str(rasterio.__version__) >= '0.36.0':\n def _rasterio_crs_wkt(src):\n return str(src.crs.wkt)\nelse:\n def _rasterio_crs_wkt(src):\n return str(src.crs_wkt)\n\nif str(rasterio.__version__) >= '1.0':\n def _rasterio_transform(src):\n return src.transform\nelse:\n def _rasterio_transform(src):\n return src.affine\n\n\ndef _calc_offsets_impl(off, scale, src_size, dst_size):\n assert scale >= 1-1e-5\n\n if off >= 0:\n write_off = 0\n else:\n write_off = math.ceil((-off-0.5)/scale)\n read_off = round((write_off+0.5)*scale-0.5+off) - round(0.5*(scale-1.0)) # assuming read_size/write_size ~= scale\n if read_off >= src_size:\n return 0, 0, 0, 0\n\n write_end = dst_size\n write_size = write_end-write_off\n read_end = read_off+round(write_size*scale)\n if read_end > src_size:\n # +0.5 below is a fudge that will return last row in more situations, but will change the scale more\n write_end = math.floor((src_size-off+0.5)/scale)\n write_size = write_end-write_off\n read_end = clamp(read_off+round(write_size*scale), read_off, src_size)\n read_size = read_end-read_off\n\n return int(read_off), int(write_off), int(read_size), int(write_size)\n\n\ndef _calc_offsets2(off, scale, src_size, dst_size):\n if scale < 0:\n r_off, write_off, read_size, write_size = _calc_offsets_impl(off + dst_size*scale, -scale, src_size, dst_size)\n return r_off, dst_size - write_size - write_off, read_size, write_size\n else:\n return _calc_offsets_impl(off, scale, src_size, dst_size)\n\n\ndef _read_decimated(array_transform, src, dest_shape):\n dy_dx = (array_transform.f, array_transform.c)\n sy_sx = (array_transform.e, array_transform.a)\n read, write, read_shape, write_shape = zip(*map(_calc_offsets2, dy_dx, sy_sx, src.shape, dest_shape))\n if all(write_shape):\n window = ((read[0], read[0] + read_shape[0]), (read[1], read[1] + read_shape[1]))\n tmp = 
src.read(window=window, out_shape=write_shape)\n scale = (read_shape[0]/write_shape[0] if sy_sx[0] > 0 else -read_shape[0]/write_shape[0],\n read_shape[1]/write_shape[1] if sy_sx[1] > 0 else -read_shape[1]/write_shape[1])\n offset = (read[0] + (0 if sy_sx[0] > 0 else read_shape[0]),\n read[1] + (0 if sy_sx[1] > 0 else read_shape[1]))\n transform = Affine(scale[1], 0, offset[1], 0, scale[0], offset[0])\n return tmp[::(-1 if sy_sx[0] < 0 else 1), ::(-1 if sy_sx[1] < 0 else 1)], write, transform\n return None, None, None\n\n\ndef _no_scale(affine, eps=1e-5):\n return abs(abs(affine.a) - 1.0) < eps and abs(abs(affine.e) - 1.0) < eps\n\n\ndef _no_fractional_translate(affine, eps=0.01):\n return abs(affine.c % 1.0) < eps and abs(affine.f % 1.0) < eps\n\n\ndef read_from_source(source, dest, dst_transform, dst_nodata, dst_projection, resampling):\n \"\"\"\n Read from `source` into `dest`, reprojecting if necessary.\n\n :param RasterioDataSource source: Data source\n :param numpy.ndarray dest: Data destination\n \"\"\"\n with source.open() as src:\n array_transform = ~src.transform * dst_transform\n # if the CRS is the same use decimated reads if possible (NN or 1:1 scaling)\n if src.crs == dst_projection and _no_scale(array_transform) and (resampling == Resampling.nearest or\n _no_fractional_translate(array_transform)):\n dest.fill(dst_nodata)\n tmp, offset, _ = _read_decimated(array_transform, src, dest.shape)\n if tmp is None:\n return\n dest = dest[offset[0]:offset[0] + tmp.shape[0], offset[1]:offset[1] + tmp.shape[1]]\n numpy.copyto(dest, tmp, where=(tmp != src.nodata))\n else:\n if dest.dtype == numpy.dtype('int8'):\n dest = dest.view(dtype='uint8')\n dst_nodata = dst_nodata.astype('uint8')\n src.reproject(dest,\n dst_transform=dst_transform,\n dst_crs=str(dst_projection),\n dst_nodata=dst_nodata,\n resampling=resampling,\n NUM_THREADS=OPTIONS['reproject_threads'])\n\n\ndef reproject_and_fuse(sources, destination, dst_transform, dst_projection, dst_nodata,\n resampling='nearest', fuse_func=None, skip_broken_datasets=False):\n \"\"\"\n Reproject and fuse `sources` into a 2D numpy array `destination`.\n\n :param List[BaseRasterDataSource] sources: Data sources to open and read from\n :param numpy.ndarray destination: ndarray of appropriate size to read data into\n :type resampling: str\n :type fuse_func: callable or None\n :param bool skip_broken_datasets: Carry on in the face of adversity and failing reads.\n \"\"\"\n assert len(destination.shape) == 2\n\n resampling = _rasterio_resampling_method(resampling)\n\n def copyto_fuser(dest, src):\n \"\"\"\n :type dest: numpy.ndarray\n :type src: numpy.ndarray\n \"\"\"\n numpy.copyto(dest, src, where=(dest == dst_nodata))\n\n fuse_func = fuse_func or copyto_fuser\n\n destination.fill(dst_nodata)\n if len(sources) == 0:\n return destination\n elif len(sources) == 1:\n with ignore_exceptions_if(skip_broken_datasets):\n read_from_source(sources[0], destination, dst_transform, dst_nodata, dst_projection, resampling)\n return destination\n else:\n # Muitiple sources, we need to fuse them together into a single array\n buffer_ = numpy.empty(destination.shape, dtype=destination.dtype)\n for source in sources:\n with ignore_exceptions_if(skip_broken_datasets):\n read_from_source(source, buffer_, dst_transform, dst_nodata, dst_projection, resampling)\n fuse_func(destination, buffer_)\n\n return destination\n\n\nclass BandDataSource(object):\n \"\"\"\n Wrapper for a :class:`rasterio.Band` object\n\n :type source: rasterio.Band\n \"\"\"\n def __init__(self, 
source, nodata=None):\n self.source = source\n if nodata is None:\n assert self.source.ds.nodatavals[0] is not None\n nodata = self.dtype.type(self.source.ds.nodatavals[0])\n self.nodata = nodata\n\n @property\n def crs(self):\n return geometry.CRS(_rasterio_crs_wkt(self.source.ds))\n\n @property\n def transform(self):\n return _rasterio_transform(self.source.ds)\n\n @property\n def dtype(self):\n return numpy.dtype(self.source.dtype)\n\n @property\n def shape(self):\n return self.source.shape\n\n def read(self, window=None, out_shape=None):\n \"\"\"Read data in the native format, returning a numpy array\n \"\"\"\n return self.source.ds.read(indexes=self.source.bidx, window=window, out_shape=out_shape)\n\n def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwargs):\n return rasterio.warp.reproject(self.source,\n dest,\n src_nodata=self.nodata,\n dst_transform=dst_transform,\n dst_crs=str(dst_crs),\n dst_nodata=dst_nodata,\n resampling=resampling,\n **kwargs)\n\n\n# class NetCDFDataSource(object):\n# def __init__(self, dataset, variable, slab=None, nodata=None):\n# self.dataset = dataset\n# self.variable = self.dataset[variable]\n# self.slab = slab or {}\n# if nodata is None:\n# nodata = self.variable.getncattr('_FillValue')\n# self.nodata = nodata\n#\n# @property\n# def crs(self):\n# crs_var_name = self.variable.grid_mapping\n# crs_var = self.dataset[crs_var_name]\n# return geometry.CRS(crs_var.crs_wkt)\n#\n# @property\n# def transform(self):\n# dims = self.crs.dimensions\n# xres, xoff = data_resolution_and_offset(self.dataset[dims[1]])\n# yres, yoff = data_resolution_and_offset(self.dataset[dims[0]])\n# return Affine.translation(xoff, yoff) * Affine.scale(xres, yres)\n#\n# @property\n# def dtype(self):\n# return self.variable.dtype\n#\n# @property\n# def shape(self):\n# return self.variable.shape\n#\n# def read(self, window=None, out_shape=None):\n# data = self.variable\n# if window is None:\n# window = ((0, data.shape[0]), (0, data.shape[1]))\n# data_shape = (window[0][1]-window[0][0]), (window[1][1]-window[1][0])\n# if out_shape is None:\n# out_shape = data_shape\n# xidx = window[0][0] + ((\n# numpy.arange(out_shape[1])+0.5)*(data_shape[1]/out_shape[1])-0.5).round().astype('int')\n# yidx = window[1][0] + ((\n# numpy.arange(out_shape[0])+0.5)*(data_shape[0]/out_shape[0])-0.5).round().astype('int')\n# slab = {self.crs.dimensions[1]: xidx, self.crs.dimensions[0]: yidx}\n# slab.update(self.slab)\n# return data[tuple(slab[d] for d in self.variable.dimensions)]\n#\n# def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwargs):\n# dst_poly = geometry.polygon_from_transform(dest.shape[1], dest.shape[0],\n# dst_transform, dst_crs).to_crs(self.crs)\n# src_poly = geometry.polygon_from_transform(self.shape[1], self.shape[0],\n# self.transform, self.crs)\n# bounds = dst_poly.intersection(src_poly)\n# geobox = geometry.GeoBox.from_geopolygon(bounds, (self.transform.e, self.transform.a), crs=self.crs)\n# tmp, _, tmp_transform = _read_decimated(~self.transform * geobox.affine, self, geobox.shape)\n#\n# return rasterio.warp.reproject(tmp,\n# dest,\n# src_transform=self.transform * tmp_transform,\n# src_crs=str(geobox.crs),\n# src_nodata=self.nodata,\n# dst_transform=dst_transform,\n# dst_crs=str(dst_crs),\n# dst_nodata=dst_nodata,\n# resampling=resampling,\n# **kwargs)\n\n\nclass OverrideBandDataSource(object):\n \"\"\"Wrapper for a rasterio.Band object that overrides nodata, crs and transform\n\n This is useful for files with malformed or missing 
properties.\n\n\n :type source: rasterio.Band\n \"\"\"\n def __init__(self, source, nodata, crs, transform):\n self.source = source\n self.nodata = nodata\n self.crs = crs\n self.transform = transform\n\n @property\n def dtype(self):\n return numpy.dtype(self.source.dtype)\n\n @property\n def shape(self):\n return self.source.shape\n\n def read(self, window=None, out_shape=None):\n \"\"\"Read data in the native format, returning a native array\n \"\"\"\n return self.source.ds.read(indexes=self.source.bidx, window=window, out_shape=out_shape)\n\n def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwargs):\n source = self.read() # TODO: read only the part the we care about\n return rasterio.warp.reproject(source,\n dest,\n src_transform=self.transform,\n src_crs=str(self.crs),\n src_nodata=self.nodata,\n dst_transform=dst_transform,\n dst_crs=str(dst_crs),\n dst_nodata=dst_nodata,\n resampling=resampling,\n **kwargs)\n\n\nclass RasterioDataSource(object):\n \"\"\"\n Abstract class used by fuse_sources and :func:`read_from_source`\n\n \"\"\"\n def __init__(self, filename, nodata):\n self.filename = filename\n self.nodata = nodata\n\n def get_bandnumber(self, src):\n raise NotImplementedError()\n\n def get_transform(self, shape):\n raise NotImplementedError()\n\n def get_crs(self):\n raise NotImplementedError()\n\n @contextmanager\n def open(self):\n \"\"\"Context manager which returns a :class:`BandDataSource`\"\"\"\n try:\n _LOG.debug(\"opening %s\", self.filename)\n with rasterio.open(self.filename) as src:\n override = False\n\n transform = _rasterio_transform(src)\n if transform.is_identity:\n override = True\n transform = self.get_transform(src.shape)\n\n try:\n crs = geometry.CRS(_rasterio_crs_wkt(src))\n except ValueError:\n override = True\n crs = self.get_crs()\n\n # The 1.0 onwards release of rasterio has a bug that means it\n # cannot read multiband data into a numpy array during reprojection\n # We override it here to force the reading and reprojection into separate steps\n # TODO: Remove when rasterio bug fixed\n bandnumber = self.get_bandnumber(src)\n if bandnumber > 1 and str(rasterio.__version__) >= '1.0':\n override = True\n\n band = rasterio.band(src, bandnumber)\n nodata = numpy.dtype(band.dtype).type(src.nodatavals[0] if src.nodatavals[0] is not None\n else self.nodata)\n\n if override:\n yield OverrideBandDataSource(band, nodata=nodata, crs=crs, transform=transform)\n else:\n yield BandDataSource(band, nodata=nodata)\n\n except Exception as e:\n _LOG.error(\"Error opening source dataset: %s\", self.filename)\n raise e\n\n\nclass RasterFileDataSource(RasterioDataSource):\n def __init__(self, filename, bandnumber, nodata=None, crs=None, transform=None):\n super(RasterFileDataSource, self).__init__(filename, nodata)\n self.bandnumber = bandnumber\n self.crs = crs\n self.transform = transform\n\n def get_bandnumber(self, src):\n return self.bandnumber\n\n def get_transform(self, shape):\n if self.transform is None:\n raise RuntimeError('No transform in the data and no fallback')\n return self.transform\n\n def get_crs(self):\n if self.crs is None:\n raise RuntimeError('No CRS in the data and no fallback')\n return self.crs\n\n\ndef register_scheme(*schemes):\n \"\"\"\n Register additional uri schemes as supporting relative offsets (etc), so that band/measurement paths can be\n calculated relative to the base uri.\n \"\"\"\n url_parse_module.uses_netloc.extend(schemes)\n url_parse_module.uses_relative.extend(schemes)\n 
url_parse_module.uses_params.extend(schemes)\n\n# Not recognised by python by default. Doctests below will fail without it.\nregister_scheme('s3')\n\n\ndef _resolve_url(base_url, path):\n \"\"\"\n If path is a URL or an absolute path return URL\n If path is a relative path return base_url joined with path\n\n >>> _resolve_url('file:///foo/abc', 'bar')\n 'file:///foo/bar'\n >>> _resolve_url('file:///foo/abc', 'file:///bar')\n 'file:///bar'\n >>> _resolve_url('file:///foo/abc', None)\n 'file:///foo/abc'\n >>> _resolve_url('file:///foo/abc', '/bar')\n 'file:///bar'\n >>> _resolve_url('http://foo.com/abc/odc-metadata.yaml', 'band-5.tif')\n 'http://foo.com/abc/band-5.tif'\n >>> _resolve_url('s3://foo.com/abc/odc-metadata.yaml', 'band-5.tif')\n 's3://foo.com/abc/band-5.tif'\n >>> _resolve_url('s3://foo.com/abc/odc-metadata.yaml?something', 'band-5.tif')\n 's3://foo.com/abc/band-5.tif'\n \"\"\"\n if path:\n if is_url(path):\n url_str = path\n elif Path(path).is_absolute():\n url_str = Path(path).as_uri()\n else:\n url_str = urljoin(base_url, path)\n else:\n url_str = base_url\n return url_str\n\n\ndef _url2rasterio(url_str, fmt, layer):\n \"\"\"\n turn URL into a string that could be passed to raterio.open\n \"\"\"\n url = urlparse(url_str)\n assert url.scheme, \"Expecting URL with scheme here\"\n\n # if format is NETCDF of HDF need to pass NETCDF:path:band as filename to rasterio/GDAL\n for nasty_format in ('netcdf', 'hdf'):\n if nasty_format in fmt.lower():\n if url.scheme != 'file':\n raise RuntimeError(\"Can't access %s over %s\" % (fmt, url.scheme))\n filename = '%s:%s:%s' % (fmt, uri_to_local_path(url_str), layer)\n return filename\n\n if url.scheme and url.scheme != 'file':\n return url_str\n\n # if local path strip scheme and other gunk\n return str(uri_to_local_path(url_str))\n\n\ndef _choose_location(dataset):\n # type: (Dataset) -> str\n\n # If there's a local (filesystem) URI, prefer it.\n local_uri = dataset.local_uri\n if local_uri:\n return local_uri\n\n uris = dataset.uris\n if not uris:\n # Location-less datasets should have been filtered already.\n raise RuntimeError(\"No recorded location for dataset {}\".format(dataset))\n\n # Newest location first, use it.\n # We may want more nuanced selection in the future.\n return uris[0]\n\n\nclass DatasetSource(RasterioDataSource):\n \"\"\"Data source for reading from a Data Cube Dataset\"\"\"\n\n def __init__(self, dataset, measurement_id):\n \"\"\"\n Initialise for reading from a Data Cube Dataset.\n\n :param Dataset dataset: dataset to read from\n :param str measurement_id: measurement to read. 
a single 'band' or 'slice'\n \"\"\"\n self._dataset = dataset\n self._measurement = dataset.measurements[measurement_id]\n url = _resolve_url(_choose_location(dataset), self._measurement['path'])\n filename = _url2rasterio(url, dataset.format, self._measurement.get('layer'))\n nodata = dataset.type.measurements[measurement_id].get('nodata')\n super(DatasetSource, self).__init__(filename, nodata=nodata)\n\n def get_bandnumber(self, src):\n\n # If `band` property is set to an integer it overrides any other logic\n band = self._measurement.get('band')\n if band is not None:\n if isinstance(band, integer_types):\n return band\n else:\n _LOG.warning('Expected \"band\" property to be of integer type')\n\n if 'netcdf' not in self._dataset.format.lower():\n layer_id = self._measurement.get('layer', 1)\n return layer_id if isinstance(layer_id, integer_types) else 1\n\n tag_name = GDAL_NETCDF_DIM + 'time'\n if tag_name not in src.tags(1): # TODO: support time-less datasets properly\n return 1\n\n time = self._dataset.center_time\n sec_since_1970 = datetime_to_seconds_since_1970(time)\n\n idx = 0\n dist = float('+inf')\n for i in range(1, src.count + 1):\n v = float(src.tags(i)[tag_name])\n if abs(sec_since_1970 - v) < dist:\n idx = i\n dist = abs(sec_since_1970 - v)\n return idx\n\n def get_transform(self, shape):\n return self._dataset.transform * Affine.scale(1/shape[1], 1/shape[0])\n\n def get_crs(self):\n return self._dataset.crs\n\n\ndef create_netcdf_storage_unit(filename,\n crs, coordinates, variables, variable_params, global_attributes=None,\n netcdfparams=None):\n \"\"\"\n Create a NetCDF file on disk.\n\n :param pathlib.Path filename: filename to write to\n :param datacube.utils.geometry.CRS crs: Datacube CRS object defining the spatial projection\n :param dict coordinates: Dict of named `datacube.model.Coordinate`s to create\n :param dict variables: Dict of named `datacube.model.Variable`s to create\n :param dict variable_params:\n Dict of dicts, with keys matching variable names, of extra parameters for variables\n :param dict global_attributes: named global attributes to add to output file\n :param dict netcdfparams: Extra parameters to use when creating netcdf file\n :return: open netCDF4.Dataset object, ready for writing to\n \"\"\"\n filename = Path(filename)\n if filename.exists():\n raise RuntimeError('Storage Unit already exists: %s' % filename)\n\n try:\n filename.parent.mkdir(parents=True)\n except OSError:\n pass\n\n _LOG.info('Creating storage unit: %s', filename)\n\n nco = netcdf_writer.create_netcdf(str(filename), **(netcdfparams or {}))\n\n for name, coord in coordinates.items():\n netcdf_writer.create_coordinate(nco, name, coord.values, coord.units)\n\n netcdf_writer.create_grid_mapping_variable(nco, crs)\n\n for name, variable in variables.items():\n set_crs = all(dim in variable.dims for dim in crs.dimensions)\n var_params = variable_params.get(name, {})\n data_var = netcdf_writer.create_variable(nco, name, variable, set_crs=set_crs, **var_params)\n\n for key, value in var_params.get('attrs', {}).items():\n setattr(data_var, key, value)\n\n for key, value in (global_attributes or {}).items():\n setattr(nco, key, value)\n\n return nco\n\n\ndef write_dataset_to_netcdf(dataset, filename, global_attributes=None, variable_params=None,\n netcdfparams=None):\n \"\"\"\n Write a Data Cube style xarray Dataset to a NetCDF file\n\n Requires a spatial Dataset, with attached coordinates and global crs attribute.\n\n :param `xarray.Dataset` dataset:\n :param filename: Output 
filename\n :param global_attributes: Global file attributes. dict of attr_name: attr_value\n :param variable_params: dict of variable_name: {param_name: param_value, [...]}\n Allows setting storage and compression options per variable.\n See the `netCDF4.Dataset.createVariable` for available\n parameters.\n :param netcdfparams: Optional params affecting netCDF file creation\n \"\"\"\n global_attributes = global_attributes or {}\n variable_params = variable_params or {}\n filename = Path(filename)\n\n if not dataset.data_vars.keys():\n raise DatacubeException('Cannot save empty dataset to disk.')\n\n if not hasattr(dataset, 'crs'):\n raise DatacubeException('Dataset does not contain CRS, cannot write to NetCDF file.')\n\n nco = create_netcdf_storage_unit(filename,\n dataset.crs,\n dataset.coords,\n dataset.data_vars,\n variable_params,\n global_attributes,\n netcdfparams)\n\n for name, variable in dataset.data_vars.items():\n nco[name][:] = netcdf_writer.netcdfy_data(variable.values)\n\n nco.close()\n", "path": "datacube/storage/storage.py" } ]
[ { "content": "# coding=utf-8\n\"\"\"\nCreate/store dataset data into storage units based on the provided storage mappings.\n\nImportant functions are:\n\n* :func:`reproject_and_fuse`\n* :func:`read_from_source`\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport math\nfrom contextlib import contextmanager\nfrom pathlib import Path\n\nfrom datacube.compat import urlparse, urljoin, url_parse_module\nfrom datacube.config import OPTIONS\nfrom datacube.model import Dataset\nfrom datacube.storage import netcdf_writer\nfrom datacube.utils import clamp, datetime_to_seconds_since_1970, DatacubeException, ignore_exceptions_if\nfrom datacube.utils import geometry\nfrom datacube.utils import is_url, uri_to_local_path\n\ntry:\n from yaml import CSafeDumper as SafeDumper\nexcept ImportError:\n from yaml import SafeDumper\nimport numpy\n\nfrom affine import Affine\nfrom datacube.compat import integer_types\nimport rasterio\n\ntry:\n from rasterio.warp import Resampling\nexcept ImportError:\n from rasterio.warp import RESAMPLING as Resampling\n\n_LOG = logging.getLogger(__name__)\n\nRESAMPLING_METHODS = {\n 'nearest': Resampling.nearest,\n 'cubic': Resampling.cubic,\n 'bilinear': Resampling.bilinear,\n 'cubic_spline': Resampling.cubic_spline,\n 'lanczos': Resampling.lanczos,\n 'average': Resampling.average,\n}\n\nassert str(rasterio.__version__) >= '0.34.0', \"rasterio version 0.34.0 or higher is required\"\nGDAL_NETCDF_DIM = ('NETCDF_DIM_'\n if str(rasterio.__gdal_version__) >= '1.10.0' else\n 'NETCDF_DIMENSION_')\n\n\ndef _rasterio_resampling_method(resampling):\n return RESAMPLING_METHODS[resampling.lower()]\n\n\nif str(rasterio.__version__) >= '0.36.0':\n def _rasterio_crs_wkt(src):\n if src.crs:\n return str(src.crs.wkt)\n else:\n return ''\nelse:\n def _rasterio_crs_wkt(src):\n return str(src.crs_wkt)\n\nif str(rasterio.__version__) >= '1.0':\n def _rasterio_transform(src):\n return src.transform\nelse:\n def _rasterio_transform(src):\n return src.affine\n\n\ndef _calc_offsets_impl(off, scale, src_size, dst_size):\n assert scale >= 1-1e-5\n\n if off >= 0:\n write_off = 0\n else:\n write_off = math.ceil((-off-0.5)/scale)\n read_off = round((write_off+0.5)*scale-0.5+off) - round(0.5*(scale-1.0)) # assuming read_size/write_size ~= scale\n if read_off >= src_size:\n return 0, 0, 0, 0\n\n write_end = dst_size\n write_size = write_end-write_off\n read_end = read_off+round(write_size*scale)\n if read_end > src_size:\n # +0.5 below is a fudge that will return last row in more situations, but will change the scale more\n write_end = math.floor((src_size-off+0.5)/scale)\n write_size = write_end-write_off\n read_end = clamp(read_off+round(write_size*scale), read_off, src_size)\n read_size = read_end-read_off\n\n return int(read_off), int(write_off), int(read_size), int(write_size)\n\n\ndef _calc_offsets2(off, scale, src_size, dst_size):\n if scale < 0:\n r_off, write_off, read_size, write_size = _calc_offsets_impl(off + dst_size*scale, -scale, src_size, dst_size)\n return r_off, dst_size - write_size - write_off, read_size, write_size\n else:\n return _calc_offsets_impl(off, scale, src_size, dst_size)\n\n\ndef _read_decimated(array_transform, src, dest_shape):\n dy_dx = (array_transform.f, array_transform.c)\n sy_sx = (array_transform.e, array_transform.a)\n read, write, read_shape, write_shape = zip(*map(_calc_offsets2, dy_dx, sy_sx, src.shape, dest_shape))\n if all(write_shape):\n window = ((read[0], read[0] + read_shape[0]), (read[1], read[1] + 
read_shape[1]))\n tmp = src.read(window=window, out_shape=write_shape)\n scale = (read_shape[0]/write_shape[0] if sy_sx[0] > 0 else -read_shape[0]/write_shape[0],\n read_shape[1]/write_shape[1] if sy_sx[1] > 0 else -read_shape[1]/write_shape[1])\n offset = (read[0] + (0 if sy_sx[0] > 0 else read_shape[0]),\n read[1] + (0 if sy_sx[1] > 0 else read_shape[1]))\n transform = Affine(scale[1], 0, offset[1], 0, scale[0], offset[0])\n return tmp[::(-1 if sy_sx[0] < 0 else 1), ::(-1 if sy_sx[1] < 0 else 1)], write, transform\n return None, None, None\n\n\ndef _no_scale(affine, eps=1e-5):\n return abs(abs(affine.a) - 1.0) < eps and abs(abs(affine.e) - 1.0) < eps\n\n\ndef _no_fractional_translate(affine, eps=0.01):\n return abs(affine.c % 1.0) < eps and abs(affine.f % 1.0) < eps\n\n\ndef read_from_source(source, dest, dst_transform, dst_nodata, dst_projection, resampling):\n \"\"\"\n Read from `source` into `dest`, reprojecting if necessary.\n\n :param RasterioDataSource source: Data source\n :param numpy.ndarray dest: Data destination\n \"\"\"\n with source.open() as src:\n array_transform = ~src.transform * dst_transform\n # if the CRS is the same use decimated reads if possible (NN or 1:1 scaling)\n if src.crs == dst_projection and _no_scale(array_transform) and (resampling == Resampling.nearest or\n _no_fractional_translate(array_transform)):\n dest.fill(dst_nodata)\n tmp, offset, _ = _read_decimated(array_transform, src, dest.shape)\n if tmp is None:\n return\n dest = dest[offset[0]:offset[0] + tmp.shape[0], offset[1]:offset[1] + tmp.shape[1]]\n numpy.copyto(dest, tmp, where=(tmp != src.nodata))\n else:\n if dest.dtype == numpy.dtype('int8'):\n dest = dest.view(dtype='uint8')\n dst_nodata = dst_nodata.astype('uint8')\n src.reproject(dest,\n dst_transform=dst_transform,\n dst_crs=str(dst_projection),\n dst_nodata=dst_nodata,\n resampling=resampling,\n NUM_THREADS=OPTIONS['reproject_threads'])\n\n\ndef reproject_and_fuse(sources, destination, dst_transform, dst_projection, dst_nodata,\n resampling='nearest', fuse_func=None, skip_broken_datasets=False):\n \"\"\"\n Reproject and fuse `sources` into a 2D numpy array `destination`.\n\n :param List[BaseRasterDataSource] sources: Data sources to open and read from\n :param numpy.ndarray destination: ndarray of appropriate size to read data into\n :type resampling: str\n :type fuse_func: callable or None\n :param bool skip_broken_datasets: Carry on in the face of adversity and failing reads.\n \"\"\"\n assert len(destination.shape) == 2\n\n resampling = _rasterio_resampling_method(resampling)\n\n def copyto_fuser(dest, src):\n \"\"\"\n :type dest: numpy.ndarray\n :type src: numpy.ndarray\n \"\"\"\n numpy.copyto(dest, src, where=(dest == dst_nodata))\n\n fuse_func = fuse_func or copyto_fuser\n\n destination.fill(dst_nodata)\n if len(sources) == 0:\n return destination\n elif len(sources) == 1:\n with ignore_exceptions_if(skip_broken_datasets):\n read_from_source(sources[0], destination, dst_transform, dst_nodata, dst_projection, resampling)\n return destination\n else:\n # Muitiple sources, we need to fuse them together into a single array\n buffer_ = numpy.empty(destination.shape, dtype=destination.dtype)\n for source in sources:\n with ignore_exceptions_if(skip_broken_datasets):\n read_from_source(source, buffer_, dst_transform, dst_nodata, dst_projection, resampling)\n fuse_func(destination, buffer_)\n\n return destination\n\n\nclass BandDataSource(object):\n \"\"\"\n Wrapper for a :class:`rasterio.Band` object\n\n :type source: rasterio.Band\n 
\"\"\"\n def __init__(self, source, nodata=None):\n self.source = source\n if nodata is None:\n assert self.source.ds.nodatavals[0] is not None\n nodata = self.dtype.type(self.source.ds.nodatavals[0])\n self.nodata = nodata\n\n @property\n def crs(self):\n return geometry.CRS(_rasterio_crs_wkt(self.source.ds))\n\n @property\n def transform(self):\n return _rasterio_transform(self.source.ds)\n\n @property\n def dtype(self):\n return numpy.dtype(self.source.dtype)\n\n @property\n def shape(self):\n return self.source.shape\n\n def read(self, window=None, out_shape=None):\n \"\"\"Read data in the native format, returning a numpy array\n \"\"\"\n return self.source.ds.read(indexes=self.source.bidx, window=window, out_shape=out_shape)\n\n def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwargs):\n return rasterio.warp.reproject(self.source,\n dest,\n src_nodata=self.nodata,\n dst_transform=dst_transform,\n dst_crs=str(dst_crs),\n dst_nodata=dst_nodata,\n resampling=resampling,\n **kwargs)\n\n\n# class NetCDFDataSource(object):\n# def __init__(self, dataset, variable, slab=None, nodata=None):\n# self.dataset = dataset\n# self.variable = self.dataset[variable]\n# self.slab = slab or {}\n# if nodata is None:\n# nodata = self.variable.getncattr('_FillValue')\n# self.nodata = nodata\n#\n# @property\n# def crs(self):\n# crs_var_name = self.variable.grid_mapping\n# crs_var = self.dataset[crs_var_name]\n# return geometry.CRS(crs_var.crs_wkt)\n#\n# @property\n# def transform(self):\n# dims = self.crs.dimensions\n# xres, xoff = data_resolution_and_offset(self.dataset[dims[1]])\n# yres, yoff = data_resolution_and_offset(self.dataset[dims[0]])\n# return Affine.translation(xoff, yoff) * Affine.scale(xres, yres)\n#\n# @property\n# def dtype(self):\n# return self.variable.dtype\n#\n# @property\n# def shape(self):\n# return self.variable.shape\n#\n# def read(self, window=None, out_shape=None):\n# data = self.variable\n# if window is None:\n# window = ((0, data.shape[0]), (0, data.shape[1]))\n# data_shape = (window[0][1]-window[0][0]), (window[1][1]-window[1][0])\n# if out_shape is None:\n# out_shape = data_shape\n# xidx = window[0][0] + ((\n# numpy.arange(out_shape[1])+0.5)*(data_shape[1]/out_shape[1])-0.5).round().astype('int')\n# yidx = window[1][0] + ((\n# numpy.arange(out_shape[0])+0.5)*(data_shape[0]/out_shape[0])-0.5).round().astype('int')\n# slab = {self.crs.dimensions[1]: xidx, self.crs.dimensions[0]: yidx}\n# slab.update(self.slab)\n# return data[tuple(slab[d] for d in self.variable.dimensions)]\n#\n# def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwargs):\n# dst_poly = geometry.polygon_from_transform(dest.shape[1], dest.shape[0],\n# dst_transform, dst_crs).to_crs(self.crs)\n# src_poly = geometry.polygon_from_transform(self.shape[1], self.shape[0],\n# self.transform, self.crs)\n# bounds = dst_poly.intersection(src_poly)\n# geobox = geometry.GeoBox.from_geopolygon(bounds, (self.transform.e, self.transform.a), crs=self.crs)\n# tmp, _, tmp_transform = _read_decimated(~self.transform * geobox.affine, self, geobox.shape)\n#\n# return rasterio.warp.reproject(tmp,\n# dest,\n# src_transform=self.transform * tmp_transform,\n# src_crs=str(geobox.crs),\n# src_nodata=self.nodata,\n# dst_transform=dst_transform,\n# dst_crs=str(dst_crs),\n# dst_nodata=dst_nodata,\n# resampling=resampling,\n# **kwargs)\n\n\nclass OverrideBandDataSource(object):\n \"\"\"Wrapper for a rasterio.Band object that overrides nodata, crs and transform\n\n This is useful for files with 
malformed or missing properties.\n\n\n :type source: rasterio.Band\n \"\"\"\n def __init__(self, source, nodata, crs, transform):\n self.source = source\n self.nodata = nodata\n self.crs = crs\n self.transform = transform\n\n @property\n def dtype(self):\n return numpy.dtype(self.source.dtype)\n\n @property\n def shape(self):\n return self.source.shape\n\n def read(self, window=None, out_shape=None):\n \"\"\"Read data in the native format, returning a native array\n \"\"\"\n return self.source.ds.read(indexes=self.source.bidx, window=window, out_shape=out_shape)\n\n def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwargs):\n source = self.read() # TODO: read only the part the we care about\n return rasterio.warp.reproject(source,\n dest,\n src_transform=self.transform,\n src_crs=str(self.crs),\n src_nodata=self.nodata,\n dst_transform=dst_transform,\n dst_crs=str(dst_crs),\n dst_nodata=dst_nodata,\n resampling=resampling,\n **kwargs)\n\n\nclass RasterioDataSource(object):\n \"\"\"\n Abstract class used by fuse_sources and :func:`read_from_source`\n\n \"\"\"\n def __init__(self, filename, nodata):\n self.filename = filename\n self.nodata = nodata\n\n def get_bandnumber(self, src):\n raise NotImplementedError()\n\n def get_transform(self, shape):\n raise NotImplementedError()\n\n def get_crs(self):\n raise NotImplementedError()\n\n @contextmanager\n def open(self):\n \"\"\"Context manager which returns a :class:`BandDataSource`\"\"\"\n try:\n _LOG.debug(\"opening %s\", self.filename)\n with rasterio.open(self.filename) as src:\n override = False\n\n transform = _rasterio_transform(src)\n if transform.is_identity:\n override = True\n transform = self.get_transform(src.shape)\n\n try:\n crs = geometry.CRS(_rasterio_crs_wkt(src))\n except ValueError:\n override = True\n crs = self.get_crs()\n\n # The 1.0 onwards release of rasterio has a bug that means it\n # cannot read multiband data into a numpy array during reprojection\n # We override it here to force the reading and reprojection into separate steps\n # TODO: Remove when rasterio bug fixed\n bandnumber = self.get_bandnumber(src)\n if bandnumber > 1 and str(rasterio.__version__) >= '1.0':\n override = True\n\n band = rasterio.band(src, bandnumber)\n nodata = numpy.dtype(band.dtype).type(src.nodatavals[0] if src.nodatavals[0] is not None\n else self.nodata)\n\n if override:\n yield OverrideBandDataSource(band, nodata=nodata, crs=crs, transform=transform)\n else:\n yield BandDataSource(band, nodata=nodata)\n\n except Exception as e:\n _LOG.error(\"Error opening source dataset: %s\", self.filename)\n raise e\n\n\nclass RasterFileDataSource(RasterioDataSource):\n def __init__(self, filename, bandnumber, nodata=None, crs=None, transform=None):\n super(RasterFileDataSource, self).__init__(filename, nodata)\n self.bandnumber = bandnumber\n self.crs = crs\n self.transform = transform\n\n def get_bandnumber(self, src):\n return self.bandnumber\n\n def get_transform(self, shape):\n if self.transform is None:\n raise RuntimeError('No transform in the data and no fallback')\n return self.transform\n\n def get_crs(self):\n if self.crs is None:\n raise RuntimeError('No CRS in the data and no fallback')\n return self.crs\n\n\ndef register_scheme(*schemes):\n \"\"\"\n Register additional uri schemes as supporting relative offsets (etc), so that band/measurement paths can be\n calculated relative to the base uri.\n \"\"\"\n url_parse_module.uses_netloc.extend(schemes)\n url_parse_module.uses_relative.extend(schemes)\n 
url_parse_module.uses_params.extend(schemes)\n\n# Not recognised by python by default. Doctests below will fail without it.\nregister_scheme('s3')\n\n\ndef _resolve_url(base_url, path):\n \"\"\"\n If path is a URL or an absolute path return URL\n If path is a relative path return base_url joined with path\n\n >>> _resolve_url('file:///foo/abc', 'bar')\n 'file:///foo/bar'\n >>> _resolve_url('file:///foo/abc', 'file:///bar')\n 'file:///bar'\n >>> _resolve_url('file:///foo/abc', None)\n 'file:///foo/abc'\n >>> _resolve_url('file:///foo/abc', '/bar')\n 'file:///bar'\n >>> _resolve_url('http://foo.com/abc/odc-metadata.yaml', 'band-5.tif')\n 'http://foo.com/abc/band-5.tif'\n >>> _resolve_url('s3://foo.com/abc/odc-metadata.yaml', 'band-5.tif')\n 's3://foo.com/abc/band-5.tif'\n >>> _resolve_url('s3://foo.com/abc/odc-metadata.yaml?something', 'band-5.tif')\n 's3://foo.com/abc/band-5.tif'\n \"\"\"\n if path:\n if is_url(path):\n url_str = path\n elif Path(path).is_absolute():\n url_str = Path(path).as_uri()\n else:\n url_str = urljoin(base_url, path)\n else:\n url_str = base_url\n return url_str\n\n\ndef _url2rasterio(url_str, fmt, layer):\n \"\"\"\n turn URL into a string that could be passed to raterio.open\n \"\"\"\n url = urlparse(url_str)\n assert url.scheme, \"Expecting URL with scheme here\"\n\n # if format is NETCDF of HDF need to pass NETCDF:path:band as filename to rasterio/GDAL\n for nasty_format in ('netcdf', 'hdf'):\n if nasty_format in fmt.lower():\n if url.scheme != 'file':\n raise RuntimeError(\"Can't access %s over %s\" % (fmt, url.scheme))\n filename = '%s:%s:%s' % (fmt, uri_to_local_path(url_str), layer)\n return filename\n\n if url.scheme and url.scheme != 'file':\n return url_str\n\n # if local path strip scheme and other gunk\n return str(uri_to_local_path(url_str))\n\n\ndef _choose_location(dataset):\n # type: (Dataset) -> str\n\n # If there's a local (filesystem) URI, prefer it.\n local_uri = dataset.local_uri\n if local_uri:\n return local_uri\n\n uris = dataset.uris\n if not uris:\n # Location-less datasets should have been filtered already.\n raise RuntimeError(\"No recorded location for dataset {}\".format(dataset))\n\n # Newest location first, use it.\n # We may want more nuanced selection in the future.\n return uris[0]\n\n\nclass DatasetSource(RasterioDataSource):\n \"\"\"Data source for reading from a Data Cube Dataset\"\"\"\n\n def __init__(self, dataset, measurement_id):\n \"\"\"\n Initialise for reading from a Data Cube Dataset.\n\n :param Dataset dataset: dataset to read from\n :param str measurement_id: measurement to read. 
a single 'band' or 'slice'\n \"\"\"\n self._dataset = dataset\n self._measurement = dataset.measurements[measurement_id]\n url = _resolve_url(_choose_location(dataset), self._measurement['path'])\n filename = _url2rasterio(url, dataset.format, self._measurement.get('layer'))\n nodata = dataset.type.measurements[measurement_id].get('nodata')\n super(DatasetSource, self).__init__(filename, nodata=nodata)\n\n def get_bandnumber(self, src):\n\n # If `band` property is set to an integer it overrides any other logic\n band = self._measurement.get('band')\n if band is not None:\n if isinstance(band, integer_types):\n return band\n else:\n _LOG.warning('Expected \"band\" property to be of integer type')\n\n if 'netcdf' not in self._dataset.format.lower():\n layer_id = self._measurement.get('layer', 1)\n return layer_id if isinstance(layer_id, integer_types) else 1\n\n tag_name = GDAL_NETCDF_DIM + 'time'\n if tag_name not in src.tags(1): # TODO: support time-less datasets properly\n return 1\n\n time = self._dataset.center_time\n sec_since_1970 = datetime_to_seconds_since_1970(time)\n\n idx = 0\n dist = float('+inf')\n for i in range(1, src.count + 1):\n v = float(src.tags(i)[tag_name])\n if abs(sec_since_1970 - v) < dist:\n idx = i\n dist = abs(sec_since_1970 - v)\n return idx\n\n def get_transform(self, shape):\n return self._dataset.transform * Affine.scale(1/shape[1], 1/shape[0])\n\n def get_crs(self):\n return self._dataset.crs\n\n\ndef create_netcdf_storage_unit(filename,\n crs, coordinates, variables, variable_params, global_attributes=None,\n netcdfparams=None):\n \"\"\"\n Create a NetCDF file on disk.\n\n :param pathlib.Path filename: filename to write to\n :param datacube.utils.geometry.CRS crs: Datacube CRS object defining the spatial projection\n :param dict coordinates: Dict of named `datacube.model.Coordinate`s to create\n :param dict variables: Dict of named `datacube.model.Variable`s to create\n :param dict variable_params:\n Dict of dicts, with keys matching variable names, of extra parameters for variables\n :param dict global_attributes: named global attributes to add to output file\n :param dict netcdfparams: Extra parameters to use when creating netcdf file\n :return: open netCDF4.Dataset object, ready for writing to\n \"\"\"\n filename = Path(filename)\n if filename.exists():\n raise RuntimeError('Storage Unit already exists: %s' % filename)\n\n try:\n filename.parent.mkdir(parents=True)\n except OSError:\n pass\n\n _LOG.info('Creating storage unit: %s', filename)\n\n nco = netcdf_writer.create_netcdf(str(filename), **(netcdfparams or {}))\n\n for name, coord in coordinates.items():\n netcdf_writer.create_coordinate(nco, name, coord.values, coord.units)\n\n netcdf_writer.create_grid_mapping_variable(nco, crs)\n\n for name, variable in variables.items():\n set_crs = all(dim in variable.dims for dim in crs.dimensions)\n var_params = variable_params.get(name, {})\n data_var = netcdf_writer.create_variable(nco, name, variable, set_crs=set_crs, **var_params)\n\n for key, value in var_params.get('attrs', {}).items():\n setattr(data_var, key, value)\n\n for key, value in (global_attributes or {}).items():\n setattr(nco, key, value)\n\n return nco\n\n\ndef write_dataset_to_netcdf(dataset, filename, global_attributes=None, variable_params=None,\n netcdfparams=None):\n \"\"\"\n Write a Data Cube style xarray Dataset to a NetCDF file\n\n Requires a spatial Dataset, with attached coordinates and global crs attribute.\n\n :param `xarray.Dataset` dataset:\n :param filename: Output 
filename\n :param global_attributes: Global file attributes. dict of attr_name: attr_value\n :param variable_params: dict of variable_name: {param_name: param_value, [...]}\n Allows setting storage and compression options per variable.\n See the `netCDF4.Dataset.createVariable` for available\n parameters.\n :param netcdfparams: Optional params affecting netCDF file creation\n \"\"\"\n global_attributes = global_attributes or {}\n variable_params = variable_params or {}\n filename = Path(filename)\n\n if not dataset.data_vars.keys():\n raise DatacubeException('Cannot save empty dataset to disk.')\n\n if not hasattr(dataset, 'crs'):\n raise DatacubeException('Dataset does not contain CRS, cannot write to NetCDF file.')\n\n nco = create_netcdf_storage_unit(filename,\n dataset.crs,\n dataset.coords,\n dataset.data_vars,\n variable_params,\n global_attributes,\n netcdfparams)\n\n for name, variable in dataset.data_vars.items():\n nco[name][:] = netcdf_writer.netcdfy_data(variable.values)\n\n nco.close()\n", "path": "datacube/storage/storage.py" } ]
diff --git a/datacube/storage/storage.py b/datacube/storage/storage.py index 5c19d2d587..f525c9e612 100644 --- a/datacube/storage/storage.py +++ b/datacube/storage/storage.py @@ -61,7 +61,10 @@ def _rasterio_resampling_method(resampling): if str(rasterio.__version__) >= '0.36.0': def _rasterio_crs_wkt(src): - return str(src.crs.wkt) + if src.crs: + return str(src.crs.wkt) + else: + return '' else: def _rasterio_crs_wkt(src): return str(src.crs_wkt) diff --git a/docs/about/whats_new.rst b/docs/about/whats_new.rst index 70ac62026f..527b1aa025 100644 --- a/docs/about/whats_new.rst +++ b/docs/about/whats_new.rst @@ -5,8 +5,16 @@ What's New ========== -v1.5.0 (????) -------------- +v1.5.1 Purpler Unicorn (13 July 2017) +------------------------------------- + + - Fix bug #261. Unable to load Australian Rainfall Grid Data. This was as a + result of the CRS/Transformation override functionality being broken when + using the latest `rasterio` version `1.0a9` + + +v1.5.0 Purple Unicorn (9 July 2017) +----------------------------------- Usability Improvements ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/tests/conftest.py b/tests/conftest.py index e35c66be13..55feb0c83c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,6 +20,12 @@ def example_gdal_path(data_folder): return str(os.path.join(data_folder, 'sample_tile_151_-29.tif')) [email protected] +def no_crs_gdal_path(data_folder): + """Return the pathname of a GDAL file that doesn't contain a valid CRS.""" + return str(os.path.join(data_folder, 'no_crs_ds.tif')) + + @pytest.fixture def data_folder(): return os.path.join(os.path.split(os.path.realpath(__file__))[0], 'data') diff --git a/tests/data/no_crs_ds.tif b/tests/data/no_crs_ds.tif new file mode 100644 index 0000000000..a35f8b0ad7 Binary files /dev/null and b/tests/data/no_crs_ds.tif differ diff --git a/tests/storage/test_storage.py b/tests/storage/test_storage.py index 1e673d4d69..38ae3fc8fc 100644 --- a/tests/storage/test_storage.py +++ b/tests/storage/test_storage.py @@ -4,7 +4,7 @@ import mock import netCDF4 -import numpy +import numpy as np import pytest import rasterio.warp import xarray @@ -12,7 +12,7 @@ import datacube from datacube.model import Dataset, DatasetType, MetadataType -from datacube.storage.storage import OverrideBandDataSource +from datacube.storage.storage import OverrideBandDataSource, RasterFileDataSource from datacube.storage.storage import write_dataset_to_netcdf, reproject_and_fuse, read_from_source, Resampling, \ DatasetSource from datacube.utils import geometry @@ -30,7 +30,7 @@ def test_write_dataset_to_netcdf(tmpnetcdf_filename): dataset[name] = (name, coord.values, {'units': coord.units, 'crs': geobox.crs}) dataset['B10'] = (geobox.dimensions, - numpy.arange(10000, dtype='int16').reshape(geobox.shape), + np.arange(10000, dtype='int16').reshape(geobox.shape), {'nodata': 0, 'units': '1', 'crs': geobox.crs}) write_dataset_to_netcdf(dataset, tmpnetcdf_filename, global_attributes={'foo': 'bar'}, @@ -57,7 +57,7 @@ def test_write_dataset_to_netcdf(tmpnetcdf_filename): # dataset[name] = (name, coord.values, {'units': coord.units, 'crs': geobox.crs}) # # dataset['B10'] = (geobox.dimensions, -# numpy.arange(11000, dtype='int16').reshape(geobox.shape), +# np.arange(11000, dtype='int16').reshape(geobox.shape), # {'nodata': 0, 'units': '1', 'crs': geobox.crs}) # # write_dataset_to_netcdf(dataset, tmpnetcdf_filename, global_attributes={'foo': 'bar'}, @@ -70,7 +70,7 @@ def test_write_dataset_to_netcdf(tmpnetcdf_filename): # assert source.transform.almost_equals(affine) # 
assert (source.read() == dataset['B10']).all() # -# dest = numpy.empty((60, 50)) +# dest = np.empty((60, 50)) # source.reproject(dest, affine, geobox.crs, 0, Resampling.nearest) # assert (dest == dataset['B10'][:60, :50]).all() # @@ -80,7 +80,7 @@ def test_write_dataset_to_netcdf(tmpnetcdf_filename): # source.reproject(dest, affine * Affine.translation(-10, -10), geobox.crs, 0, Resampling.nearest) # assert (dest[10:, 10:] == dataset['B10'][:50, :40]).all() # -# dest = numpy.empty((200, 200)) +# dest = np.empty((200, 200)) # source.reproject(dest, affine, geobox.crs, 0, Resampling.nearest) # assert (dest[:100, :110] == dataset['B10']).all() # @@ -111,7 +111,7 @@ def test_first_source_is_priority_in_reproject_and_fuse(): source2 = _mock_datasetsource([[2, 2], [2, 2]], crs=crs, shape=shape) sources = [source1, source2] - output_data = numpy.full(shape, fill_value=no_data, dtype='int16') + output_data = np.full(shape, fill_value=no_data, dtype='int16') reproject_and_fuse(sources, output_data, dst_transform=identity, dst_projection=crs, dst_nodata=no_data) assert (output_data == 1).all() @@ -126,7 +126,7 @@ def test_second_source_used_when_first_is_empty(): source2 = _mock_datasetsource([[2, 2], [2, 2]], crs=crs, shape=shape) sources = [source1, source2] - output_data = numpy.full(shape, fill_value=no_data, dtype='int16') + output_data = np.full(shape, fill_value=no_data, dtype='int16') reproject_and_fuse(sources, output_data, dst_transform=identity, dst_projection=crs, dst_nodata=no_data) assert (output_data == 2).all() @@ -141,7 +141,7 @@ def test_mixed_result_when_first_source_partially_empty(): source2 = _mock_datasetsource([[2, 2], [2, 2]], crs=crs, shape=shape) sources = [source1, source2] - output_data = numpy.full(shape, fill_value=no_data, dtype='int16') + output_data = np.full(shape, fill_value=no_data, dtype='int16') reproject_and_fuse(sources, output_data, dst_transform=identity, dst_projection=crs, dst_nodata=no_data) assert (output_data == [[1, 1], [2, 2]]).all() @@ -154,7 +154,7 @@ def _mock_datasetsource(value, crs=None, shape=(2, 2)): rio_reader.crs = crs rio_reader.transform = identity rio_reader.shape = shape - rio_reader.read.return_value = numpy.array(value) + rio_reader.read.return_value = np.array(value) # Use the following if a reproject were to be required # def fill_array(dest, *args, **kwargs): @@ -175,7 +175,7 @@ def test_read_from_broken_source(): rio_reader = source1.open.return_value.__enter__.return_value rio_reader.read.side_effect = OSError('Read or write failed') - output_data = numpy.full(shape, fill_value=no_data, dtype='int16') + output_data = np.full(shape, fill_value=no_data, dtype='int16') # Check exception is raised with pytest.raises(OSError): @@ -212,17 +212,17 @@ def __init__(self): self.nodata = -999 self.shape = (613, 597) - self.data = numpy.full(self.shape, self.nodata, dtype='int16') - self.data[:512, :512] = numpy.arange(512) + numpy.arange(512).reshape((512, 1)) + self.data = np.full(self.shape, self.nodata, dtype='int16') + self.data[:512, :512] = np.arange(512) + np.arange(512).reshape((512, 1)) def read(self, window=None, out_shape=None): data = self.data if window: data = self.data[slice(*window[0]), slice(*window[1])] if out_shape: - xidx = ((numpy.arange(out_shape[1]) + 0.5) * (data.shape[1] / out_shape[1]) - 0.5).round().astype('int') - yidx = ((numpy.arange(out_shape[0]) + 0.5) * (data.shape[0] / out_shape[0]) - 0.5).round().astype('int') - data = data[numpy.meshgrid(yidx, xidx, indexing='ij')] + xidx = ((np.arange(out_shape[1]) + 0.5) 
* (data.shape[1] / out_shape[1]) - 0.5).round().astype('int') + yidx = ((np.arange(out_shape[0]) + 0.5) * (data.shape[0] / out_shape[0]) - 0.5).round().astype('int') + data = data[np.meshgrid(yidx, xidx, indexing='ij')] return data def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwargs): @@ -239,7 +239,7 @@ def reproject(self, dest, dst_transform, dst_crs, dst_nodata, resampling, **kwar def assert_same_read_results(source, dst_shape, dst_dtype, dst_transform, dst_nodata, dst_projection, resampling): - expected = numpy.empty(dst_shape, dtype=dst_dtype) + expected = np.empty(dst_shape, dtype=dst_dtype) with source.open() as src: rasterio.warp.reproject(src.data, expected, @@ -251,7 +251,7 @@ def assert_same_read_results(source, dst_shape, dst_dtype, dst_transform, dst_no dst_nodata=dst_nodata, resampling=resampling) - result = numpy.empty(dst_shape, dtype=dst_dtype) + result = np.empty(dst_shape, dtype=dst_dtype) with datacube.set_options(reproject_threads=1): read_from_source(source, result, @@ -260,7 +260,7 @@ def assert_same_read_results(source, dst_shape, dst_dtype, dst_transform, dst_no dst_projection=dst_projection, resampling=resampling) - assert numpy.isclose(result, expected, atol=0, rtol=0.05, equal_nan=True).all() + assert np.isclose(result, expected, atol=0, rtol=0.05, equal_nan=True).all() return result @@ -420,8 +420,6 @@ def fake_open(): def test_read_raster_with_custom_crs_and_transform(example_gdal_path): - import numpy as np - with rasterio.open(example_gdal_path) as src: band = rasterio.band(src, 1) crs = geometry.CRS('EPSG:3577') @@ -444,6 +442,22 @@ def test_read_raster_with_custom_crs_and_transform(example_gdal_path): assert (dest1 == dest2).all() +def test_read_from_file_with_missing_crs(no_crs_gdal_path): + """ + We need to be able to read from data files even when GDAL can't automatically gather all the metdata. + + The :class:`RasterFileDataSource` is able to override the nodata, CRS and transform attributes if necessary. + """ + crs = geometry.CRS('EPSG:4326') + nodata = -999 + transform = Affine(0.01, 0.0, 111.975, + 0.0, 0.01, -9.975) + data_source = RasterFileDataSource(no_crs_gdal_path, bandnumber=1, nodata=nodata, crs=crs, transform=transform) + with data_source.open() as src: + dest1 = src.read() + assert dest1.shape == (10, 10) + + _EXAMPLE_METADATA_TYPE = MetadataType( { 'name': 'eo',
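The core of the fix in the diff above is the guard added to `_rasterio_crs_wkt`: when rasterio reports no CRS at all, the helper returns an empty string instead of dereferencing `src.crs.wkt`, the empty WKT then fails `geometry.CRS(...)` construction, and the existing `except ValueError` override path in `RasterioDataSource.open` substitutes the caller-supplied CRS from `get_crs()`. The sketch below only reproduces that guard-and-fallback shape with toy stand-ins (`CRS`, `_NoCRSSource`, and the `"EPSG:4326"` placeholder are hypothetical), so it runs without rasterio or a sample file; it is an illustration of the pattern, not the datacube implementation itself.

```python
# Toy stand-ins -- the real code uses rasterio datasets and
# datacube.utils.geometry.CRS; this only demonstrates the guard-and-fallback shape.

class CRS:
    """Minimal CRS that, like geometry.CRS, rejects an empty WKT string."""
    def __init__(self, wkt):
        if not wkt:
            raise ValueError("not a valid CRS definition")
        self.wkt = wkt


def crs_wkt(src):
    # Guard: a file with no projection metadata yields '' instead of failing
    # when src.crs is missing or empty.
    return str(src.crs.wkt) if getattr(src, "crs", None) else ""


def resolve_crs(src, fallback_crs=None):
    """Prefer the CRS stored in the file; otherwise use caller-supplied metadata."""
    try:
        return CRS(crs_wkt(src))
    except ValueError:
        if fallback_crs is None:
            raise RuntimeError("No CRS in the data and no fallback")
        return fallback_crs


class _NoCRSSource:
    """Stands in for a GDAL dataset whose CRS could not be read."""
    crs = None


if __name__ == "__main__":
    fallback = CRS("EPSG:4326")  # placeholder WKT, assumption for the sketch
    print(resolve_crs(_NoCRSSource(), fallback).wkt)  # -> EPSG:4326
```

The same idea is what the new `test_read_from_file_with_missing_crs` test exercises: a file with no CRS is readable as long as the caller provides the nodata, CRS, and transform overrides.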
kserve__kserve-864
explanations no longer working with 0.3.0 Am following the steps in with 0.3.0 of kfserving: https://github.com/kubeflow/kfserving/tree/master/docs/samples/explanation/alibi/income When I execute the curl for the explain I get a 500 error and the container logs show the below. I'm guessing the [update to master](https://github.com/kubeflow/kfserving/pull/803) means that the explainer models have also been updated and so they no longer work with 0.3.0 (the latest release version) ``` [E 200605 17:15:14 web:1792] Uncaught exception POST /v1/models/income:explain (127.0.0.1) HTTPServerRequest(protocol='http', host='income-explainer-default.default.svc.cluster.local', method='POST', uri='/v1/models/income:explain', version='HTTP/1.1', remote_ip='127.0.0.1') Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/tornado/web.py", line 1701, in _execute result = method(*self.path_args, **self.path_kwargs) File "/kfserving/kfserving/handlers/http.py", line 61, in post response = model.explain(request) File "/alibiexplainer/alibiexplainer/explainer.py", line 74, in explain explanation = self.wrapper.explain(request["instances"]) File "/alibiexplainer/alibiexplainer/anchor_tabular.py", line 89, in explain anchor_exp = self.anchors_tabular.explain(arr[0], **self.kwargs) File "/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_tabular.py", line 803, in explain for sampler in self.samplers: AttributeError: 'AnchorTabular' object has no attribute 'samplers' [E 200605 17:15:14 web:2250] 500 POST /v1/models/income:explain (127.0.0.1) 58.80ms [I 200605 17:18:22 anchor_tabular:83] Arr shape ((1, 12),) [E 200605 17:18:22 web:1792] Uncaught exception POST /v1/models/income:explain (127.0.0.1) HTTPServerRequest(protocol='http', host='income-explainer-default.default.svc.cluster.local', method='POST', uri='/v1/models/income:explain', version='HTTP/1.1', remote_ip='127.0.0.1') Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/tornado/web.py", line 1701, in _execute result = method(*self.path_args, **self.path_kwargs) File "/kfserving/kfserving/handlers/http.py", line 61, in post response = model.explain(request) File "/alibiexplainer/alibiexplainer/explainer.py", line 74, in explain explanation = self.wrapper.explain(request["instances"]) File "/alibiexplainer/alibiexplainer/anchor_tabular.py", line 89, in explain anchor_exp = self.anchors_tabular.explain(arr[0], **self.kwargs) File "/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_tabular.py", line 803, in explain for sampler in self.samplers: AttributeError: 'AnchorTabular' object has no attribute 'samplers' [E 200605 17:18:22 web:2250] 500 POST /v1/models/income:explain (127.0.0.1) 31.17ms ``` Presumably it would work on master. Does that sound right @cliveseldon ? If so maybe we should just close this.
[ { "content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.3.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"kfserving>=0.3.0\",\n \"alibi>=0.3\",\n \"scikit-learn>=0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"dill>=0.3.0\",\n \"spacy>=2.1.4\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py" } ]
[ { "content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.3.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"kfserving>=0.3.0\",\n \"alibi==0.3.2\",\n \"scikit-learn>=0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"dill>=0.3.0\",\n \"spacy>=2.1.4\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py" } ]
diff --git a/python/alibiexplainer.Dockerfile b/python/alibiexplainer.Dockerfile index 762a305ef20..5ef19232bae 100644 --- a/python/alibiexplainer.Dockerfile +++ b/python/alibiexplainer.Dockerfile @@ -5,8 +5,5 @@ COPY kfserving kfserving COPY third_party third_party RUN pip install --upgrade pip && pip install -e ./kfserving -RUN git clone https://github.com/SeldonIO/alibi.git && \ - cd alibi && \ - pip install . RUN pip install -e ./alibiexplainer ENTRYPOINT ["python", "-m", "alibiexplainer"] diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py index 8eba40cdbb0..3591cb3e1b1 100644 --- a/python/alibiexplainer/setup.py +++ b/python/alibiexplainer/setup.py @@ -33,7 +33,7 @@ packages=find_packages("alibiexplainer"), install_requires=[ "kfserving>=0.3.0", - "alibi>=0.3", + "alibi==0.3.2", "scikit-learn>=0.20.3", "argparse>=1.4.0", "requests>=2.22.0", diff --git a/python/pytorch.Dockerfile b/python/pytorch.Dockerfile index b8b80db83f2..60ac34895e8 100644 --- a/python/pytorch.Dockerfile +++ b/python/pytorch.Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ libpng-dev && \ rm -rf /var/lib/apt/lists/* -RUN curl -o ~/miniconda.sh -O https://repo.continuum.io/miniconda/Miniconda3-4.2.12-Linux-x86_64.sh && \ +RUN curl -L -o ~/miniconda.sh -O https://repo.continuum.io/miniconda/Miniconda3-4.2.12-Linux-x86_64.sh && \ chmod +x ~/miniconda.sh && \ ~/miniconda.sh -b -p /opt/conda && \ rm ~/miniconda.sh && \
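The diff above resolves the failure by pinning `alibi==0.3.2` and no longer installing alibi from git master in the Dockerfile, so the serving image matches the library version the pickled explainer was built against. As a hedged, hypothetical complement (not something kfserving itself does), a model server could also fail fast at load time when the installed version drifts from the one the artifact expects, turning the opaque `AttributeError: ... no attribute 'samplers'` into an actionable message. The `TRAINED_WITH` constant below is an assumption for illustration.

```python
# Hypothetical load-time guard; TRAINED_WITH is an assumed value recorded next to
# the saved explainer. Requires Python 3.8+ for importlib.metadata
# (use the importlib-metadata backport on 3.7).
from importlib import metadata

TRAINED_WITH = "0.3.2"  # assumed: alibi version the explainer was pickled with


def check_explainer_runtime(package="alibi", expected=TRAINED_WITH):
    """Raise a clear error if the serving image's package version has drifted."""
    try:
        installed = metadata.version(package)
    except metadata.PackageNotFoundError:
        raise RuntimeError(f"{package} is not installed in this image")
    if installed.split(".")[:2] != expected.split(".")[:2]:
        raise RuntimeError(
            f"explainer was built against {package}=={expected} but the image "
            f"has {installed}; pin the dependency or rebuild the explainer"
        )
    return installed


if __name__ == "__main__":
    try:
        print("alibi version OK:", check_explainer_runtime())
    except RuntimeError as err:
        print("refusing to serve:", err)
```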
celery__celery-5898
Python 3.9 compatibility issue regarding usage of threading.Thread.isAlive <!-- Please fill this template entirely and do not erase parts of it. We reserve the right to close without a response bug reports which are incomplete. --> # Checklist <!-- To check an item on the list replace [ ] with [x]. --> - [x] I have verified that the issue exists against the `master` branch of Celery. - [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first. - [ ] I have read the relevant section in the [contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs) on reporting bugs. - [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22) for similar or identical bug reports. - [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22) for existing proposed fixes. - [x] I have checked the [commit log](https://github.com/celery/celery/commits/master) to find out if the bug was already fixed in the master branch. - [ ] I have included all related issues and possible duplicate issues in this issue (If there are none, check this box anyway). ## Mandatory Debugging Information - [x] I have verified that the issue exists against the `master` branch of Celery. ## Optional Debugging Information `isAlive` was deprecated and removed in Python 3.9 . Celery has the deprecation warning that will become error in Python 3.9 . https://travis-ci.org/celery/celery/jobs/628813003#L3262-L3263 Relevant CPython PR : https://github.com/python/cpython/pull/15225
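For context on the rename this report asks for: `Thread.is_alive()` is the PEP 8 spelling that has existed alongside the camelCase alias for many releases, and Python 3.9 removed the alias, so switching the call site keeps the code working on both older and newer interpreters. A minimal, self-contained illustration (not celery code, just the standard library behaviour):

```python
import threading
import time

t = threading.Thread(target=time.sleep, args=(0.1,))
t.start()

print(t.is_alive())   # True while the worker runs; portable spelling
# t.isAlive()         # AttributeError on Python 3.9+ (DeprecationWarning on 3.8)

t.join()
print(t.is_alive())   # False once the thread has finished
```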
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Scheduler for Python functions.\n\n.. note::\n This is used for the thread-based worker only,\n not for amqp/redis/sqs/qpid where :mod:`kombu.asynchronous.timer` is used.\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nimport sys\nimport threading\nfrom itertools import count\nfrom time import sleep\n\nfrom kombu.asynchronous.timer import Entry\nfrom kombu.asynchronous.timer import Timer as Schedule\nfrom kombu.asynchronous.timer import logger, to_timestamp\n\nfrom celery.five import THREAD_TIMEOUT_MAX\n\nTIMER_DEBUG = os.environ.get('TIMER_DEBUG')\n\n__all__ = ('Entry', 'Schedule', 'Timer', 'to_timestamp')\n\n\nclass Timer(threading.Thread):\n \"\"\"Timer thread.\n\n Note:\n This is only used for transports not supporting AsyncIO.\n \"\"\"\n\n Entry = Entry\n Schedule = Schedule\n\n running = False\n on_tick = None\n\n _timer_count = count(1)\n\n if TIMER_DEBUG: # pragma: no cover\n def start(self, *args, **kwargs):\n import traceback\n print('- Timer starting')\n traceback.print_stack()\n super(Timer, self).start(*args, **kwargs)\n\n def __init__(self, schedule=None, on_error=None, on_tick=None,\n on_start=None, max_interval=None, **kwargs):\n self.schedule = schedule or self.Schedule(on_error=on_error,\n max_interval=max_interval)\n self.on_start = on_start\n self.on_tick = on_tick or self.on_tick\n threading.Thread.__init__(self)\n self._is_shutdown = threading.Event()\n self._is_stopped = threading.Event()\n self.mutex = threading.Lock()\n self.not_empty = threading.Condition(self.mutex)\n self.daemon = True\n self.name = 'Timer-{0}'.format(next(self._timer_count))\n\n def _next_entry(self):\n with self.not_empty:\n delay, entry = next(self.scheduler)\n if entry is None:\n if delay is None:\n self.not_empty.wait(1.0)\n return delay\n return self.schedule.apply_entry(entry)\n __next__ = next = _next_entry # for 2to3\n\n def run(self):\n try:\n self.running = True\n self.scheduler = iter(self.schedule)\n\n while not self._is_shutdown.isSet():\n delay = self._next_entry()\n if delay:\n if self.on_tick:\n self.on_tick(delay)\n if sleep is None: # pragma: no cover\n break\n sleep(delay)\n try:\n self._is_stopped.set()\n except TypeError: # pragma: no cover\n # we lost the race at interpreter shutdown,\n # so gc collected built-in modules.\n pass\n except Exception as exc:\n logger.error('Thread Timer crashed: %r', exc, exc_info=True)\n sys.stderr.flush()\n os._exit(1)\n\n def stop(self):\n self._is_shutdown.set()\n if self.running:\n self._is_stopped.wait()\n self.join(THREAD_TIMEOUT_MAX)\n self.running = False\n\n def ensure_started(self):\n if not self.running and not self.isAlive():\n if self.on_start:\n self.on_start(self)\n self.start()\n\n def _do_enter(self, meth, *args, **kwargs):\n self.ensure_started()\n with self.mutex:\n entry = getattr(self.schedule, meth)(*args, **kwargs)\n self.not_empty.notify()\n return entry\n\n def enter(self, entry, eta, priority=None):\n return self._do_enter('enter_at', entry, eta, priority=priority)\n\n def call_at(self, *args, **kwargs):\n return self._do_enter('call_at', *args, **kwargs)\n\n def enter_after(self, *args, **kwargs):\n return self._do_enter('enter_after', *args, **kwargs)\n\n def call_after(self, *args, **kwargs):\n return self._do_enter('call_after', *args, **kwargs)\n\n def call_repeatedly(self, *args, **kwargs):\n return self._do_enter('call_repeatedly', *args, **kwargs)\n\n def exit_after(self, secs, priority=10):\n self.call_after(secs, 
sys.exit, priority)\n\n def cancel(self, tref):\n tref.cancel()\n\n def clear(self):\n self.schedule.clear()\n\n def empty(self):\n return not len(self)\n\n def __len__(self):\n return len(self.schedule)\n\n def __bool__(self):\n \"\"\"``bool(timer)``.\"\"\"\n return True\n __nonzero__ = __bool__\n\n @property\n def queue(self):\n return self.schedule.queue\n", "path": "celery/utils/timer2.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Scheduler for Python functions.\n\n.. note::\n This is used for the thread-based worker only,\n not for amqp/redis/sqs/qpid where :mod:`kombu.asynchronous.timer` is used.\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nimport sys\nimport threading\nfrom itertools import count\nfrom time import sleep\n\nfrom kombu.asynchronous.timer import Entry\nfrom kombu.asynchronous.timer import Timer as Schedule\nfrom kombu.asynchronous.timer import logger, to_timestamp\n\nfrom celery.five import THREAD_TIMEOUT_MAX\n\nTIMER_DEBUG = os.environ.get('TIMER_DEBUG')\n\n__all__ = ('Entry', 'Schedule', 'Timer', 'to_timestamp')\n\n\nclass Timer(threading.Thread):\n \"\"\"Timer thread.\n\n Note:\n This is only used for transports not supporting AsyncIO.\n \"\"\"\n\n Entry = Entry\n Schedule = Schedule\n\n running = False\n on_tick = None\n\n _timer_count = count(1)\n\n if TIMER_DEBUG: # pragma: no cover\n def start(self, *args, **kwargs):\n import traceback\n print('- Timer starting')\n traceback.print_stack()\n super(Timer, self).start(*args, **kwargs)\n\n def __init__(self, schedule=None, on_error=None, on_tick=None,\n on_start=None, max_interval=None, **kwargs):\n self.schedule = schedule or self.Schedule(on_error=on_error,\n max_interval=max_interval)\n self.on_start = on_start\n self.on_tick = on_tick or self.on_tick\n threading.Thread.__init__(self)\n self._is_shutdown = threading.Event()\n self._is_stopped = threading.Event()\n self.mutex = threading.Lock()\n self.not_empty = threading.Condition(self.mutex)\n self.daemon = True\n self.name = 'Timer-{0}'.format(next(self._timer_count))\n\n def _next_entry(self):\n with self.not_empty:\n delay, entry = next(self.scheduler)\n if entry is None:\n if delay is None:\n self.not_empty.wait(1.0)\n return delay\n return self.schedule.apply_entry(entry)\n __next__ = next = _next_entry # for 2to3\n\n def run(self):\n try:\n self.running = True\n self.scheduler = iter(self.schedule)\n\n while not self._is_shutdown.isSet():\n delay = self._next_entry()\n if delay:\n if self.on_tick:\n self.on_tick(delay)\n if sleep is None: # pragma: no cover\n break\n sleep(delay)\n try:\n self._is_stopped.set()\n except TypeError: # pragma: no cover\n # we lost the race at interpreter shutdown,\n # so gc collected built-in modules.\n pass\n except Exception as exc:\n logger.error('Thread Timer crashed: %r', exc, exc_info=True)\n sys.stderr.flush()\n os._exit(1)\n\n def stop(self):\n self._is_shutdown.set()\n if self.running:\n self._is_stopped.wait()\n self.join(THREAD_TIMEOUT_MAX)\n self.running = False\n\n def ensure_started(self):\n if not self.running and not self.is_alive():\n if self.on_start:\n self.on_start(self)\n self.start()\n\n def _do_enter(self, meth, *args, **kwargs):\n self.ensure_started()\n with self.mutex:\n entry = getattr(self.schedule, meth)(*args, **kwargs)\n self.not_empty.notify()\n return entry\n\n def enter(self, entry, eta, priority=None):\n return self._do_enter('enter_at', entry, eta, priority=priority)\n\n def call_at(self, *args, **kwargs):\n return self._do_enter('call_at', *args, **kwargs)\n\n def enter_after(self, *args, **kwargs):\n return self._do_enter('enter_after', *args, **kwargs)\n\n def call_after(self, *args, **kwargs):\n return self._do_enter('call_after', *args, **kwargs)\n\n def call_repeatedly(self, *args, **kwargs):\n return self._do_enter('call_repeatedly', *args, **kwargs)\n\n def exit_after(self, secs, priority=10):\n 
self.call_after(secs, sys.exit, priority)\n\n def cancel(self, tref):\n tref.cancel()\n\n def clear(self):\n self.schedule.clear()\n\n def empty(self):\n return not len(self)\n\n def __len__(self):\n return len(self.schedule)\n\n def __bool__(self):\n \"\"\"``bool(timer)``.\"\"\"\n return True\n __nonzero__ = __bool__\n\n @property\n def queue(self):\n return self.schedule.queue\n", "path": "celery/utils/timer2.py" } ]
diff --git a/celery/utils/timer2.py b/celery/utils/timer2.py
index 58de4ac278b..87f29b36891 100644
--- a/celery/utils/timer2.py
+++ b/celery/utils/timer2.py
@@ -102,7 +102,7 @@ def stop(self):
             self.running = False
 
     def ensure_started(self):
-        if not self.running and not self.isAlive():
+        if not self.running and not self.is_alive():
             if self.on_start:
                 self.on_start(self)
             self.start()
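The one-line change above swaps the camelCase `Thread.isAlive()` alias, deprecated in Python 3.8 and removed in Python 3.9, for the supported `is_alive()` spelling. A minimal standalone sketch of the supported API, using only the standard library (illustrative only, not taken from the celery sources):

```python
# Illustrative sketch: the supported Thread.is_alive() API that the patch
# switches to; the isAlive() alias no longer exists on Python 3.9+.
import threading
import time


def worker() -> None:
    time.sleep(0.1)


t = threading.Thread(target=worker, daemon=True)
t.start()
print(t.is_alive())  # True while the worker is still sleeping
t.join()
print(t.is_alive())  # False once the thread has finished
```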
aio-libs__aiohttp-3295
request.content.iter_chunks() stalls when content is empty.

## Long story short

The title says it all. Shouldn't [EmptyStreamReader.readchunk](https://github.com/aio-libs/aiohttp/blob/master/aiohttp/streams.py#L470) on its face `return (b'', True)` and not `return (b'', False)` ? Without it, the special EOS chunk is never sent...
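A small illustrative sketch of how the reported return value can be observed directly against `EmptyStreamReader` from the `aiohttp/streams.py` listed below (assumes aiohttp is installed; not part of the original report):

```python
# Illustrative sketch: call EmptyStreamReader.readchunk() directly. Before the
# fix it returns (b'', False), so a consumer waiting for the (b'', True)
# end-of-chunk marker never receives it.
import asyncio

from aiohttp.streams import EmptyStreamReader


async def main() -> None:
    reader = EmptyStreamReader()
    data, end_of_http_chunk = await reader.readchunk()
    print(data, end_of_http_chunk)  # b'' False before the fix, b'' True after


asyncio.run(main())
```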
[ { "content": "import asyncio\nimport collections\nfrom typing import List # noqa\nfrom typing import Awaitable, Callable, Optional, Tuple\n\nfrom .base_protocol import BaseProtocol\nfrom .helpers import BaseTimerContext, set_exception, set_result\nfrom .log import internal_logger\n\n\ntry: # pragma: no cover\n from typing import Deque # noqa\nexcept ImportError:\n from typing_extensions import Deque # noqa\n\n\n__all__ = (\n 'EMPTY_PAYLOAD', 'EofStream', 'StreamReader', 'DataQueue',\n 'FlowControlDataQueue')\n\nDEFAULT_LIMIT = 2 ** 16\n\n\nclass EofStream(Exception):\n \"\"\"eof stream indication.\"\"\"\n\n\nclass AsyncStreamIterator:\n\n def __init__(self, read_func: Callable[[], Awaitable[bytes]]) -> None:\n self.read_func = read_func\n\n def __aiter__(self) -> 'AsyncStreamIterator':\n return self\n\n async def __anext__(self) -> bytes:\n try:\n rv = await self.read_func()\n except EofStream:\n raise StopAsyncIteration # NOQA\n if rv == b'':\n raise StopAsyncIteration # NOQA\n return rv\n\n\nclass ChunkTupleAsyncStreamIterator:\n\n def __init__(self, stream: 'StreamReader') -> None:\n self._stream = stream\n\n def __aiter__(self) -> 'ChunkTupleAsyncStreamIterator':\n return self\n\n async def __anext__(self) -> Tuple[bytes, bool]:\n rv = await self._stream.readchunk()\n if rv == (b'', False):\n raise StopAsyncIteration # NOQA\n return rv\n\n\nclass AsyncStreamReaderMixin:\n\n def __aiter__(self) -> AsyncStreamIterator:\n return AsyncStreamIterator(self.readline) # type: ignore\n\n def iter_chunked(self, n: int) -> AsyncStreamIterator:\n \"\"\"Returns an asynchronous iterator that yields chunks of size n.\n\n Python-3.5 available for Python 3.5+ only\n \"\"\"\n return AsyncStreamIterator(lambda: self.read(n)) # type: ignore\n\n def iter_any(self) -> AsyncStreamIterator:\n \"\"\"Returns an asynchronous iterator that yields all the available\n data as soon as it is received\n\n Python-3.5 available for Python 3.5+ only\n \"\"\"\n return AsyncStreamIterator(self.readany) # type: ignore\n\n def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:\n \"\"\"Returns an asynchronous iterator that yields chunks of data\n as they are received by the server. 
The yielded objects are tuples\n of (bytes, bool) as returned by the StreamReader.readchunk method.\n\n Python-3.5 available for Python 3.5+ only\n \"\"\"\n return ChunkTupleAsyncStreamIterator(self) # type: ignore\n\n\nclass StreamReader(AsyncStreamReaderMixin):\n \"\"\"An enhancement of asyncio.StreamReader.\n\n Supports asynchronous iteration by line, chunk or as available::\n\n async for line in reader:\n ...\n async for chunk in reader.iter_chunked(1024):\n ...\n async for slice in reader.iter_any():\n ...\n\n \"\"\"\n\n total_bytes = 0\n\n def __init__(self, protocol: BaseProtocol,\n *, limit: int=DEFAULT_LIMIT,\n timer: Optional[BaseTimerContext]=None,\n loop: Optional[asyncio.AbstractEventLoop]=None) -> None:\n self._protocol = protocol\n self._low_water = limit\n self._high_water = limit * 2\n if loop is None:\n loop = asyncio.get_event_loop()\n self._loop = loop\n self._size = 0\n self._cursor = 0\n self._http_chunk_splits = None # type: Optional[List[int]]\n self._buffer = collections.deque() # type: Deque[bytes]\n self._buffer_offset = 0\n self._eof = False\n self._waiter = None # type: Optional[asyncio.Future[bool]]\n self._eof_waiter = None # type: Optional[asyncio.Future[bool]]\n self._exception = None # type: Optional[BaseException]\n self._timer = timer\n self._eof_callbacks = [] # type: List[Callable[[], None]]\n\n def __repr__(self) -> str:\n info = [self.__class__.__name__]\n if self._size:\n info.append('%d bytes' % self._size)\n if self._eof:\n info.append('eof')\n if self._low_water != DEFAULT_LIMIT:\n info.append('low=%d high=%d' % (self._low_water, self._high_water))\n if self._waiter:\n info.append('w=%r' % self._waiter)\n if self._exception:\n info.append('e=%r' % self._exception)\n return '<%s>' % ' '.join(info)\n\n def exception(self) -> Optional[BaseException]:\n return self._exception\n\n def set_exception(self, exc: BaseException) -> None:\n self._exception = exc\n self._eof_callbacks.clear()\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_exception(waiter, exc)\n\n waiter = self._eof_waiter\n if waiter is not None:\n set_exception(waiter, exc)\n self._eof_waiter = None\n\n def on_eof(self, callback: Callable[[], None]) -> None:\n if self._eof:\n try:\n callback()\n except Exception:\n internal_logger.exception('Exception in eof callback')\n else:\n self._eof_callbacks.append(callback)\n\n def feed_eof(self) -> None:\n self._eof = True\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, True)\n\n waiter = self._eof_waiter\n if waiter is not None:\n self._eof_waiter = None\n set_result(waiter, True)\n\n for cb in self._eof_callbacks:\n try:\n cb()\n except Exception:\n internal_logger.exception('Exception in eof callback')\n\n self._eof_callbacks.clear()\n\n def is_eof(self) -> bool:\n \"\"\"Return True if 'feed_eof' was called.\"\"\"\n return self._eof\n\n def at_eof(self) -> bool:\n \"\"\"Return True if the buffer is empty and 'feed_eof' was called.\"\"\"\n return self._eof and not self._buffer\n\n async def wait_eof(self) -> None:\n if self._eof:\n return\n\n assert self._eof_waiter is None\n self._eof_waiter = self._loop.create_future()\n try:\n await self._eof_waiter\n finally:\n self._eof_waiter = None\n\n def unread_data(self, data: bytes) -> None:\n \"\"\" rollback reading some data from stream, inserting it to buffer head.\n \"\"\"\n if not data:\n return\n\n if self._buffer_offset:\n self._buffer[0] = self._buffer[0][self._buffer_offset:]\n self._buffer_offset = 0\n self._size 
+= len(data)\n self._cursor -= len(data)\n self._buffer.appendleft(data)\n self._eof_counter = 0\n\n # TODO: size is ignored, remove the param later\n def feed_data(self, data: bytes, size: int=0) -> None:\n assert not self._eof, 'feed_data after feed_eof'\n\n if not data:\n return\n\n self._size += len(data)\n self._buffer.append(data)\n self.total_bytes += len(data)\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, False)\n\n if (self._size > self._high_water and\n not self._protocol._reading_paused):\n self._protocol.pause_reading()\n\n def begin_http_chunk_receiving(self) -> None:\n if self._http_chunk_splits is None:\n self._http_chunk_splits = []\n\n def end_http_chunk_receiving(self) -> None:\n if self._http_chunk_splits is None:\n raise RuntimeError(\"Called end_chunk_receiving without calling \"\n \"begin_chunk_receiving first\")\n if not self._http_chunk_splits or \\\n self._http_chunk_splits[-1] != self.total_bytes:\n self._http_chunk_splits.append(self.total_bytes)\n\n async def _wait(self, func_name: str) -> None:\n # StreamReader uses a future to link the protocol feed_data() method\n # to a read coroutine. Running two read coroutines at the same time\n # would have an unexpected behaviour. It would not possible to know\n # which coroutine would get the next data.\n if self._waiter is not None:\n raise RuntimeError('%s() called while another coroutine is '\n 'already waiting for incoming data' % func_name)\n\n waiter = self._waiter = self._loop.create_future()\n try:\n if self._timer:\n with self._timer:\n await waiter\n else:\n await waiter\n finally:\n self._waiter = None\n\n async def readline(self) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n line = []\n line_size = 0\n not_enough = True\n\n while not_enough:\n while self._buffer and not_enough:\n offset = self._buffer_offset\n ichar = self._buffer[0].find(b'\\n', offset) + 1\n # Read from current offset to found b'\\n' or to the end.\n data = self._read_nowait_chunk(ichar - offset if ichar else -1)\n line.append(data)\n line_size += len(data)\n if ichar:\n not_enough = False\n\n if line_size > self._high_water:\n raise ValueError('Line is too long')\n\n if self._eof:\n break\n\n if not_enough:\n await self._wait('readline')\n\n return b''.join(line)\n\n async def read(self, n: int=-1) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n # migration problem; with DataQueue you have to catch\n # EofStream exception, so common way is to run payload.read() inside\n # infinite loop. what can cause real infinite loop with StreamReader\n # lets keep this code one major release.\n if __debug__:\n if self._eof and not self._buffer:\n self._eof_counter = getattr(self, '_eof_counter', 0) + 1\n if self._eof_counter > 5:\n internal_logger.warning(\n 'Multiple access to StreamReader in eof state, '\n 'might be infinite loop.', stack_info=True)\n\n if not n:\n return b''\n\n if n < 0:\n # This used to just loop creating a new waiter hoping to\n # collect everything in self._buffer, but that would\n # deadlock if the subprocess sends more than self.limit\n # bytes. 
So just call self.readany() until EOF.\n blocks = []\n while True:\n block = await self.readany()\n if not block:\n break\n blocks.append(block)\n return b''.join(blocks)\n\n if not self._buffer and not self._eof:\n await self._wait('read')\n\n return self._read_nowait(n)\n\n async def readany(self) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n if not self._buffer and not self._eof:\n await self._wait('readany')\n\n return self._read_nowait(-1)\n\n async def readchunk(self) -> Tuple[bytes, bool]:\n \"\"\"Returns a tuple of (data, end_of_http_chunk). When chunked transfer\n encoding is used, end_of_http_chunk is a boolean indicating if the end\n of the data corresponds to the end of a HTTP chunk , otherwise it is\n always False.\n \"\"\"\n if self._exception is not None:\n raise self._exception\n\n if not self._buffer and not self._eof:\n if (self._http_chunk_splits and\n self._cursor == self._http_chunk_splits[0]):\n # end of http chunk without available data\n self._http_chunk_splits = self._http_chunk_splits[1:]\n return (b\"\", True)\n await self._wait('readchunk')\n\n if not self._buffer:\n # end of file\n return (b\"\", False)\n elif self._http_chunk_splits is not None:\n while self._http_chunk_splits:\n pos = self._http_chunk_splits[0]\n self._http_chunk_splits = self._http_chunk_splits[1:]\n if pos > self._cursor:\n return (self._read_nowait(pos-self._cursor), True)\n return (self._read_nowait(-1), False)\n else:\n return (self._read_nowait_chunk(-1), False)\n\n async def readexactly(self, n: int) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n blocks = [] # type: List[bytes]\n while n > 0:\n block = await self.read(n)\n if not block:\n partial = b''.join(blocks)\n raise asyncio.streams.IncompleteReadError(\n partial, len(partial) + n)\n blocks.append(block)\n n -= len(block)\n\n return b''.join(blocks)\n\n def read_nowait(self, n: int=-1) -> bytes:\n # default was changed to be consistent with .read(-1)\n #\n # I believe the most users don't know about the method and\n # they are not affected.\n if self._exception is not None:\n raise self._exception\n\n if self._waiter and not self._waiter.done():\n raise RuntimeError(\n 'Called while some coroutine is waiting for incoming data.')\n\n return self._read_nowait(n)\n\n def _read_nowait_chunk(self, n: int) -> bytes:\n first_buffer = self._buffer[0]\n offset = self._buffer_offset\n if n != -1 and len(first_buffer) - offset > n:\n data = first_buffer[offset:offset + n]\n self._buffer_offset += n\n\n elif offset:\n self._buffer.popleft()\n data = first_buffer[offset:]\n self._buffer_offset = 0\n\n else:\n data = self._buffer.popleft()\n\n self._size -= len(data)\n self._cursor += len(data)\n\n if self._size < self._low_water and self._protocol._reading_paused:\n self._protocol.resume_reading()\n return data\n\n def _read_nowait(self, n: int) -> bytes:\n chunks = []\n\n while self._buffer:\n chunk = self._read_nowait_chunk(n)\n chunks.append(chunk)\n if n != -1:\n n -= len(chunk)\n if n == 0:\n break\n\n return b''.join(chunks) if chunks else b''\n\n\nclass EmptyStreamReader(AsyncStreamReaderMixin):\n\n def exception(self) -> Optional[BaseException]:\n return None\n\n def set_exception(self, exc: BaseException) -> None:\n pass\n\n def on_eof(self, callback: Callable[[], None]) -> None:\n try:\n callback()\n except Exception:\n internal_logger.exception('Exception in eof callback')\n\n def feed_eof(self) -> None:\n pass\n\n def is_eof(self) -> bool:\n return True\n\n def at_eof(self) -> 
bool:\n return True\n\n async def wait_eof(self) -> None:\n return\n\n def feed_data(self, data: bytes, n: int=0) -> None:\n pass\n\n async def readline(self) -> bytes:\n return b''\n\n async def read(self, n: int=-1) -> bytes:\n return b''\n\n async def readany(self) -> bytes:\n return b''\n\n async def readchunk(self) -> Tuple[bytes, bool]:\n return (b'', False)\n\n async def readexactly(self, n: int) -> bytes:\n raise asyncio.streams.IncompleteReadError(b'', n)\n\n def read_nowait(self) -> bytes:\n return b''\n\n\nEMPTY_PAYLOAD = EmptyStreamReader()\n\n\nclass DataQueue:\n \"\"\"DataQueue is a general-purpose blocking queue with one reader.\"\"\"\n\n def __init__(self, *, loop: asyncio.AbstractEventLoop) -> None:\n self._loop = loop\n self._eof = False\n self._waiter = None # type: Optional[asyncio.Future[bool]]\n self._exception = None # type: Optional[BaseException]\n self._size = 0\n self._buffer = collections.deque() # type: Deque[Tuple[bytes, int]]\n\n def __len__(self) -> int:\n return len(self._buffer)\n\n def is_eof(self) -> bool:\n return self._eof\n\n def at_eof(self) -> bool:\n return self._eof and not self._buffer\n\n def exception(self) -> Optional[BaseException]:\n return self._exception\n\n def set_exception(self, exc: BaseException) -> None:\n self._eof = True\n self._exception = exc\n\n waiter = self._waiter\n if waiter is not None:\n set_exception(waiter, exc)\n self._waiter = None\n\n def feed_data(self, data: bytes, size: int=0) -> None:\n self._size += size\n self._buffer.append((data, size))\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, True)\n\n def feed_eof(self) -> None:\n self._eof = True\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, False)\n\n async def read(self) -> bytes:\n if not self._buffer and not self._eof:\n assert not self._waiter\n self._waiter = self._loop.create_future()\n try:\n await self._waiter\n except (asyncio.CancelledError, asyncio.TimeoutError):\n self._waiter = None\n raise\n\n if self._buffer:\n data, size = self._buffer.popleft()\n self._size -= size\n return data\n else:\n if self._exception is not None:\n raise self._exception\n else:\n raise EofStream\n\n def __aiter__(self) -> AsyncStreamIterator:\n return AsyncStreamIterator(self.read)\n\n\nclass FlowControlDataQueue(DataQueue):\n \"\"\"FlowControlDataQueue resumes and pauses an underlying stream.\n\n It is a destination for parsed data.\"\"\"\n\n def __init__(self, protocol: BaseProtocol, *,\n limit: int=DEFAULT_LIMIT,\n loop: asyncio.AbstractEventLoop) -> None:\n super().__init__(loop=loop)\n\n self._protocol = protocol\n self._limit = limit * 2\n\n def feed_data(self, data: bytes, size: int=0) -> None:\n super().feed_data(data, size)\n\n if self._size > self._limit and not self._protocol._reading_paused:\n self._protocol.pause_reading()\n\n async def read(self) -> bytes:\n try:\n return await super().read()\n finally:\n if self._size < self._limit and self._protocol._reading_paused:\n self._protocol.resume_reading()\n", "path": "aiohttp/streams.py" } ]
[ { "content": "import asyncio\nimport collections\nfrom typing import List # noqa\nfrom typing import Awaitable, Callable, Optional, Tuple\n\nfrom .base_protocol import BaseProtocol\nfrom .helpers import BaseTimerContext, set_exception, set_result\nfrom .log import internal_logger\n\n\ntry: # pragma: no cover\n from typing import Deque # noqa\nexcept ImportError:\n from typing_extensions import Deque # noqa\n\n\n__all__ = (\n 'EMPTY_PAYLOAD', 'EofStream', 'StreamReader', 'DataQueue',\n 'FlowControlDataQueue')\n\nDEFAULT_LIMIT = 2 ** 16\n\n\nclass EofStream(Exception):\n \"\"\"eof stream indication.\"\"\"\n\n\nclass AsyncStreamIterator:\n\n def __init__(self, read_func: Callable[[], Awaitable[bytes]]) -> None:\n self.read_func = read_func\n\n def __aiter__(self) -> 'AsyncStreamIterator':\n return self\n\n async def __anext__(self) -> bytes:\n try:\n rv = await self.read_func()\n except EofStream:\n raise StopAsyncIteration # NOQA\n if rv == b'':\n raise StopAsyncIteration # NOQA\n return rv\n\n\nclass ChunkTupleAsyncStreamIterator:\n\n def __init__(self, stream: 'StreamReader') -> None:\n self._stream = stream\n\n def __aiter__(self) -> 'ChunkTupleAsyncStreamIterator':\n return self\n\n async def __anext__(self) -> Tuple[bytes, bool]:\n rv = await self._stream.readchunk()\n if rv == (b'', False):\n raise StopAsyncIteration # NOQA\n return rv\n\n\nclass AsyncStreamReaderMixin:\n\n def __aiter__(self) -> AsyncStreamIterator:\n return AsyncStreamIterator(self.readline) # type: ignore\n\n def iter_chunked(self, n: int) -> AsyncStreamIterator:\n \"\"\"Returns an asynchronous iterator that yields chunks of size n.\n\n Python-3.5 available for Python 3.5+ only\n \"\"\"\n return AsyncStreamIterator(lambda: self.read(n)) # type: ignore\n\n def iter_any(self) -> AsyncStreamIterator:\n \"\"\"Returns an asynchronous iterator that yields all the available\n data as soon as it is received\n\n Python-3.5 available for Python 3.5+ only\n \"\"\"\n return AsyncStreamIterator(self.readany) # type: ignore\n\n def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:\n \"\"\"Returns an asynchronous iterator that yields chunks of data\n as they are received by the server. 
The yielded objects are tuples\n of (bytes, bool) as returned by the StreamReader.readchunk method.\n\n Python-3.5 available for Python 3.5+ only\n \"\"\"\n return ChunkTupleAsyncStreamIterator(self) # type: ignore\n\n\nclass StreamReader(AsyncStreamReaderMixin):\n \"\"\"An enhancement of asyncio.StreamReader.\n\n Supports asynchronous iteration by line, chunk or as available::\n\n async for line in reader:\n ...\n async for chunk in reader.iter_chunked(1024):\n ...\n async for slice in reader.iter_any():\n ...\n\n \"\"\"\n\n total_bytes = 0\n\n def __init__(self, protocol: BaseProtocol,\n *, limit: int=DEFAULT_LIMIT,\n timer: Optional[BaseTimerContext]=None,\n loop: Optional[asyncio.AbstractEventLoop]=None) -> None:\n self._protocol = protocol\n self._low_water = limit\n self._high_water = limit * 2\n if loop is None:\n loop = asyncio.get_event_loop()\n self._loop = loop\n self._size = 0\n self._cursor = 0\n self._http_chunk_splits = None # type: Optional[List[int]]\n self._buffer = collections.deque() # type: Deque[bytes]\n self._buffer_offset = 0\n self._eof = False\n self._waiter = None # type: Optional[asyncio.Future[bool]]\n self._eof_waiter = None # type: Optional[asyncio.Future[bool]]\n self._exception = None # type: Optional[BaseException]\n self._timer = timer\n self._eof_callbacks = [] # type: List[Callable[[], None]]\n\n def __repr__(self) -> str:\n info = [self.__class__.__name__]\n if self._size:\n info.append('%d bytes' % self._size)\n if self._eof:\n info.append('eof')\n if self._low_water != DEFAULT_LIMIT:\n info.append('low=%d high=%d' % (self._low_water, self._high_water))\n if self._waiter:\n info.append('w=%r' % self._waiter)\n if self._exception:\n info.append('e=%r' % self._exception)\n return '<%s>' % ' '.join(info)\n\n def exception(self) -> Optional[BaseException]:\n return self._exception\n\n def set_exception(self, exc: BaseException) -> None:\n self._exception = exc\n self._eof_callbacks.clear()\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_exception(waiter, exc)\n\n waiter = self._eof_waiter\n if waiter is not None:\n set_exception(waiter, exc)\n self._eof_waiter = None\n\n def on_eof(self, callback: Callable[[], None]) -> None:\n if self._eof:\n try:\n callback()\n except Exception:\n internal_logger.exception('Exception in eof callback')\n else:\n self._eof_callbacks.append(callback)\n\n def feed_eof(self) -> None:\n self._eof = True\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, True)\n\n waiter = self._eof_waiter\n if waiter is not None:\n self._eof_waiter = None\n set_result(waiter, True)\n\n for cb in self._eof_callbacks:\n try:\n cb()\n except Exception:\n internal_logger.exception('Exception in eof callback')\n\n self._eof_callbacks.clear()\n\n def is_eof(self) -> bool:\n \"\"\"Return True if 'feed_eof' was called.\"\"\"\n return self._eof\n\n def at_eof(self) -> bool:\n \"\"\"Return True if the buffer is empty and 'feed_eof' was called.\"\"\"\n return self._eof and not self._buffer\n\n async def wait_eof(self) -> None:\n if self._eof:\n return\n\n assert self._eof_waiter is None\n self._eof_waiter = self._loop.create_future()\n try:\n await self._eof_waiter\n finally:\n self._eof_waiter = None\n\n def unread_data(self, data: bytes) -> None:\n \"\"\" rollback reading some data from stream, inserting it to buffer head.\n \"\"\"\n if not data:\n return\n\n if self._buffer_offset:\n self._buffer[0] = self._buffer[0][self._buffer_offset:]\n self._buffer_offset = 0\n self._size 
+= len(data)\n self._cursor -= len(data)\n self._buffer.appendleft(data)\n self._eof_counter = 0\n\n # TODO: size is ignored, remove the param later\n def feed_data(self, data: bytes, size: int=0) -> None:\n assert not self._eof, 'feed_data after feed_eof'\n\n if not data:\n return\n\n self._size += len(data)\n self._buffer.append(data)\n self.total_bytes += len(data)\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, False)\n\n if (self._size > self._high_water and\n not self._protocol._reading_paused):\n self._protocol.pause_reading()\n\n def begin_http_chunk_receiving(self) -> None:\n if self._http_chunk_splits is None:\n self._http_chunk_splits = []\n\n def end_http_chunk_receiving(self) -> None:\n if self._http_chunk_splits is None:\n raise RuntimeError(\"Called end_chunk_receiving without calling \"\n \"begin_chunk_receiving first\")\n if not self._http_chunk_splits or \\\n self._http_chunk_splits[-1] != self.total_bytes:\n self._http_chunk_splits.append(self.total_bytes)\n\n async def _wait(self, func_name: str) -> None:\n # StreamReader uses a future to link the protocol feed_data() method\n # to a read coroutine. Running two read coroutines at the same time\n # would have an unexpected behaviour. It would not possible to know\n # which coroutine would get the next data.\n if self._waiter is not None:\n raise RuntimeError('%s() called while another coroutine is '\n 'already waiting for incoming data' % func_name)\n\n waiter = self._waiter = self._loop.create_future()\n try:\n if self._timer:\n with self._timer:\n await waiter\n else:\n await waiter\n finally:\n self._waiter = None\n\n async def readline(self) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n line = []\n line_size = 0\n not_enough = True\n\n while not_enough:\n while self._buffer and not_enough:\n offset = self._buffer_offset\n ichar = self._buffer[0].find(b'\\n', offset) + 1\n # Read from current offset to found b'\\n' or to the end.\n data = self._read_nowait_chunk(ichar - offset if ichar else -1)\n line.append(data)\n line_size += len(data)\n if ichar:\n not_enough = False\n\n if line_size > self._high_water:\n raise ValueError('Line is too long')\n\n if self._eof:\n break\n\n if not_enough:\n await self._wait('readline')\n\n return b''.join(line)\n\n async def read(self, n: int=-1) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n # migration problem; with DataQueue you have to catch\n # EofStream exception, so common way is to run payload.read() inside\n # infinite loop. what can cause real infinite loop with StreamReader\n # lets keep this code one major release.\n if __debug__:\n if self._eof and not self._buffer:\n self._eof_counter = getattr(self, '_eof_counter', 0) + 1\n if self._eof_counter > 5:\n internal_logger.warning(\n 'Multiple access to StreamReader in eof state, '\n 'might be infinite loop.', stack_info=True)\n\n if not n:\n return b''\n\n if n < 0:\n # This used to just loop creating a new waiter hoping to\n # collect everything in self._buffer, but that would\n # deadlock if the subprocess sends more than self.limit\n # bytes. 
So just call self.readany() until EOF.\n blocks = []\n while True:\n block = await self.readany()\n if not block:\n break\n blocks.append(block)\n return b''.join(blocks)\n\n if not self._buffer and not self._eof:\n await self._wait('read')\n\n return self._read_nowait(n)\n\n async def readany(self) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n if not self._buffer and not self._eof:\n await self._wait('readany')\n\n return self._read_nowait(-1)\n\n async def readchunk(self) -> Tuple[bytes, bool]:\n \"\"\"Returns a tuple of (data, end_of_http_chunk). When chunked transfer\n encoding is used, end_of_http_chunk is a boolean indicating if the end\n of the data corresponds to the end of a HTTP chunk , otherwise it is\n always False.\n \"\"\"\n if self._exception is not None:\n raise self._exception\n\n if not self._buffer and not self._eof:\n if (self._http_chunk_splits and\n self._cursor == self._http_chunk_splits[0]):\n # end of http chunk without available data\n self._http_chunk_splits = self._http_chunk_splits[1:]\n return (b\"\", True)\n await self._wait('readchunk')\n\n if not self._buffer:\n # end of file\n return (b\"\", False)\n elif self._http_chunk_splits is not None:\n while self._http_chunk_splits:\n pos = self._http_chunk_splits[0]\n self._http_chunk_splits = self._http_chunk_splits[1:]\n if pos > self._cursor:\n return (self._read_nowait(pos-self._cursor), True)\n return (self._read_nowait(-1), False)\n else:\n return (self._read_nowait_chunk(-1), False)\n\n async def readexactly(self, n: int) -> bytes:\n if self._exception is not None:\n raise self._exception\n\n blocks = [] # type: List[bytes]\n while n > 0:\n block = await self.read(n)\n if not block:\n partial = b''.join(blocks)\n raise asyncio.streams.IncompleteReadError(\n partial, len(partial) + n)\n blocks.append(block)\n n -= len(block)\n\n return b''.join(blocks)\n\n def read_nowait(self, n: int=-1) -> bytes:\n # default was changed to be consistent with .read(-1)\n #\n # I believe the most users don't know about the method and\n # they are not affected.\n if self._exception is not None:\n raise self._exception\n\n if self._waiter and not self._waiter.done():\n raise RuntimeError(\n 'Called while some coroutine is waiting for incoming data.')\n\n return self._read_nowait(n)\n\n def _read_nowait_chunk(self, n: int) -> bytes:\n first_buffer = self._buffer[0]\n offset = self._buffer_offset\n if n != -1 and len(first_buffer) - offset > n:\n data = first_buffer[offset:offset + n]\n self._buffer_offset += n\n\n elif offset:\n self._buffer.popleft()\n data = first_buffer[offset:]\n self._buffer_offset = 0\n\n else:\n data = self._buffer.popleft()\n\n self._size -= len(data)\n self._cursor += len(data)\n\n if self._size < self._low_water and self._protocol._reading_paused:\n self._protocol.resume_reading()\n return data\n\n def _read_nowait(self, n: int) -> bytes:\n chunks = []\n\n while self._buffer:\n chunk = self._read_nowait_chunk(n)\n chunks.append(chunk)\n if n != -1:\n n -= len(chunk)\n if n == 0:\n break\n\n return b''.join(chunks) if chunks else b''\n\n\nclass EmptyStreamReader(AsyncStreamReaderMixin):\n\n def exception(self) -> Optional[BaseException]:\n return None\n\n def set_exception(self, exc: BaseException) -> None:\n pass\n\n def on_eof(self, callback: Callable[[], None]) -> None:\n try:\n callback()\n except Exception:\n internal_logger.exception('Exception in eof callback')\n\n def feed_eof(self) -> None:\n pass\n\n def is_eof(self) -> bool:\n return True\n\n def at_eof(self) -> 
bool:\n return True\n\n async def wait_eof(self) -> None:\n return\n\n def feed_data(self, data: bytes, n: int=0) -> None:\n pass\n\n async def readline(self) -> bytes:\n return b''\n\n async def read(self, n: int=-1) -> bytes:\n return b''\n\n async def readany(self) -> bytes:\n return b''\n\n async def readchunk(self) -> Tuple[bytes, bool]:\n return (b'', True)\n\n async def readexactly(self, n: int) -> bytes:\n raise asyncio.streams.IncompleteReadError(b'', n)\n\n def read_nowait(self) -> bytes:\n return b''\n\n\nEMPTY_PAYLOAD = EmptyStreamReader()\n\n\nclass DataQueue:\n \"\"\"DataQueue is a general-purpose blocking queue with one reader.\"\"\"\n\n def __init__(self, *, loop: asyncio.AbstractEventLoop) -> None:\n self._loop = loop\n self._eof = False\n self._waiter = None # type: Optional[asyncio.Future[bool]]\n self._exception = None # type: Optional[BaseException]\n self._size = 0\n self._buffer = collections.deque() # type: Deque[Tuple[bytes, int]]\n\n def __len__(self) -> int:\n return len(self._buffer)\n\n def is_eof(self) -> bool:\n return self._eof\n\n def at_eof(self) -> bool:\n return self._eof and not self._buffer\n\n def exception(self) -> Optional[BaseException]:\n return self._exception\n\n def set_exception(self, exc: BaseException) -> None:\n self._eof = True\n self._exception = exc\n\n waiter = self._waiter\n if waiter is not None:\n set_exception(waiter, exc)\n self._waiter = None\n\n def feed_data(self, data: bytes, size: int=0) -> None:\n self._size += size\n self._buffer.append((data, size))\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, True)\n\n def feed_eof(self) -> None:\n self._eof = True\n\n waiter = self._waiter\n if waiter is not None:\n self._waiter = None\n set_result(waiter, False)\n\n async def read(self) -> bytes:\n if not self._buffer and not self._eof:\n assert not self._waiter\n self._waiter = self._loop.create_future()\n try:\n await self._waiter\n except (asyncio.CancelledError, asyncio.TimeoutError):\n self._waiter = None\n raise\n\n if self._buffer:\n data, size = self._buffer.popleft()\n self._size -= size\n return data\n else:\n if self._exception is not None:\n raise self._exception\n else:\n raise EofStream\n\n def __aiter__(self) -> AsyncStreamIterator:\n return AsyncStreamIterator(self.read)\n\n\nclass FlowControlDataQueue(DataQueue):\n \"\"\"FlowControlDataQueue resumes and pauses an underlying stream.\n\n It is a destination for parsed data.\"\"\"\n\n def __init__(self, protocol: BaseProtocol, *,\n limit: int=DEFAULT_LIMIT,\n loop: asyncio.AbstractEventLoop) -> None:\n super().__init__(loop=loop)\n\n self._protocol = protocol\n self._limit = limit * 2\n\n def feed_data(self, data: bytes, size: int=0) -> None:\n super().feed_data(data, size)\n\n if self._size > self._limit and not self._protocol._reading_paused:\n self._protocol.pause_reading()\n\n async def read(self) -> bytes:\n try:\n return await super().read()\n finally:\n if self._size < self._limit and self._protocol._reading_paused:\n self._protocol.resume_reading()\n", "path": "aiohttp/streams.py" } ]
diff --git a/CHANGES/3186.bugfix b/CHANGES/3186.bugfix
new file mode 100644
index 00000000000..e434938b3c1
--- /dev/null
+++ b/CHANGES/3186.bugfix
@@ -0,0 +1 @@
+Return empty bytes with end-of-chunk marker in empty stream reader.
diff --git a/aiohttp/streams.py b/aiohttp/streams.py
index 20f38f06039..7cdb97abc61 100644
--- a/aiohttp/streams.py
+++ b/aiohttp/streams.py
@@ -482,7 +482,7 @@ async def readany(self) -> bytes:
         return b''
 
     async def readchunk(self) -> Tuple[bytes, bool]:
-        return (b'', False)
+        return (b'', True)
 
     async def readexactly(self, n: int) -> bytes:
         raise asyncio.streams.IncompleteReadError(b'', n)
diff --git a/tests/test_streams.py b/tests/test_streams.py
index 66a96a23379..fbb760c80d0 100644
--- a/tests/test_streams.py
+++ b/tests/test_streams.py
@@ -750,7 +750,7 @@ async def test_empty_stream_reader() -> None:
     assert await s.read() == b''
     assert await s.readline() == b''
     assert await s.readany() == b''
-    assert await s.readchunk() == (b'', False)
+    assert await s.readchunk() == (b'', True)
     with pytest.raises(asyncio.IncompleteReadError):
         await s.readexactly(10)
     assert s.read_nowait() == b''
canonical__snapcraft-4329
SNAPCRAFT_BUILD_ENVIRONMENT apparently takes precedence over "--use-lxd" ### Bug Description Despite what is printed here, https://snapcraft.io/docs/build-providers, if I set the env var SNAPCRAFT_BUILD_ENVIRONMENT=host, that appears to take priority over using "--use-lxd" on the command line. ### To Reproduce Set env var SNAPCRAFT_BUILD_ENVIRONMENT=host, then try overriding that with "--use-lxd". ### Environment Ubuntu 22.04 ### snapcraft.yaml ```shell Not relevant. ``` ### Relevant log output ```shell Build is done on host in destructive mode. ``` ### Additional context _No response_
[ { "content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022-2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Parts lifecycle preparation and execution.\"\"\"\n\nimport copy\nimport os\nimport shutil\nimport subprocess\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Tuple\n\nimport craft_parts\nfrom craft_cli import emit\nfrom craft_parts import ProjectInfo, Step, StepInfo, callbacks\nfrom craft_providers import Executor\n\nfrom snapcraft import errors, extensions, linters, pack, providers, ua_manager, utils\nfrom snapcraft.elf import Patcher, SonameCache, elf_utils\nfrom snapcraft.elf import errors as elf_errors\nfrom snapcraft.linters import LinterStatus\nfrom snapcraft.meta import manifest, snap_yaml\nfrom snapcraft.projects import (\n Architecture,\n ArchitectureProject,\n GrammarAwareProject,\n Project,\n)\nfrom snapcraft.utils import (\n convert_architecture_deb_to_platform,\n get_host_architecture,\n process_version,\n)\n\nfrom . import grammar, yaml_utils\nfrom .parts import PartsLifecycle, launch_shell\nfrom .project_check import run_project_checks\nfrom .setup_assets import setup_assets\nfrom .update_metadata import update_project_metadata\n\nif TYPE_CHECKING:\n import argparse\n\n\n@dataclass\nclass _SnapProject:\n project_file: Path\n assets_dir: Path = Path(\"snap\")\n\n\n_SNAP_PROJECT_FILES = [\n _SnapProject(project_file=Path(\"snapcraft.yaml\")),\n _SnapProject(project_file=Path(\"snap/snapcraft.yaml\")),\n _SnapProject(\n project_file=Path(\"build-aux/snap/snapcraft.yaml\"),\n assets_dir=Path(\"build-aux/snap\"),\n ),\n _SnapProject(project_file=Path(\".snapcraft.yaml\")),\n]\n\n_CORE_PART_KEYS = [\"build-packages\", \"build-snaps\"]\n_CORE_PART_NAME = \"snapcraft/core\"\n_EXPERIMENTAL_PLUGINS = [\"kernel\"]\n\n\ndef get_snap_project() -> _SnapProject:\n \"\"\"Find the snapcraft.yaml to load.\n\n :raises SnapcraftError: if the project yaml file cannot be found.\n \"\"\"\n for snap_project in _SNAP_PROJECT_FILES:\n if snap_project.project_file.exists():\n return snap_project\n\n raise errors.ProjectMissing()\n\n\ndef apply_yaml(\n yaml_data: Dict[str, Any], build_on: str, build_for: str\n) -> Dict[str, Any]:\n \"\"\"Apply Snapcraft logic to yaml_data.\n\n Extensions are applied and advanced grammar is processed.\n The architectures data is reduced to architectures in the current build plan.\n\n :param yaml_data: The project YAML data.\n :param build_on: Architecture the snap project will be built on.\n :param build_for: Target architecture the snap project will be built to.\n\n :return: A dictionary of yaml data with snapcraft logic applied.\n \"\"\"\n # validate project grammar\n GrammarAwareProject.validate_grammar(yaml_data)\n\n # Special Snapcraft Part\n core_part = {k: yaml_data.pop(k) for k in _CORE_PART_KEYS if k in yaml_data}\n if core_part:\n core_part[\"plugin\"] = \"nil\"\n 
yaml_data[\"parts\"][_CORE_PART_NAME] = core_part\n\n yaml_data = extensions.apply_extensions(\n yaml_data, arch=build_on, target_arch=build_for\n )\n\n if \"parts\" in yaml_data:\n yaml_data[\"parts\"] = grammar.process_parts(\n parts_yaml_data=yaml_data[\"parts\"], arch=build_on, target_arch=build_for\n )\n\n # replace all architectures with the architectures in the current build plan\n yaml_data[\"architectures\"] = [Architecture(build_on=build_on, build_for=build_for)]\n\n return yaml_data\n\n\ndef process_yaml(project_file: Path) -> Dict[str, Any]:\n \"\"\"Process yaml data from file into a dictionary.\n\n :param project_file: Path to project.\n\n :raises SnapcraftError: if the project yaml file cannot be loaded.\n\n :return: The processed YAML data.\n \"\"\"\n try:\n with open(project_file, encoding=\"utf-8\") as yaml_file:\n yaml_data = yaml_utils.load(yaml_file)\n except OSError as err:\n msg = err.strerror\n if err.filename:\n msg = f\"{msg}: {err.filename!r}.\"\n raise errors.SnapcraftError(msg) from err\n\n return yaml_data\n\n\ndef extract_parse_info(yaml_data: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"Remove parse-info data from parts.\n\n :param yaml_data: The project YAML data.\n\n :return: The extracted parse info for each part.\n \"\"\"\n parse_info: Dict[str, List[str]] = {}\n\n if \"parts\" in yaml_data:\n for name, data in yaml_data[\"parts\"].items():\n if \"parse-info\" in data:\n parse_info[name] = data.pop(\"parse-info\")\n\n return parse_info\n\n\ndef run(command_name: str, parsed_args: \"argparse.Namespace\") -> None:\n \"\"\"Run the parts lifecycle.\n\n :raises SnapcraftError: if the step name is invalid, or the project\n yaml file cannot be loaded.\n :raises LegacyFallback: if the project's base is not core22.\n \"\"\"\n emit.debug(f\"command: {command_name}, arguments: {parsed_args}\")\n\n snap_project = get_snap_project()\n yaml_data = process_yaml(snap_project.project_file)\n start_time = datetime.now()\n\n if parsed_args.provider:\n raise errors.SnapcraftError(\"Option --provider is not supported.\")\n\n if yaml_data.get(\"ua-services\"):\n if not parsed_args.ua_token:\n raise errors.SnapcraftError(\n \"UA services require a UA token to be specified.\"\n )\n\n if not parsed_args.enable_experimental_ua_services:\n raise errors.SnapcraftError(\n \"Using UA services requires --enable-experimental-ua-services.\"\n )\n\n build_plan = get_build_plan(yaml_data, parsed_args)\n\n # Register our own callbacks\n callbacks.register_prologue(_set_global_environment)\n callbacks.register_pre_step(_set_step_environment)\n callbacks.register_post_step(_patch_elf, step_list=[Step.PRIME])\n\n build_count = utils.get_parallel_build_count()\n\n for build_on, build_for in build_plan:\n emit.verbose(f\"Running on {build_on} for {build_for}\")\n yaml_data_for_arch = apply_yaml(yaml_data, build_on, build_for)\n parse_info = extract_parse_info(yaml_data_for_arch)\n _expand_environment(\n yaml_data_for_arch,\n parallel_build_count=build_count,\n target_arch=build_for,\n )\n project = Project.unmarshal(yaml_data_for_arch)\n\n _run_command(\n command_name,\n project=project,\n parse_info=parse_info,\n parallel_build_count=build_count,\n assets_dir=snap_project.assets_dir,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n\n\ndef _run_command(\n command_name: str,\n *,\n project: Project,\n parse_info: Dict[str, List[str]],\n assets_dir: Path,\n start_time: datetime,\n parallel_build_count: int,\n parsed_args: \"argparse.Namespace\",\n) -> None:\n managed_mode = 
utils.is_managed_mode()\n part_names = getattr(parsed_args, \"parts\", None)\n\n enable_experimental_plugins = getattr(\n parsed_args, \"enable_experimental_plugins\", False\n )\n _check_experimental_plugins(project, enable_experimental_plugins)\n\n if not managed_mode:\n run_project_checks(project, assets_dir=assets_dir)\n\n if command_name == \"snap\":\n emit.progress(\n \"The 'snap' command is deprecated, use 'pack' instead.\",\n permanent=True,\n )\n\n if (\n not managed_mode\n and not parsed_args.destructive_mode\n and not os.getenv(\"SNAPCRAFT_BUILD_ENVIRONMENT\") == \"host\"\n ):\n if command_name == \"clean\" and not part_names:\n _clean_provider(project, parsed_args)\n else:\n _run_in_provider(project, command_name, parsed_args)\n return\n\n if managed_mode:\n work_dir = utils.get_managed_environment_home_path()\n project_dir = utils.get_managed_environment_project_path()\n else:\n work_dir = project_dir = Path.cwd()\n\n step_name = \"prime\" if command_name in (\"pack\", \"snap\", \"try\") else command_name\n\n track_stage_packages = getattr(parsed_args, \"enable_manifest\", False)\n\n lifecycle = PartsLifecycle(\n project.parts,\n work_dir=work_dir,\n assets_dir=assets_dir,\n base=project.get_effective_base(),\n project_base=project.base or \"\",\n confinement=project.confinement,\n package_repositories=project.package_repositories,\n parallel_build_count=parallel_build_count,\n part_names=part_names,\n adopt_info=project.adopt_info,\n project_name=project.name,\n parse_info=parse_info,\n project_vars={\n \"version\": project.version or \"\",\n \"grade\": project.grade or \"\",\n },\n extra_build_snaps=project.get_extra_build_snaps(),\n target_arch=project.get_build_for(),\n track_stage_packages=track_stage_packages,\n )\n\n if command_name == \"clean\":\n lifecycle.clean(part_names=part_names)\n return\n\n try:\n _run_lifecycle_and_pack(\n lifecycle,\n command_name=command_name,\n step_name=step_name,\n project=project,\n project_dir=project_dir,\n assets_dir=assets_dir,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n except PermissionError as err:\n if parsed_args.debug:\n emit.progress(str(err), permanent=True)\n launch_shell()\n raise errors.FilePermissionError(err.filename, reason=err.strerror)\n except OSError as err:\n msg = err.strerror\n if err.filename:\n msg = f\"{err.filename}: {msg}\"\n if parsed_args.debug:\n emit.progress(msg, permanent=True)\n launch_shell()\n raise errors.SnapcraftError(msg) from err\n except Exception as err:\n if parsed_args.debug:\n emit.progress(str(err), permanent=True)\n launch_shell()\n raise errors.SnapcraftError(str(err)) from err\n\n\ndef _run_lifecycle_and_pack(\n lifecycle: PartsLifecycle,\n *,\n command_name: str,\n step_name: str,\n project: Project,\n project_dir: Path,\n assets_dir: Path,\n start_time: datetime,\n parsed_args: \"argparse.Namespace\",\n) -> None:\n \"\"\"Execute the parts lifecycle, generate metadata, and create the snap.\"\"\"\n with ua_manager.ua_manager(parsed_args.ua_token, services=project.ua_services):\n lifecycle.run(\n step_name,\n shell=getattr(parsed_args, \"shell\", False),\n shell_after=getattr(parsed_args, \"shell_after\", False),\n )\n\n # Extract metadata and generate snap.yaml\n part_names = getattr(parsed_args, \"part_names\", None)\n\n if step_name == \"prime\" and not part_names:\n _generate_metadata(\n project=project,\n lifecycle=lifecycle,\n project_dir=project_dir,\n assets_dir=assets_dir,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n\n if command_name in (\"pack\", 
\"snap\"):\n issues = linters.run_linters(lifecycle.prime_dir, lint=project.lint)\n status = linters.report(issues, intermediate=True)\n\n # In case of linter errors, stop execution and return the error code.\n if status in (LinterStatus.ERRORS, LinterStatus.FATAL):\n raise errors.LinterError(\"Linter errors found\", exit_code=status)\n\n snap_filename = pack.pack_snap(\n lifecycle.prime_dir,\n output=parsed_args.output,\n compression=project.compression,\n name=project.name,\n version=process_version(project.version),\n target_arch=project.get_build_for(),\n )\n emit.message(f\"Created snap package {snap_filename}\")\n\n\ndef _generate_metadata(\n *,\n project: Project,\n lifecycle: PartsLifecycle,\n project_dir: Path,\n assets_dir: Path,\n start_time: datetime,\n parsed_args: \"argparse.Namespace\",\n):\n project_vars = lifecycle.project_vars\n\n emit.progress(\"Extracting and updating metadata...\")\n metadata_list = lifecycle.extract_metadata()\n update_project_metadata(\n project,\n project_vars=project_vars,\n metadata_list=metadata_list,\n assets_dir=assets_dir,\n prime_dir=lifecycle.prime_dir,\n )\n\n emit.progress(\"Copying snap assets...\")\n setup_assets(\n project,\n assets_dir=assets_dir,\n project_dir=project_dir,\n prime_dir=lifecycle.prime_dir,\n )\n\n emit.progress(\"Generating snap metadata...\")\n snap_yaml.write(project, lifecycle.prime_dir, arch=project.get_build_for())\n emit.progress(\"Generated snap metadata\", permanent=True)\n\n if parsed_args.enable_manifest:\n _generate_manifest(\n project,\n lifecycle=lifecycle,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n\n\ndef _generate_manifest(\n project: Project,\n *,\n lifecycle: PartsLifecycle,\n start_time: datetime,\n parsed_args: \"argparse.Namespace\",\n) -> None:\n \"\"\"Create and populate the manifest file.\"\"\"\n emit.progress(\"Generating snap manifest...\")\n image_information = parsed_args.manifest_image_information or \"{}\"\n\n parts = copy.deepcopy(project.parts)\n for name, part in parts.items():\n assets = lifecycle.get_part_pull_assets(part_name=name)\n if assets:\n part[\"stage-packages\"] = assets.get(\"stage-packages\", []) or []\n for key in (\"stage\", \"prime\", \"stage-packages\", \"build-packages\"):\n part.setdefault(key, [])\n\n manifest.write(\n project,\n lifecycle.prime_dir,\n arch=project.get_build_for(),\n parts=parts,\n start_time=start_time,\n image_information=image_information,\n primed_stage_packages=lifecycle.get_primed_stage_packages(),\n )\n emit.progress(\"Generated snap manifest\", permanent=True)\n\n # Also copy the original snapcraft.yaml\n snap_project = get_snap_project()\n shutil.copy(snap_project.project_file, lifecycle.prime_dir / \"snap\")\n\n\ndef _clean_provider(project: Project, parsed_args: \"argparse.Namespace\") -> None:\n \"\"\"Clean the provider environment.\n\n :param project: The project to clean.\n \"\"\"\n emit.progress(\"Cleaning build provider\")\n provider_name = \"lxd\" if parsed_args.use_lxd else None\n provider = providers.get_provider(provider_name)\n instance_name = providers.get_instance_name(\n project_name=project.name,\n project_path=Path().absolute(),\n build_on=project.get_build_on(),\n build_for=project.get_build_for(),\n )\n emit.debug(f\"Cleaning instance {instance_name}\")\n provider.clean_project_environments(instance_name=instance_name)\n emit.progress(\"Cleaned build provider\", permanent=True)\n\n\n# pylint: disable-next=too-many-branches, too-many-statements\ndef _run_in_provider(\n project: Project, command_name: str, 
parsed_args: \"argparse.Namespace\"\n) -> None:\n \"\"\"Pack image in provider instance.\"\"\"\n emit.debug(\"Checking build provider availability\")\n provider_name = \"lxd\" if parsed_args.use_lxd else None\n provider = providers.get_provider(provider_name)\n providers.ensure_provider_is_available(provider)\n\n cmd = [\"snapcraft\", command_name]\n\n if hasattr(parsed_args, \"parts\"):\n cmd.extend(parsed_args.parts)\n\n if getattr(parsed_args, \"output\", None):\n cmd.extend([\"--output\", parsed_args.output])\n\n mode = emit.get_mode().name.lower()\n cmd.append(f\"--verbosity={mode}\")\n\n if parsed_args.debug:\n cmd.append(\"--debug\")\n if getattr(parsed_args, \"shell\", False):\n cmd.append(\"--shell\")\n if getattr(parsed_args, \"shell_after\", False):\n cmd.append(\"--shell-after\")\n\n if getattr(parsed_args, \"enable_manifest\", False):\n cmd.append(\"--enable-manifest\")\n image_information = getattr(parsed_args, \"manifest_image_information\", None)\n if image_information:\n cmd.extend([\"--manifest-image-information\", image_information])\n\n cmd.append(\"--build-for\")\n cmd.append(project.get_build_for())\n\n ua_token = getattr(parsed_args, \"ua_token\", \"\")\n if ua_token:\n cmd.extend([\"--ua-token\", ua_token])\n\n if getattr(parsed_args, \"enable_experimental_ua_services\", False):\n cmd.append(\"--enable-experimental-ua-services\")\n\n if getattr(parsed_args, \"enable_experimental_plugins\", False):\n cmd.append(\"--enable-experimental-plugins\")\n\n project_path = Path().absolute()\n output_dir = utils.get_managed_environment_project_path()\n\n instance_name = providers.get_instance_name(\n project_name=project.name,\n project_path=project_path,\n build_on=project.get_build_on(),\n build_for=project.get_build_for(),\n )\n\n snapcraft_base = project.get_effective_base()\n build_base = providers.SNAPCRAFT_BASE_TO_PROVIDER_BASE[snapcraft_base]\n\n if snapcraft_base == \"devel\":\n emit.progress(\n \"Running snapcraft with a devel instance is for testing purposes only.\",\n permanent=True,\n )\n allow_unstable = True\n else:\n allow_unstable = False\n\n base_configuration = providers.get_base_configuration(\n alias=build_base,\n instance_name=instance_name,\n http_proxy=parsed_args.http_proxy,\n https_proxy=parsed_args.https_proxy,\n )\n\n emit.progress(\"Launching instance...\")\n with provider.launched_environment(\n project_name=project.name,\n project_path=project_path,\n base_configuration=base_configuration,\n instance_name=instance_name,\n allow_unstable=allow_unstable,\n ) as instance:\n try:\n providers.prepare_instance(\n instance=instance,\n host_project_path=project_path,\n bind_ssh=parsed_args.bind_ssh,\n )\n with emit.pause():\n if command_name == \"try\":\n _expose_prime(project_path, instance)\n # run snapcraft inside the instance\n instance.execute_run(cmd, check=True, cwd=output_dir)\n except subprocess.CalledProcessError as err:\n raise errors.SnapcraftError(\n f\"Failed to execute {command_name} in instance.\",\n details=(\n \"Run the same command again with --debug to shell into \"\n \"the environment if you wish to introspect this failure.\"\n ),\n ) from err\n finally:\n providers.capture_logs_from_instance(instance)\n\n\ndef _expose_prime(project_path: Path, instance: Executor):\n \"\"\"Expose the instance's prime directory in ``project_path`` on the host.\"\"\"\n host_prime = project_path / \"prime\"\n host_prime.mkdir(exist_ok=True)\n\n managed_root = utils.get_managed_environment_home_path()\n dirs = 
craft_parts.ProjectDirs(work_dir=managed_root)\n\n instance.mount(host_source=project_path / \"prime\", target=dirs.prime_dir)\n\n\ndef _set_global_environment(info: ProjectInfo) -> None:\n \"\"\"Set global environment variables.\"\"\"\n info.global_environment.update(\n {\n \"SNAPCRAFT_ARCH_TRIPLET\": info.arch_triplet,\n \"SNAPCRAFT_TARGET_ARCH\": info.target_arch,\n \"SNAPCRAFT_PARALLEL_BUILD_COUNT\": str(info.parallel_build_count),\n \"SNAPCRAFT_PROJECT_VERSION\": info.get_project_var(\"version\", raw_read=True),\n \"SNAPCRAFT_PROJECT_GRADE\": info.get_project_var(\"grade\", raw_read=True),\n \"SNAPCRAFT_PROJECT_DIR\": str(info.project_dir),\n \"SNAPCRAFT_PROJECT_NAME\": str(info.project_name),\n \"SNAPCRAFT_STAGE\": str(info.stage_dir),\n \"SNAPCRAFT_PRIME\": str(info.prime_dir),\n }\n )\n\n\ndef _check_experimental_plugins(\n project: Project, enable_experimental_plugins: bool\n) -> None:\n \"\"\"Ensure the experimental plugin flag is enabled to use unstable plugins.\"\"\"\n for name, part in project.parts.items():\n if not isinstance(part, Dict):\n continue\n\n plugin = part.get(\"plugin\", \"\")\n if plugin not in _EXPERIMENTAL_PLUGINS:\n continue\n\n if enable_experimental_plugins:\n emit.progress(f\"*EXPERIMENTAL* plugin '{name}' enabled\", permanent=True)\n continue\n\n raise errors.SnapcraftError(\n f\"Plugin '{plugin}' in part '{name}' is unstable and may change in the future.\",\n resolution=\"Rerun with --enable-experimental-plugins to use this plugin.\",\n )\n\n\ndef _set_step_environment(step_info: StepInfo) -> bool:\n \"\"\"Set the step environment before executing each lifecycle step.\"\"\"\n step_info.step_environment.update(\n {\n \"SNAPCRAFT_PART_SRC\": str(step_info.part_src_dir),\n \"SNAPCRAFT_PART_SRC_WORK\": str(step_info.part_src_subdir),\n \"SNAPCRAFT_PART_BUILD\": str(step_info.part_build_dir),\n \"SNAPCRAFT_PART_BUILD_WORK\": str(step_info.part_build_subdir),\n \"SNAPCRAFT_PART_INSTALL\": str(step_info.part_install_dir),\n }\n )\n return True\n\n\ndef _patch_elf(step_info: StepInfo) -> bool:\n \"\"\"Patch rpath and interpreter in ELF files for classic mode.\"\"\"\n if \"enable-patchelf\" not in step_info.build_attributes:\n emit.debug(f\"patch_elf: not enabled for part {step_info.part_name!r}\")\n return True\n\n if not step_info.state:\n emit.debug(\"patch_elf: no state information\")\n return True\n\n try:\n # If libc is staged we'll find a dynamic linker in the payload. At\n # runtime the linker will be in the installed snap path.\n linker = elf_utils.get_dynamic_linker(\n root_path=step_info.prime_dir,\n snap_path=Path(f\"/snap/{step_info.project_name}/current\"),\n )\n except elf_errors.DynamicLinkerNotFound:\n # Otherwise look for the host linker, which should match the base\n # system linker. 
At runtime use the linker from the installed base\n # snap.\n linker = elf_utils.get_dynamic_linker(\n root_path=Path(\"/\"), snap_path=Path(f\"/snap/{step_info.base}/current\")\n )\n\n migrated_files = step_info.state.files\n patcher = Patcher(dynamic_linker=linker, root_path=step_info.prime_dir)\n elf_files = elf_utils.get_elf_files_from_list(step_info.prime_dir, migrated_files)\n soname_cache = SonameCache()\n arch_triplet = elf_utils.get_arch_triplet()\n\n for elf_file in elf_files:\n elf_file.load_dependencies(\n root_path=step_info.prime_dir,\n base_path=Path(f\"/snap/{step_info.base}/current\"),\n content_dirs=[], # classic snaps don't use content providers\n arch_triplet=arch_triplet,\n soname_cache=soname_cache,\n )\n\n relative_path = elf_file.path.relative_to(step_info.prime_dir)\n emit.progress(f\"Patch ELF file: {str(relative_path)!r}\")\n patcher.patch(elf_file=elf_file)\n\n return True\n\n\ndef _expand_environment(\n snapcraft_yaml: Dict[str, Any], *, parallel_build_count: int, target_arch: str\n) -> None:\n \"\"\"Expand global variables in the provided dictionary values.\n\n :param snapcraft_yaml: A dictionary containing the contents of the\n snapcraft.yaml project file.\n \"\"\"\n if utils.is_managed_mode():\n work_dir = utils.get_managed_environment_home_path()\n else:\n work_dir = Path.cwd()\n\n project_vars = {\n \"version\": snapcraft_yaml.get(\"version\", \"\"),\n \"grade\": snapcraft_yaml.get(\"grade\", \"\"),\n }\n\n if target_arch == \"all\":\n target_arch = get_host_architecture()\n\n dirs = craft_parts.ProjectDirs(work_dir=work_dir)\n info = craft_parts.ProjectInfo(\n application_name=\"snapcraft\", # not used in environment expansion\n cache_dir=Path(), # not used in environment expansion\n arch=convert_architecture_deb_to_platform(target_arch),\n parallel_build_count=parallel_build_count,\n project_name=snapcraft_yaml.get(\"name\", \"\"),\n project_dirs=dirs,\n project_vars=project_vars,\n )\n _set_global_environment(info)\n\n craft_parts.expand_environment(snapcraft_yaml, info=info, skip=[\"name\", \"version\"])\n\n\ndef get_build_plan(\n yaml_data: Dict[str, Any], parsed_args: \"argparse.Namespace\"\n) -> List[Tuple[str, str]]:\n \"\"\"Get a list of all build_on->build_for architectures from the project file.\n\n Additionally, check for the command line argument `--build-for <architecture>`\n When defined, the build plan will only contain builds where `build-for`\n matches `SNAPCRAFT_BUILD_FOR`.\n Note: `--build-for` defaults to the environmental variable `SNAPCRAFT_BUILD_FOR`.\n\n :param yaml_data: The project YAML data.\n :param parsed_args: snapcraft's argument namespace\n\n :return: List of tuples of every valid build-on->build-for combination.\n \"\"\"\n archs = ArchitectureProject.unmarshal(yaml_data).architectures\n\n host_arch = get_host_architecture()\n build_plan: List[Tuple[str, str]] = []\n\n # `isinstance()` calls are for mypy type checking and should not change logic\n for arch in [arch for arch in archs if isinstance(arch, Architecture)]:\n for build_on in arch.build_on:\n if build_on in host_arch and isinstance(arch.build_for, list):\n build_plan.append((host_arch, arch.build_for[0]))\n else:\n emit.verbose(\n f\"Skipping build-on: {build_on} build-for: {arch.build_for}\"\n f\" because build-on doesn't match host arch: {host_arch}\"\n )\n\n # filter out builds not matching argument `--build_for` or env `SNAPCRAFT_BUILD_FOR`\n build_for_arg = parsed_args.build_for\n if build_for_arg is not None:\n build_plan = [build for build in build_plan if 
build[1] == build_for_arg]\n\n if len(build_plan) == 0:\n emit.message(\n \"Could not make build plan:\"\n \" build-on architectures in snapcraft.yaml\"\n f\" does not match host architecture ({host_arch}).\"\n )\n else:\n log_output = \"Created build plan:\"\n for build in build_plan:\n log_output += f\"\\n build-on: {build[0]} build-for: {build[1]}\"\n emit.trace(log_output)\n\n return build_plan\n", "path": "snapcraft/parts/lifecycle.py" } ]
[ { "content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022-2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Parts lifecycle preparation and execution.\"\"\"\n\nimport copy\nimport os\nimport shutil\nimport subprocess\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Tuple\n\nimport craft_parts\nfrom craft_cli import emit\nfrom craft_parts import ProjectInfo, Step, StepInfo, callbacks\nfrom craft_providers import Executor\n\nfrom snapcraft import errors, extensions, linters, pack, providers, ua_manager, utils\nfrom snapcraft.elf import Patcher, SonameCache, elf_utils\nfrom snapcraft.elf import errors as elf_errors\nfrom snapcraft.linters import LinterStatus\nfrom snapcraft.meta import manifest, snap_yaml\nfrom snapcraft.projects import (\n Architecture,\n ArchitectureProject,\n GrammarAwareProject,\n Project,\n)\nfrom snapcraft.utils import (\n convert_architecture_deb_to_platform,\n get_host_architecture,\n process_version,\n)\n\nfrom . import grammar, yaml_utils\nfrom .parts import PartsLifecycle, launch_shell\nfrom .project_check import run_project_checks\nfrom .setup_assets import setup_assets\nfrom .update_metadata import update_project_metadata\n\nif TYPE_CHECKING:\n import argparse\n\n\n@dataclass\nclass _SnapProject:\n project_file: Path\n assets_dir: Path = Path(\"snap\")\n\n\n_SNAP_PROJECT_FILES = [\n _SnapProject(project_file=Path(\"snapcraft.yaml\")),\n _SnapProject(project_file=Path(\"snap/snapcraft.yaml\")),\n _SnapProject(\n project_file=Path(\"build-aux/snap/snapcraft.yaml\"),\n assets_dir=Path(\"build-aux/snap\"),\n ),\n _SnapProject(project_file=Path(\".snapcraft.yaml\")),\n]\n\n_CORE_PART_KEYS = [\"build-packages\", \"build-snaps\"]\n_CORE_PART_NAME = \"snapcraft/core\"\n_EXPERIMENTAL_PLUGINS = [\"kernel\"]\n\n\ndef get_snap_project() -> _SnapProject:\n \"\"\"Find the snapcraft.yaml to load.\n\n :raises SnapcraftError: if the project yaml file cannot be found.\n \"\"\"\n for snap_project in _SNAP_PROJECT_FILES:\n if snap_project.project_file.exists():\n return snap_project\n\n raise errors.ProjectMissing()\n\n\ndef apply_yaml(\n yaml_data: Dict[str, Any], build_on: str, build_for: str\n) -> Dict[str, Any]:\n \"\"\"Apply Snapcraft logic to yaml_data.\n\n Extensions are applied and advanced grammar is processed.\n The architectures data is reduced to architectures in the current build plan.\n\n :param yaml_data: The project YAML data.\n :param build_on: Architecture the snap project will be built on.\n :param build_for: Target architecture the snap project will be built to.\n\n :return: A dictionary of yaml data with snapcraft logic applied.\n \"\"\"\n # validate project grammar\n GrammarAwareProject.validate_grammar(yaml_data)\n\n # Special Snapcraft Part\n core_part = {k: yaml_data.pop(k) for k in _CORE_PART_KEYS if k in yaml_data}\n if core_part:\n core_part[\"plugin\"] = \"nil\"\n 
yaml_data[\"parts\"][_CORE_PART_NAME] = core_part\n\n yaml_data = extensions.apply_extensions(\n yaml_data, arch=build_on, target_arch=build_for\n )\n\n if \"parts\" in yaml_data:\n yaml_data[\"parts\"] = grammar.process_parts(\n parts_yaml_data=yaml_data[\"parts\"], arch=build_on, target_arch=build_for\n )\n\n # replace all architectures with the architectures in the current build plan\n yaml_data[\"architectures\"] = [Architecture(build_on=build_on, build_for=build_for)]\n\n return yaml_data\n\n\ndef process_yaml(project_file: Path) -> Dict[str, Any]:\n \"\"\"Process yaml data from file into a dictionary.\n\n :param project_file: Path to project.\n\n :raises SnapcraftError: if the project yaml file cannot be loaded.\n\n :return: The processed YAML data.\n \"\"\"\n try:\n with open(project_file, encoding=\"utf-8\") as yaml_file:\n yaml_data = yaml_utils.load(yaml_file)\n except OSError as err:\n msg = err.strerror\n if err.filename:\n msg = f\"{msg}: {err.filename!r}.\"\n raise errors.SnapcraftError(msg) from err\n\n return yaml_data\n\n\ndef extract_parse_info(yaml_data: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"Remove parse-info data from parts.\n\n :param yaml_data: The project YAML data.\n\n :return: The extracted parse info for each part.\n \"\"\"\n parse_info: Dict[str, List[str]] = {}\n\n if \"parts\" in yaml_data:\n for name, data in yaml_data[\"parts\"].items():\n if \"parse-info\" in data:\n parse_info[name] = data.pop(\"parse-info\")\n\n return parse_info\n\n\ndef run(command_name: str, parsed_args: \"argparse.Namespace\") -> None:\n \"\"\"Run the parts lifecycle.\n\n :raises SnapcraftError: if the step name is invalid, or the project\n yaml file cannot be loaded.\n :raises LegacyFallback: if the project's base is not core22.\n \"\"\"\n emit.debug(f\"command: {command_name}, arguments: {parsed_args}\")\n\n snap_project = get_snap_project()\n yaml_data = process_yaml(snap_project.project_file)\n start_time = datetime.now()\n\n if parsed_args.provider:\n raise errors.SnapcraftError(\"Option --provider is not supported.\")\n\n if yaml_data.get(\"ua-services\"):\n if not parsed_args.ua_token:\n raise errors.SnapcraftError(\n \"UA services require a UA token to be specified.\"\n )\n\n if not parsed_args.enable_experimental_ua_services:\n raise errors.SnapcraftError(\n \"Using UA services requires --enable-experimental-ua-services.\"\n )\n\n build_plan = get_build_plan(yaml_data, parsed_args)\n\n # Register our own callbacks\n callbacks.register_prologue(_set_global_environment)\n callbacks.register_pre_step(_set_step_environment)\n callbacks.register_post_step(_patch_elf, step_list=[Step.PRIME])\n\n build_count = utils.get_parallel_build_count()\n\n for build_on, build_for in build_plan:\n emit.verbose(f\"Running on {build_on} for {build_for}\")\n yaml_data_for_arch = apply_yaml(yaml_data, build_on, build_for)\n parse_info = extract_parse_info(yaml_data_for_arch)\n _expand_environment(\n yaml_data_for_arch,\n parallel_build_count=build_count,\n target_arch=build_for,\n )\n project = Project.unmarshal(yaml_data_for_arch)\n\n _run_command(\n command_name,\n project=project,\n parse_info=parse_info,\n parallel_build_count=build_count,\n assets_dir=snap_project.assets_dir,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n\n\ndef _run_command(\n command_name: str,\n *,\n project: Project,\n parse_info: Dict[str, List[str]],\n assets_dir: Path,\n start_time: datetime,\n parallel_build_count: int,\n parsed_args: \"argparse.Namespace\",\n) -> None:\n managed_mode = 
utils.is_managed_mode()\n part_names = getattr(parsed_args, \"parts\", None)\n\n enable_experimental_plugins = getattr(\n parsed_args, \"enable_experimental_plugins\", False\n )\n _check_experimental_plugins(project, enable_experimental_plugins)\n\n if not managed_mode:\n run_project_checks(project, assets_dir=assets_dir)\n\n if command_name == \"snap\":\n emit.progress(\n \"The 'snap' command is deprecated, use 'pack' instead.\",\n permanent=True,\n )\n\n if parsed_args.use_lxd or (\n not managed_mode\n and not parsed_args.destructive_mode\n and not os.getenv(\"SNAPCRAFT_BUILD_ENVIRONMENT\") == \"host\"\n ):\n if command_name == \"clean\" and not part_names:\n _clean_provider(project, parsed_args)\n else:\n _run_in_provider(project, command_name, parsed_args)\n return\n\n if managed_mode:\n work_dir = utils.get_managed_environment_home_path()\n project_dir = utils.get_managed_environment_project_path()\n else:\n work_dir = project_dir = Path.cwd()\n\n step_name = \"prime\" if command_name in (\"pack\", \"snap\", \"try\") else command_name\n\n track_stage_packages = getattr(parsed_args, \"enable_manifest\", False)\n\n lifecycle = PartsLifecycle(\n project.parts,\n work_dir=work_dir,\n assets_dir=assets_dir,\n base=project.get_effective_base(),\n project_base=project.base or \"\",\n confinement=project.confinement,\n package_repositories=project.package_repositories,\n parallel_build_count=parallel_build_count,\n part_names=part_names,\n adopt_info=project.adopt_info,\n project_name=project.name,\n parse_info=parse_info,\n project_vars={\n \"version\": project.version or \"\",\n \"grade\": project.grade or \"\",\n },\n extra_build_snaps=project.get_extra_build_snaps(),\n target_arch=project.get_build_for(),\n track_stage_packages=track_stage_packages,\n )\n\n if command_name == \"clean\":\n lifecycle.clean(part_names=part_names)\n return\n\n try:\n _run_lifecycle_and_pack(\n lifecycle,\n command_name=command_name,\n step_name=step_name,\n project=project,\n project_dir=project_dir,\n assets_dir=assets_dir,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n except PermissionError as err:\n if parsed_args.debug:\n emit.progress(str(err), permanent=True)\n launch_shell()\n raise errors.FilePermissionError(err.filename, reason=err.strerror)\n except OSError as err:\n msg = err.strerror\n if err.filename:\n msg = f\"{err.filename}: {msg}\"\n if parsed_args.debug:\n emit.progress(msg, permanent=True)\n launch_shell()\n raise errors.SnapcraftError(msg) from err\n except Exception as err:\n if parsed_args.debug:\n emit.progress(str(err), permanent=True)\n launch_shell()\n raise errors.SnapcraftError(str(err)) from err\n\n\ndef _run_lifecycle_and_pack(\n lifecycle: PartsLifecycle,\n *,\n command_name: str,\n step_name: str,\n project: Project,\n project_dir: Path,\n assets_dir: Path,\n start_time: datetime,\n parsed_args: \"argparse.Namespace\",\n) -> None:\n \"\"\"Execute the parts lifecycle, generate metadata, and create the snap.\"\"\"\n with ua_manager.ua_manager(parsed_args.ua_token, services=project.ua_services):\n lifecycle.run(\n step_name,\n shell=getattr(parsed_args, \"shell\", False),\n shell_after=getattr(parsed_args, \"shell_after\", False),\n )\n\n # Extract metadata and generate snap.yaml\n part_names = getattr(parsed_args, \"part_names\", None)\n\n if step_name == \"prime\" and not part_names:\n _generate_metadata(\n project=project,\n lifecycle=lifecycle,\n project_dir=project_dir,\n assets_dir=assets_dir,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n\n if 
command_name in (\"pack\", \"snap\"):\n issues = linters.run_linters(lifecycle.prime_dir, lint=project.lint)\n status = linters.report(issues, intermediate=True)\n\n # In case of linter errors, stop execution and return the error code.\n if status in (LinterStatus.ERRORS, LinterStatus.FATAL):\n raise errors.LinterError(\"Linter errors found\", exit_code=status)\n\n snap_filename = pack.pack_snap(\n lifecycle.prime_dir,\n output=parsed_args.output,\n compression=project.compression,\n name=project.name,\n version=process_version(project.version),\n target_arch=project.get_build_for(),\n )\n emit.message(f\"Created snap package {snap_filename}\")\n\n\ndef _generate_metadata(\n *,\n project: Project,\n lifecycle: PartsLifecycle,\n project_dir: Path,\n assets_dir: Path,\n start_time: datetime,\n parsed_args: \"argparse.Namespace\",\n):\n project_vars = lifecycle.project_vars\n\n emit.progress(\"Extracting and updating metadata...\")\n metadata_list = lifecycle.extract_metadata()\n update_project_metadata(\n project,\n project_vars=project_vars,\n metadata_list=metadata_list,\n assets_dir=assets_dir,\n prime_dir=lifecycle.prime_dir,\n )\n\n emit.progress(\"Copying snap assets...\")\n setup_assets(\n project,\n assets_dir=assets_dir,\n project_dir=project_dir,\n prime_dir=lifecycle.prime_dir,\n )\n\n emit.progress(\"Generating snap metadata...\")\n snap_yaml.write(project, lifecycle.prime_dir, arch=project.get_build_for())\n emit.progress(\"Generated snap metadata\", permanent=True)\n\n if parsed_args.enable_manifest:\n _generate_manifest(\n project,\n lifecycle=lifecycle,\n start_time=start_time,\n parsed_args=parsed_args,\n )\n\n\ndef _generate_manifest(\n project: Project,\n *,\n lifecycle: PartsLifecycle,\n start_time: datetime,\n parsed_args: \"argparse.Namespace\",\n) -> None:\n \"\"\"Create and populate the manifest file.\"\"\"\n emit.progress(\"Generating snap manifest...\")\n image_information = parsed_args.manifest_image_information or \"{}\"\n\n parts = copy.deepcopy(project.parts)\n for name, part in parts.items():\n assets = lifecycle.get_part_pull_assets(part_name=name)\n if assets:\n part[\"stage-packages\"] = assets.get(\"stage-packages\", []) or []\n for key in (\"stage\", \"prime\", \"stage-packages\", \"build-packages\"):\n part.setdefault(key, [])\n\n manifest.write(\n project,\n lifecycle.prime_dir,\n arch=project.get_build_for(),\n parts=parts,\n start_time=start_time,\n image_information=image_information,\n primed_stage_packages=lifecycle.get_primed_stage_packages(),\n )\n emit.progress(\"Generated snap manifest\", permanent=True)\n\n # Also copy the original snapcraft.yaml\n snap_project = get_snap_project()\n shutil.copy(snap_project.project_file, lifecycle.prime_dir / \"snap\")\n\n\ndef _clean_provider(project: Project, parsed_args: \"argparse.Namespace\") -> None:\n \"\"\"Clean the provider environment.\n\n :param project: The project to clean.\n \"\"\"\n emit.progress(\"Cleaning build provider\")\n provider_name = \"lxd\" if parsed_args.use_lxd else None\n provider = providers.get_provider(provider_name)\n instance_name = providers.get_instance_name(\n project_name=project.name,\n project_path=Path().absolute(),\n build_on=project.get_build_on(),\n build_for=project.get_build_for(),\n )\n emit.debug(f\"Cleaning instance {instance_name}\")\n provider.clean_project_environments(instance_name=instance_name)\n emit.progress(\"Cleaned build provider\", permanent=True)\n\n\n# pylint: disable-next=too-many-branches, too-many-statements\ndef _run_in_provider(\n project: 
Project, command_name: str, parsed_args: \"argparse.Namespace\"\n) -> None:\n \"\"\"Pack image in provider instance.\"\"\"\n emit.debug(\"Checking build provider availability\")\n provider_name = \"lxd\" if parsed_args.use_lxd else None\n provider = providers.get_provider(provider_name)\n providers.ensure_provider_is_available(provider)\n\n cmd = [\"snapcraft\", command_name]\n\n if hasattr(parsed_args, \"parts\"):\n cmd.extend(parsed_args.parts)\n\n if getattr(parsed_args, \"output\", None):\n cmd.extend([\"--output\", parsed_args.output])\n\n mode = emit.get_mode().name.lower()\n cmd.append(f\"--verbosity={mode}\")\n\n if parsed_args.debug:\n cmd.append(\"--debug\")\n if getattr(parsed_args, \"shell\", False):\n cmd.append(\"--shell\")\n if getattr(parsed_args, \"shell_after\", False):\n cmd.append(\"--shell-after\")\n\n if getattr(parsed_args, \"enable_manifest\", False):\n cmd.append(\"--enable-manifest\")\n image_information = getattr(parsed_args, \"manifest_image_information\", None)\n if image_information:\n cmd.extend([\"--manifest-image-information\", image_information])\n\n cmd.append(\"--build-for\")\n cmd.append(project.get_build_for())\n\n ua_token = getattr(parsed_args, \"ua_token\", \"\")\n if ua_token:\n cmd.extend([\"--ua-token\", ua_token])\n\n if getattr(parsed_args, \"enable_experimental_ua_services\", False):\n cmd.append(\"--enable-experimental-ua-services\")\n\n if getattr(parsed_args, \"enable_experimental_plugins\", False):\n cmd.append(\"--enable-experimental-plugins\")\n\n project_path = Path().absolute()\n output_dir = utils.get_managed_environment_project_path()\n\n instance_name = providers.get_instance_name(\n project_name=project.name,\n project_path=project_path,\n build_on=project.get_build_on(),\n build_for=project.get_build_for(),\n )\n\n snapcraft_base = project.get_effective_base()\n build_base = providers.SNAPCRAFT_BASE_TO_PROVIDER_BASE[snapcraft_base]\n\n if snapcraft_base == \"devel\":\n emit.progress(\n \"Running snapcraft with a devel instance is for testing purposes only.\",\n permanent=True,\n )\n allow_unstable = True\n else:\n allow_unstable = False\n\n base_configuration = providers.get_base_configuration(\n alias=build_base,\n instance_name=instance_name,\n http_proxy=parsed_args.http_proxy,\n https_proxy=parsed_args.https_proxy,\n )\n\n emit.progress(\"Launching instance...\")\n with provider.launched_environment(\n project_name=project.name,\n project_path=project_path,\n base_configuration=base_configuration,\n instance_name=instance_name,\n allow_unstable=allow_unstable,\n ) as instance:\n try:\n providers.prepare_instance(\n instance=instance,\n host_project_path=project_path,\n bind_ssh=parsed_args.bind_ssh,\n )\n with emit.pause():\n if command_name == \"try\":\n _expose_prime(project_path, instance)\n # run snapcraft inside the instance\n instance.execute_run(cmd, check=True, cwd=output_dir)\n except subprocess.CalledProcessError as err:\n raise errors.SnapcraftError(\n f\"Failed to execute {command_name} in instance.\",\n details=(\n \"Run the same command again with --debug to shell into \"\n \"the environment if you wish to introspect this failure.\"\n ),\n ) from err\n finally:\n providers.capture_logs_from_instance(instance)\n\n\ndef _expose_prime(project_path: Path, instance: Executor):\n \"\"\"Expose the instance's prime directory in ``project_path`` on the host.\"\"\"\n host_prime = project_path / \"prime\"\n host_prime.mkdir(exist_ok=True)\n\n managed_root = utils.get_managed_environment_home_path()\n dirs = 
craft_parts.ProjectDirs(work_dir=managed_root)\n\n instance.mount(host_source=project_path / \"prime\", target=dirs.prime_dir)\n\n\ndef _set_global_environment(info: ProjectInfo) -> None:\n \"\"\"Set global environment variables.\"\"\"\n info.global_environment.update(\n {\n \"SNAPCRAFT_ARCH_TRIPLET\": info.arch_triplet,\n \"SNAPCRAFT_TARGET_ARCH\": info.target_arch,\n \"SNAPCRAFT_PARALLEL_BUILD_COUNT\": str(info.parallel_build_count),\n \"SNAPCRAFT_PROJECT_VERSION\": info.get_project_var(\"version\", raw_read=True),\n \"SNAPCRAFT_PROJECT_GRADE\": info.get_project_var(\"grade\", raw_read=True),\n \"SNAPCRAFT_PROJECT_DIR\": str(info.project_dir),\n \"SNAPCRAFT_PROJECT_NAME\": str(info.project_name),\n \"SNAPCRAFT_STAGE\": str(info.stage_dir),\n \"SNAPCRAFT_PRIME\": str(info.prime_dir),\n }\n )\n\n\ndef _check_experimental_plugins(\n project: Project, enable_experimental_plugins: bool\n) -> None:\n \"\"\"Ensure the experimental plugin flag is enabled to use unstable plugins.\"\"\"\n for name, part in project.parts.items():\n if not isinstance(part, Dict):\n continue\n\n plugin = part.get(\"plugin\", \"\")\n if plugin not in _EXPERIMENTAL_PLUGINS:\n continue\n\n if enable_experimental_plugins:\n emit.progress(f\"*EXPERIMENTAL* plugin '{name}' enabled\", permanent=True)\n continue\n\n raise errors.SnapcraftError(\n f\"Plugin '{plugin}' in part '{name}' is unstable and may change in the future.\",\n resolution=\"Rerun with --enable-experimental-plugins to use this plugin.\",\n )\n\n\ndef _set_step_environment(step_info: StepInfo) -> bool:\n \"\"\"Set the step environment before executing each lifecycle step.\"\"\"\n step_info.step_environment.update(\n {\n \"SNAPCRAFT_PART_SRC\": str(step_info.part_src_dir),\n \"SNAPCRAFT_PART_SRC_WORK\": str(step_info.part_src_subdir),\n \"SNAPCRAFT_PART_BUILD\": str(step_info.part_build_dir),\n \"SNAPCRAFT_PART_BUILD_WORK\": str(step_info.part_build_subdir),\n \"SNAPCRAFT_PART_INSTALL\": str(step_info.part_install_dir),\n }\n )\n return True\n\n\ndef _patch_elf(step_info: StepInfo) -> bool:\n \"\"\"Patch rpath and interpreter in ELF files for classic mode.\"\"\"\n if \"enable-patchelf\" not in step_info.build_attributes:\n emit.debug(f\"patch_elf: not enabled for part {step_info.part_name!r}\")\n return True\n\n if not step_info.state:\n emit.debug(\"patch_elf: no state information\")\n return True\n\n try:\n # If libc is staged we'll find a dynamic linker in the payload. At\n # runtime the linker will be in the installed snap path.\n linker = elf_utils.get_dynamic_linker(\n root_path=step_info.prime_dir,\n snap_path=Path(f\"/snap/{step_info.project_name}/current\"),\n )\n except elf_errors.DynamicLinkerNotFound:\n # Otherwise look for the host linker, which should match the base\n # system linker. 
At runtime use the linker from the installed base\n # snap.\n linker = elf_utils.get_dynamic_linker(\n root_path=Path(\"/\"), snap_path=Path(f\"/snap/{step_info.base}/current\")\n )\n\n migrated_files = step_info.state.files\n patcher = Patcher(dynamic_linker=linker, root_path=step_info.prime_dir)\n elf_files = elf_utils.get_elf_files_from_list(step_info.prime_dir, migrated_files)\n soname_cache = SonameCache()\n arch_triplet = elf_utils.get_arch_triplet()\n\n for elf_file in elf_files:\n elf_file.load_dependencies(\n root_path=step_info.prime_dir,\n base_path=Path(f\"/snap/{step_info.base}/current\"),\n content_dirs=[], # classic snaps don't use content providers\n arch_triplet=arch_triplet,\n soname_cache=soname_cache,\n )\n\n relative_path = elf_file.path.relative_to(step_info.prime_dir)\n emit.progress(f\"Patch ELF file: {str(relative_path)!r}\")\n patcher.patch(elf_file=elf_file)\n\n return True\n\n\ndef _expand_environment(\n snapcraft_yaml: Dict[str, Any], *, parallel_build_count: int, target_arch: str\n) -> None:\n \"\"\"Expand global variables in the provided dictionary values.\n\n :param snapcraft_yaml: A dictionary containing the contents of the\n snapcraft.yaml project file.\n \"\"\"\n if utils.is_managed_mode():\n work_dir = utils.get_managed_environment_home_path()\n else:\n work_dir = Path.cwd()\n\n project_vars = {\n \"version\": snapcraft_yaml.get(\"version\", \"\"),\n \"grade\": snapcraft_yaml.get(\"grade\", \"\"),\n }\n\n if target_arch == \"all\":\n target_arch = get_host_architecture()\n\n dirs = craft_parts.ProjectDirs(work_dir=work_dir)\n info = craft_parts.ProjectInfo(\n application_name=\"snapcraft\", # not used in environment expansion\n cache_dir=Path(), # not used in environment expansion\n arch=convert_architecture_deb_to_platform(target_arch),\n parallel_build_count=parallel_build_count,\n project_name=snapcraft_yaml.get(\"name\", \"\"),\n project_dirs=dirs,\n project_vars=project_vars,\n )\n _set_global_environment(info)\n\n craft_parts.expand_environment(snapcraft_yaml, info=info, skip=[\"name\", \"version\"])\n\n\ndef get_build_plan(\n yaml_data: Dict[str, Any], parsed_args: \"argparse.Namespace\"\n) -> List[Tuple[str, str]]:\n \"\"\"Get a list of all build_on->build_for architectures from the project file.\n\n Additionally, check for the command line argument `--build-for <architecture>`\n When defined, the build plan will only contain builds where `build-for`\n matches `SNAPCRAFT_BUILD_FOR`.\n Note: `--build-for` defaults to the environmental variable `SNAPCRAFT_BUILD_FOR`.\n\n :param yaml_data: The project YAML data.\n :param parsed_args: snapcraft's argument namespace\n\n :return: List of tuples of every valid build-on->build-for combination.\n \"\"\"\n archs = ArchitectureProject.unmarshal(yaml_data).architectures\n\n host_arch = get_host_architecture()\n build_plan: List[Tuple[str, str]] = []\n\n # `isinstance()` calls are for mypy type checking and should not change logic\n for arch in [arch for arch in archs if isinstance(arch, Architecture)]:\n for build_on in arch.build_on:\n if build_on in host_arch and isinstance(arch.build_for, list):\n build_plan.append((host_arch, arch.build_for[0]))\n else:\n emit.verbose(\n f\"Skipping build-on: {build_on} build-for: {arch.build_for}\"\n f\" because build-on doesn't match host arch: {host_arch}\"\n )\n\n # filter out builds not matching argument `--build_for` or env `SNAPCRAFT_BUILD_FOR`\n build_for_arg = parsed_args.build_for\n if build_for_arg is not None:\n build_plan = [build for build in build_plan if 
build[1] == build_for_arg]\n\n if len(build_plan) == 0:\n emit.message(\n \"Could not make build plan:\"\n \" build-on architectures in snapcraft.yaml\"\n f\" does not match host architecture ({host_arch}).\"\n )\n else:\n log_output = \"Created build plan:\"\n for build in build_plan:\n log_output += f\"\\n build-on: {build[0]} build-for: {build[1]}\"\n emit.trace(log_output)\n\n return build_plan\n", "path": "snapcraft/parts/lifecycle.py" } ]
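The `get_build_plan` docstring above describes how the build plan is filtered. The sketch below restates that filtering on toy data; the variable values are invented for illustration and only the filtering rule itself comes from the code above.

```python
# Toy illustration of the build-plan filtering described in get_build_plan:
# keep build-on entries matching the host architecture, then optionally
# narrow to the requested --build-for / SNAPCRAFT_BUILD_FOR target.
host_arch = "amd64"                                   # assumed host
architectures = [("amd64", "amd64"), ("amd64", "arm64"), ("s390x", "s390x")]

build_plan = [(on, for_) for on, for_ in architectures if on == host_arch]

build_for_arg = "arm64"                               # e.g. --build-for arm64
if build_for_arg is not None:
    build_plan = [b for b in build_plan if b[1] == build_for_arg]

print(build_plan)  # [('amd64', 'arm64')]
```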
diff --git a/snapcraft/parts/lifecycle.py b/snapcraft/parts/lifecycle.py index 3b70b876e1..6fb3ceb3c6 100644 --- a/snapcraft/parts/lifecycle.py +++ b/snapcraft/parts/lifecycle.py @@ -251,7 +251,7 @@ def _run_command( permanent=True, ) - if ( + if parsed_args.use_lxd or ( not managed_mode and not parsed_args.destructive_mode and not os.getenv("SNAPCRAFT_BUILD_ENVIRONMENT") == "host" diff --git a/tests/spread/providers/use-lxd/task.yaml b/tests/spread/providers/use-lxd/task.yaml new file mode 100644 index 0000000000..e636b1e922 --- /dev/null +++ b/tests/spread/providers/use-lxd/task.yaml @@ -0,0 +1,29 @@ +summary: Test --use-lxd takes priority over environment variables +systems: + - ubuntu-22.04* + +prepare: | + snapcraft init + +restore: | + rm -rf ./*.snap + +execute: | + export SNAPCRAFT_BUILD_ENVIRONMENT="host" + + snapcraft pull --use-lxd + + if [[ -d parts ]]; then + echo "snapcraft did not run inside a lxd instance" + exit 1 + fi + + unset SNAPCRAFT_BUILD_ENVIRONMENT + export SNAPCRAFT_MANAGED_MODE=1 + + snapcraft pull --use-lxd + + if [[ -d parts ]]; then + echo "snapcraft did not run inside a lxd instance" + exit 1 + fi diff --git a/tests/unit/parts/test_lifecycle.py b/tests/unit/parts/test_lifecycle.py index ed1d7b9c27..e770f2f7c3 100644 --- a/tests/unit/parts/test_lifecycle.py +++ b/tests/unit/parts/test_lifecycle.py @@ -306,11 +306,33 @@ def test_lifecycle_run_command_step( assert run_mock.mock_calls == [call(step, **call_args)] [email protected]("managed_mode", [True, False]) [email protected]("build_env", [None, "host", "multipass", "lxd", "other"]) @pytest.mark.parametrize("cmd", ["pack", "snap"]) -def test_lifecycle_run_command_pack(cmd, snapcraft_yaml, project_vars, new_dir, mocker): +def test_lifecycle_run_local_destructive_mode( + managed_mode, + build_env, + cmd, + snapcraft_yaml, + project_vars, + new_dir, + mocker, + monkeypatch, +): + """Run the lifecycle locally when destructive_mode is True.""" project = Project.unmarshal(snapcraft_yaml(base="core22")) + run_in_provider_mock = mocker.patch("snapcraft.parts.lifecycle._run_in_provider") run_mock = mocker.patch("snapcraft.parts.PartsLifecycle.run") pack_mock = mocker.patch("snapcraft.pack.pack_snap") + mocker.patch("snapcraft.utils.is_managed_mode", return_value=managed_mode) + mocker.patch( + "snapcraft.utils.get_managed_environment_home_path", + return_value=new_dir / "home", + ) + if build_env: + monkeypatch.setenv("SNAPCRAFT_BUILD_ENVIRONMENT", build_env) + else: + monkeypatch.delenv("SNAPCRAFT_BUILD_ENVIRONMENT", raising=False) parts_lifecycle._run_command( cmd, @@ -333,10 +355,11 @@ def test_lifecycle_run_command_pack(cmd, snapcraft_yaml, project_vars, new_dir, ), ) + assert run_in_provider_mock.mock_calls == [] assert run_mock.mock_calls == [call("prime", shell=False, shell_after=False)] assert pack_mock.mock_calls[:1] == [ call( - new_dir / "prime", + new_dir / "home/prime" if managed_mode else new_dir / "prime", output=None, compression="xz", name="mytest", @@ -346,10 +369,20 @@ def test_lifecycle_run_command_pack(cmd, snapcraft_yaml, project_vars, new_dir, ] [email protected]("destructive_mode", [True, False]) [email protected]("build_env", [None, "host", "multipass", "lxd", "other"]) @pytest.mark.parametrize("cmd", ["pack", "snap"]) -def test_lifecycle_pack_destructive_mode( - cmd, snapcraft_yaml, project_vars, new_dir, mocker +def test_lifecycle_run_local_managed_mode( + destructive_mode, + build_env, + cmd, + snapcraft_yaml, + project_vars, + new_dir, + mocker, + monkeypatch, ): + """Run the lifecycle 
locally when managed_mode is True.""" project = Project.unmarshal(snapcraft_yaml(base="core22")) run_in_provider_mock = mocker.patch("snapcraft.parts.lifecycle._run_in_provider") run_mock = mocker.patch("snapcraft.parts.PartsLifecycle.run") @@ -359,6 +392,10 @@ def test_lifecycle_pack_destructive_mode( "snapcraft.utils.get_managed_environment_home_path", return_value=new_dir / "home", ) + if build_env: + monkeypatch.setenv("SNAPCRAFT_BUILD_ENVIRONMENT", build_env) + else: + monkeypatch.delenv("SNAPCRAFT_BUILD_ENVIRONMENT", raising=False) parts_lifecycle._run_command( cmd, @@ -372,7 +409,7 @@ def test_lifecycle_pack_destructive_mode( output=None, debug=False, enable_manifest=False, - destructive_mode=True, + destructive_mode=destructive_mode, shell=False, shell_after=False, use_lxd=False, @@ -395,17 +432,30 @@ def test_lifecycle_pack_destructive_mode( ] [email protected]("managed_mode", [True, False]) [email protected]("destructive_mode", [True, False]) @pytest.mark.parametrize("cmd", ["pack", "snap"]) -def test_lifecycle_pack_managed(cmd, snapcraft_yaml, project_vars, new_dir, mocker): +def test_lifecycle_run_local_build_env( + managed_mode, + destructive_mode, + cmd, + monkeypatch, + snapcraft_yaml, + project_vars, + new_dir, + mocker, +): + """Run the lifecycle locally when the build environment is 'host'.""" project = Project.unmarshal(snapcraft_yaml(base="core22")) run_in_provider_mock = mocker.patch("snapcraft.parts.lifecycle._run_in_provider") run_mock = mocker.patch("snapcraft.parts.PartsLifecycle.run") pack_mock = mocker.patch("snapcraft.pack.pack_snap") - mocker.patch("snapcraft.utils.is_managed_mode", return_value=True) + mocker.patch("snapcraft.utils.is_managed_mode", return_value=managed_mode) mocker.patch( "snapcraft.utils.get_managed_environment_home_path", return_value=new_dir / "home", ) + monkeypatch.setenv("SNAPCRAFT_BUILD_ENVIRONMENT", "host") parts_lifecycle._run_command( cmd, @@ -422,7 +472,7 @@ def test_lifecycle_pack_managed(cmd, snapcraft_yaml, project_vars, new_dir, mock build_for=None, enable_manifest=False, manifest_image_information=None, - destructive_mode=False, + destructive_mode=destructive_mode, shell=False, shell_after=False, use_lxd=False, @@ -435,7 +485,7 @@ def test_lifecycle_pack_managed(cmd, snapcraft_yaml, project_vars, new_dir, mock assert run_mock.mock_calls == [call("prime", shell=False, shell_after=False)] assert pack_mock.mock_calls[:1] == [ call( - new_dir / "home/prime", + new_dir / "home/prime" if managed_mode else new_dir / "prime", output=None, compression="xz", name="mytest", @@ -445,12 +495,21 @@ def test_lifecycle_pack_managed(cmd, snapcraft_yaml, project_vars, new_dir, mock ] [email protected]("build_env", [None, "lxd", "multipass", "other"]) @pytest.mark.parametrize("cmd", ["pack", "snap"]) -def test_lifecycle_pack_not_managed(cmd, snapcraft_yaml, new_dir, mocker): +def test_lifecycle_run_in_provider_by_default( + build_env, cmd, snapcraft_yaml, new_dir, mocker, monkeypatch +): + """Run lifecycle in a provider when not in managed_mode, not in destructive_mode, + and the build environment is not 'host'.""" project = Project.unmarshal(snapcraft_yaml(base="core22")) run_in_provider_mock = mocker.patch("snapcraft.parts.lifecycle._run_in_provider") run_mock = mocker.patch("snapcraft.parts.PartsLifecycle.run") mocker.patch("snapcraft.utils.is_managed_mode", return_value=False) + if build_env: + monkeypatch.setenv("SNAPCRAFT_BUILD_ENVIRONMENT", build_env) + else: + monkeypatch.delenv("SNAPCRAFT_BUILD_ENVIRONMENT", raising=False) 
parts_lifecycle._run_command( cmd, @@ -484,6 +543,78 @@ def test_lifecycle_pack_not_managed(cmd, snapcraft_yaml, new_dir, mocker): ] [email protected]("managed_mode", [True, False]) [email protected]("destructive_mode", [True, False]) [email protected]("build_env", [None, "host", "lxd", "multipass", "other"]) [email protected]("cmd", ["pack", "snap"]) +def test_lifecycle_run_in_provider_use_lxd( + managed_mode, + destructive_mode, + build_env, + cmd, + mocker, + monkeypatch, + new_dir, + project_vars, + snapcraft_yaml, +): + """Run the lifecycle in a provider when `use_lxd` is true.""" + project = Project.unmarshal(snapcraft_yaml(base="core22")) + run_in_provider_mock = mocker.patch("snapcraft.parts.lifecycle._run_in_provider") + run_mock = mocker.patch("snapcraft.parts.PartsLifecycle.run") + mocker.patch("snapcraft.pack.pack_snap") + mocker.patch( + "snapcraft.utils.get_managed_environment_home_path", + return_value=new_dir / "home", + ) + mocker.patch("snapcraft.utils.is_managed_mode", return_value=managed_mode) + if build_env: + monkeypatch.setenv("SNAPCRAFT_BUILD_ENVIRONMENT", build_env) + else: + monkeypatch.delenv("SNAPCRAFT_BUILD_ENVIRONMENT", raising=False) + + parts_lifecycle._run_command( + cmd, + project=project, + parse_info={}, + assets_dir=Path(), + start_time=datetime.now(), + parallel_build_count=8, + parsed_args=argparse.Namespace( + directory=None, + output=None, + debug=False, + enable_manifest=False, + destructive_mode=destructive_mode, + shell=False, + shell_after=False, + use_lxd=True, + ua_token=None, + parts=[], + ), + ) + + assert run_mock.mock_calls == [] + assert run_in_provider_mock.mock_calls == [ + call( + project, + cmd, + argparse.Namespace( + directory=None, + output=None, + debug=False, + enable_manifest=False, + destructive_mode=destructive_mode, + shell=False, + shell_after=False, + use_lxd=True, + ua_token=None, + parts=[], + ), + ) + ] + + @pytest.mark.parametrize("cmd", ["pack", "snap"]) def test_lifecycle_pack_metadata_error(cmd, snapcraft_yaml, new_dir, mocker): project = Project.unmarshal(snapcraft_yaml(base="core22")) @@ -1703,6 +1834,7 @@ def test_lifecycle_write_metadata( parsed_args = argparse.Namespace( debug=False, destructive_mode=True, + use_lxd=False, enable_manifest=True, ua_token=None, parts=[],
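The diff above changes only the condition that decides whether snapcraft re-runs itself inside a build provider. A standalone restatement of that condition is sketched below; the function name and signature are mine, only the boolean logic comes from the diff.

```python
import os

def should_run_in_provider(use_lxd: bool, managed_mode: bool, destructive_mode: bool) -> bool:
    """--use-lxd now always forces a provider; otherwise fall back to the old
    check (not managed, not destructive, and build environment not 'host')."""
    return use_lxd or (
        not managed_mode
        and not destructive_mode
        and not os.getenv("SNAPCRAFT_BUILD_ENVIRONMENT") == "host"
    )

# Before the fix the use_lxd term was missing, so exporting
# SNAPCRAFT_BUILD_ENVIRONMENT=host made `snapcraft pull --use-lxd` run on the
# host, which is exactly what the new spread test guards against.
assert should_run_in_provider(use_lxd=True, managed_mode=False, destructive_mode=False)
```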
pypa__pip-1390
pip doesn't detect a venv-created virtual environment as a virtual environment. The venv integration in Python 3.4 fails if PIP_REQUIREVIRTUALENV is set (http://bugs.python.org/issue19734). I'm currently working around this by forcibly clearing the setting in the test, but the PIP_REQUIREVIRTUALENV check should pass when sys.prefix and sys.base_prefix are different.
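The check the reporter asks for is small; below is a sketch of virtual-environment detection that also covers PEP 405 venvs, mirroring the fix shown in the files and diff that follow.

```python
import sys

def running_under_virtualenv() -> bool:
    # classic virtualenv exposes the original interpreter as sys.real_prefix
    if hasattr(sys, "real_prefix"):
        return True
    # a PEP 405 venv leaves sys.base_prefix pointing at the base install,
    # so the two prefixes differ inside the venv; on older Pythons
    # base_prefix is absent and the getattr default makes this False
    return sys.prefix != getattr(sys, "base_prefix", sys.prefix)
```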
[ { "content": "\"\"\"Locations where we look for configs, install stuff, etc\"\"\"\n\nimport sys\nimport site\nimport os\nimport tempfile\nfrom distutils.command.install import install, SCHEME_KEYS\nimport getpass\nfrom pip.backwardcompat import get_python_lib, get_path_uid, user_site\nimport pip.exceptions\n\n\nDELETE_MARKER_MESSAGE = '''\\\nThis file is placed here by pip to indicate the source was put\nhere by pip.\n\nOnce this package is successfully installed this source code will be\ndeleted (unless you remove this file).\n'''\nPIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'\n\ndef write_delete_marker_file(directory):\n \"\"\"\n Write the pip delete marker file into this directory.\n \"\"\"\n filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)\n marker_fp = open(filepath, 'w')\n marker_fp.write(DELETE_MARKER_MESSAGE)\n marker_fp.close()\n\n\ndef running_under_virtualenv():\n \"\"\"\n Return True if we're running inside a virtualenv, False otherwise.\n\n \"\"\"\n return hasattr(sys, 'real_prefix')\n\n\ndef virtualenv_no_global():\n \"\"\"\n Return True if in a venv and no system site packages.\n \"\"\"\n #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file\n site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))\n no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')\n if running_under_virtualenv() and os.path.isfile(no_global_file):\n return True\n\ndef __get_username():\n \"\"\" Returns the effective username of the current process. \"\"\"\n if sys.platform == 'win32':\n return getpass.getuser()\n import pwd\n return pwd.getpwuid(os.geteuid()).pw_name\n\ndef _get_build_prefix():\n \"\"\" Returns a safe build_prefix \"\"\"\n path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' %\n __get_username())\n if sys.platform == 'win32':\n \"\"\" on windows(tested on 7) temp dirs are isolated \"\"\"\n return path\n try:\n os.mkdir(path)\n write_delete_marker_file(path)\n except OSError:\n file_uid = None\n try:\n # raises OSError for symlinks\n # https://github.com/pypa/pip/pull/935#discussion_r5307003\n file_uid = get_path_uid(path)\n except OSError:\n file_uid = None\n\n if file_uid != os.geteuid():\n msg = \"The temporary folder for building (%s) is either not owned by you, or is a symlink.\" \\\n % path\n print (msg)\n print(\"pip will not work until the temporary folder is \" + \\\n \"either deleted or is a real directory owned by your user account.\")\n raise pip.exceptions.InstallationError(msg)\n return path\n\nif running_under_virtualenv():\n build_prefix = os.path.join(sys.prefix, 'build')\n src_prefix = os.path.join(sys.prefix, 'src')\nelse:\n # Note: intentionally NOT using mkdtemp\n # See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp\n build_prefix = _get_build_prefix()\n\n ## FIXME: keep src in cwd for now (it is not a temporary folder)\n try:\n src_prefix = os.path.join(os.getcwd(), 'src')\n except OSError:\n # In case the current working directory has been renamed or deleted\n sys.exit(\"The folder you are executing pip from can no longer be found.\")\n\n# under Mac OS X + virtualenv sys.prefix is not properly resolved\n# it is something like /path/to/python/bin/..\n# Note: using realpath due to tmp dirs on OSX being symlinks\nbuild_prefix = os.path.abspath(os.path.realpath(build_prefix))\nsrc_prefix = os.path.abspath(src_prefix)\n\n# FIXME doesn't account for venv linked to global site-packages\n\nsite_packages = get_python_lib()\nuser_dir = 
os.path.expanduser('~')\nif sys.platform == 'win32':\n bin_py = os.path.join(sys.prefix, 'Scripts')\n bin_user = os.path.join(user_site, 'Scripts') if user_site else None\n # buildout uses 'bin' on Windows too?\n if not os.path.exists(bin_py):\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, 'pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.ini')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\nelse:\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, '.pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.conf')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\n\n # Forcing to use /usr/local/bin for standard Mac OS X framework installs\n # Also log to ~/Library/Logs/ for use with the Console.app log viewer\n if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':\n bin_py = '/usr/local/bin'\n default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log')\n\n\ndef distutils_scheme(dist_name, user=False, home=None, root=None):\n \"\"\"\n Return a distutils install scheme\n \"\"\"\n from distutils.dist import Distribution\n\n scheme = {}\n d = Distribution({'name': dist_name})\n d.parse_config_files()\n i = d.get_command_obj('install', create=True)\n # NOTE: setting user or home has the side-effect of creating the home dir or\n # user base for installations during finalize_options()\n # ideally, we'd prefer a scheme class that has no side-effects.\n i.user = user or i.user\n i.home = home or i.home\n i.root = root or i.root\n i.finalize_options()\n for key in SCHEME_KEYS:\n scheme[key] = getattr(i, 'install_'+key)\n\n if running_under_virtualenv():\n scheme['headers'] = os.path.join(sys.prefix,\n 'include',\n 'site',\n 'python' + sys.version[:3],\n dist_name)\n\n if root is not None:\n scheme[\"headers\"] = os.path.join(\n root,\n os.path.abspath(scheme[\"headers\"])[1:],\n )\n\n return scheme\n", "path": "pip/locations.py" } ]
[ { "content": "\"\"\"Locations where we look for configs, install stuff, etc\"\"\"\n\nimport sys\nimport site\nimport os\nimport tempfile\nfrom distutils.command.install import install, SCHEME_KEYS\nimport getpass\nfrom pip.backwardcompat import get_python_lib, get_path_uid, user_site\nimport pip.exceptions\n\n\nDELETE_MARKER_MESSAGE = '''\\\nThis file is placed here by pip to indicate the source was put\nhere by pip.\n\nOnce this package is successfully installed this source code will be\ndeleted (unless you remove this file).\n'''\nPIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'\n\ndef write_delete_marker_file(directory):\n \"\"\"\n Write the pip delete marker file into this directory.\n \"\"\"\n filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)\n marker_fp = open(filepath, 'w')\n marker_fp.write(DELETE_MARKER_MESSAGE)\n marker_fp.close()\n\n\ndef running_under_virtualenv():\n \"\"\"\n Return True if we're running inside a virtualenv, False otherwise.\n\n \"\"\"\n if hasattr(sys, 'real_prefix'):\n return True\n elif sys.prefix != getattr(sys, \"base_prefix\", sys.prefix):\n return True\n\n return False\n\n\ndef virtualenv_no_global():\n \"\"\"\n Return True if in a venv and no system site packages.\n \"\"\"\n #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file\n site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))\n no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')\n if running_under_virtualenv() and os.path.isfile(no_global_file):\n return True\n\ndef __get_username():\n \"\"\" Returns the effective username of the current process. \"\"\"\n if sys.platform == 'win32':\n return getpass.getuser()\n import pwd\n return pwd.getpwuid(os.geteuid()).pw_name\n\ndef _get_build_prefix():\n \"\"\" Returns a safe build_prefix \"\"\"\n path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' %\n __get_username())\n if sys.platform == 'win32':\n \"\"\" on windows(tested on 7) temp dirs are isolated \"\"\"\n return path\n try:\n os.mkdir(path)\n write_delete_marker_file(path)\n except OSError:\n file_uid = None\n try:\n # raises OSError for symlinks\n # https://github.com/pypa/pip/pull/935#discussion_r5307003\n file_uid = get_path_uid(path)\n except OSError:\n file_uid = None\n\n if file_uid != os.geteuid():\n msg = \"The temporary folder for building (%s) is either not owned by you, or is a symlink.\" \\\n % path\n print (msg)\n print(\"pip will not work until the temporary folder is \" + \\\n \"either deleted or is a real directory owned by your user account.\")\n raise pip.exceptions.InstallationError(msg)\n return path\n\nif running_under_virtualenv():\n build_prefix = os.path.join(sys.prefix, 'build')\n src_prefix = os.path.join(sys.prefix, 'src')\nelse:\n # Note: intentionally NOT using mkdtemp\n # See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp\n build_prefix = _get_build_prefix()\n\n ## FIXME: keep src in cwd for now (it is not a temporary folder)\n try:\n src_prefix = os.path.join(os.getcwd(), 'src')\n except OSError:\n # In case the current working directory has been renamed or deleted\n sys.exit(\"The folder you are executing pip from can no longer be found.\")\n\n# under Mac OS X + virtualenv sys.prefix is not properly resolved\n# it is something like /path/to/python/bin/..\n# Note: using realpath due to tmp dirs on OSX being symlinks\nbuild_prefix = os.path.abspath(os.path.realpath(build_prefix))\nsrc_prefix = os.path.abspath(src_prefix)\n\n# FIXME doesn't account 
for venv linked to global site-packages\n\nsite_packages = get_python_lib()\nuser_dir = os.path.expanduser('~')\nif sys.platform == 'win32':\n bin_py = os.path.join(sys.prefix, 'Scripts')\n bin_user = os.path.join(user_site, 'Scripts') if user_site else None\n # buildout uses 'bin' on Windows too?\n if not os.path.exists(bin_py):\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, 'pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.ini')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\nelse:\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, '.pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.conf')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\n\n # Forcing to use /usr/local/bin for standard Mac OS X framework installs\n # Also log to ~/Library/Logs/ for use with the Console.app log viewer\n if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':\n bin_py = '/usr/local/bin'\n default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log')\n\n\ndef distutils_scheme(dist_name, user=False, home=None, root=None):\n \"\"\"\n Return a distutils install scheme\n \"\"\"\n from distutils.dist import Distribution\n\n scheme = {}\n d = Distribution({'name': dist_name})\n d.parse_config_files()\n i = d.get_command_obj('install', create=True)\n # NOTE: setting user or home has the side-effect of creating the home dir or\n # user base for installations during finalize_options()\n # ideally, we'd prefer a scheme class that has no side-effects.\n i.user = user or i.user\n i.home = home or i.home\n i.root = root or i.root\n i.finalize_options()\n for key in SCHEME_KEYS:\n scheme[key] = getattr(i, 'install_'+key)\n\n if running_under_virtualenv():\n scheme['headers'] = os.path.join(sys.prefix,\n 'include',\n 'site',\n 'python' + sys.version[:3],\n dist_name)\n\n if root is not None:\n scheme[\"headers\"] = os.path.join(\n root,\n os.path.abspath(scheme[\"headers\"])[1:],\n )\n\n return scheme\n", "path": "pip/locations.py" } ]
diff --git a/pip/locations.py b/pip/locations.py index 61699434665..1d402651689 100644 --- a/pip/locations.py +++ b/pip/locations.py @@ -34,7 +34,12 @@ def running_under_virtualenv(): Return True if we're running inside a virtualenv, False otherwise. """ - return hasattr(sys, 'real_prefix') + if hasattr(sys, 'real_prefix'): + return True + elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): + return True + + return False def virtualenv_no_global():
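A quick way to sanity-check the new behaviour from any interpreter, without touching pip internals; it prints True inside either a venv or a classic virtualenv.

```python
import sys
print(hasattr(sys, "real_prefix") or sys.prefix != getattr(sys, "base_prefix", sys.prefix))
```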
paperless-ngx__paperless-ngx-3554
[BUG] Mail rule action "Move to specified folder" not working ### Description If mail rule action is "Move to specified folder", the mail.log contains: ``` [2023-06-06 23:50:00,229] [DEBUG] [paperless_mail] Processing mail account T-Online (Thorsten) [2023-06-06 23:50:00,342] [DEBUG] [paperless_mail] GMAIL Label Support: False [2023-06-06 23:50:00,343] [DEBUG] [paperless_mail] AUTH=PLAIN Support: False [2023-06-06 23:50:00,389] [DEBUG] [paperless_mail] Account T-Online (Thorsten): Processing 1 rule(s) [2023-06-06 23:50:00,395] [DEBUG] [paperless_mail] Rule T-Online (Thorsten).Paperless Consume Ordner: Selecting folder INBOX.Paperless [2023-06-06 23:50:00,410] [DEBUG] [paperless_mail] Rule T-Online (Thorsten).Paperless Consume Ordner: Searching folder with criteria None [2023-06-06 23:50:00,422] [ERROR] [paperless_mail] Rule T-Online (Thorsten).Paperless Consume Ordner: Error while processing rule: SEARCH command error: BAD [b'Error in IMAP command SEARCH: Unknown argument NONE (0.001 + 0.000 secs).'] Traceback (most recent call last): File "/usr/src/paperless/src/paperless_mail/mail.py", line 495, in handle_mail_account total_processed_files += self._handle_mail_rule( File "/usr/src/paperless/src/paperless_mail/mail.py", line 562, in _handle_mail_rule for message in messages: File "/usr/local/lib/python3.9/site-packages/imap_tools/mailbox.py", line 170, in fetch nums = tuple((reversed if reverse else iter)(self.numbers(criteria, charset)))[limit_range] File "/usr/local/lib/python3.9/site-packages/imap_tools/mailbox.py", line 108, in numbers search_result = self.client.search(charset, encoded_criteria) File "/usr/local/lib/python3.9/imaplib.py", line 732, in search typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria) File "/usr/local/lib/python3.9/imaplib.py", line 1230, in _simple_command return self._command_complete(name, self._command(name, *args)) File "/usr/local/lib/python3.9/imaplib.py", line 1055, in _command_complete raise self.error('%s command error: %s %s' % (name, typ, data)) imaplib.IMAP4.error: SEARCH command error: BAD [b'Error in IMAP command SEARCH: Unknown argument NONE (0.001 + 0.000 secs).'] ``` It seems that this action is the only one not working, since it's the only one with `Searching folder with criteria None` , and this seems to be the root cause for the error. If mail rule action is set to e.g "Flag the mail, don't process flaged mails", then the mail.log looks fine: ``` [2023-06-06 23:40:00,273] [DEBUG] [paperless_mail] Processing mail account T-Online (Thorsten) [2023-06-06 23:40:00,425] [DEBUG] [paperless_mail] GMAIL Label Support: False [2023-06-06 23:40:00,426] [DEBUG] [paperless_mail] AUTH=PLAIN Support: False [2023-06-06 23:40:00,468] [DEBUG] [paperless_mail] Account T-Online (Thorsten): Processing 1 rule(s) [2023-06-06 23:40:00,474] [DEBUG] [paperless_mail] Rule T-Online (Thorsten).Paperless Consume Ordner: Selecting folder INBOX.Paperless [2023-06-06 23:40:00,489] [DEBUG] [paperless_mail] Rule T-Online (Thorsten).Paperless Consume Ordner: Searching folder with criteria (UNKEYWORD PaperlessConsumed) [2023-06-06 23:40:00,502] [DEBUG] [paperless_mail] Rule T-Online (Thorsten).Paperless Consume Ordner: Processed 0 matching mail(s) ``` ### Steps to reproduce 1. Create mail rule, choose action "Move to specified folder" 2. Wait ten minutes 3. 
Check mail.log ### Webserver logs ```bash See error description ``` ### Paperless-ngx version 1.15.0 ### Host OS Synology ### Installation method Docker - official image
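The log line `Searching folder with criteria None` can be traced to the `make_criterias` helper in the file below: the move action contributes no search criteria of its own, and when the rule also has no filters the function falls through without returning anything. A simplified, hypothetical rendering of that branch structure follows (a plain dict merge stands in for `imap_tools.AND`).

```python
def make_criterias(rule_query: dict, criterias: dict):
    # mirrors the branch structure of paperless_mail.mail.make_criterias
    if isinstance(rule_query, dict):
        if len(rule_query) or len(criterias):
            return {**rule_query, **criterias}
        # both dicts empty: falls through here, returning an implicit None
    else:
        return (rule_query, criterias)  # stand-in for AND(rule_query, **criterias)

# "Move to specified folder" uses BaseMailAction.get_criteria(), which
# returns {}. With no age limit and no other filters configured, the rule
# criteria are also empty, so the result is None, which the IMAP layer then
# sends to SEARCH as the literal word NONE, producing the BAD response above.
print(make_criterias({}, {}))  # None
```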
[ { "content": "import datetime\nimport itertools\nimport logging\nimport os\nimport tempfile\nimport traceback\nfrom datetime import date\nfrom datetime import timedelta\nfrom fnmatch import fnmatch\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\n\nimport magic\nimport pathvalidate\nfrom celery import chord\nfrom celery import shared_task\nfrom celery.canvas import Signature\nfrom django.conf import settings\nfrom django.db import DatabaseError\nfrom django.utils.timezone import is_naive\nfrom django.utils.timezone import make_aware\nfrom imap_tools import AND\nfrom imap_tools import NOT\nfrom imap_tools import MailBox\nfrom imap_tools import MailboxFolderSelectError\nfrom imap_tools import MailBoxUnencrypted\nfrom imap_tools import MailMessage\nfrom imap_tools import MailMessageFlags\nfrom imap_tools.mailbox import MailBoxTls\nfrom imap_tools.query import LogicOperator\n\nfrom documents.data_models import ConsumableDocument\nfrom documents.data_models import DocumentMetadataOverrides\nfrom documents.data_models import DocumentSource\nfrom documents.loggers import LoggingMixin\nfrom documents.models import Correspondent\nfrom documents.parsers import is_mime_type_supported\nfrom documents.tasks import consume_file\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.models import ProcessedMail\n\n# Apple Mail sets multiple IMAP KEYWORD and the general \"\\Flagged\" FLAG\n# imaplib => conn.fetch(b\"<message_id>\", \"FLAGS\")\n\n# no flag - (FLAGS (\\\\Seen $NotJunk NotJunk))'\n# red - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk))'\n# orange - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0))'\n# yellow - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1))'\n# blue - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit2))'\n# green - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit1))'\n# violet - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit2))'\n# grey - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1 $MailFlagBit2))'\n\nAPPLE_MAIL_TAG_COLORS = {\n \"red\": [],\n \"orange\": [\"$MailFlagBit0\"],\n \"yellow\": [\"$MailFlagBit1\"],\n \"blue\": [\"$MailFlagBit2\"],\n \"green\": [\"$MailFlagBit0\", \"$MailFlagBit1\"],\n \"violet\": [\"$MailFlagBit0\", \"$MailFlagBit2\"],\n \"grey\": [\"$MailFlagBit1\", \"$MailFlagBit2\"],\n}\n\n\nclass MailError(Exception):\n pass\n\n\nclass BaseMailAction:\n \"\"\"\n Base class for mail actions. 
A mail action is performed on a mail after\n consumption of the document is complete and is used to signal to the user\n that this mail was processed by paperless via the mail client.\n\n Furthermore, mail actions reduce the amount of mails to be analyzed by\n excluding mails on which the action was already performed (i.e., excluding\n read mails when the action is to mark mails as read).\n \"\"\"\n\n def get_criteria(self) -> Union[Dict, LogicOperator]:\n \"\"\"\n Returns filtering criteria/query for this mail action.\n \"\"\"\n return {}\n\n def post_consume(\n self,\n M: MailBox,\n message_uid: str,\n parameter: str,\n ): # pragma: nocover\n \"\"\"\n Perform mail action on the given mail uid in the mailbox.\n \"\"\"\n raise NotImplementedError\n\n\nclass DeleteMailAction(BaseMailAction):\n \"\"\"\n A mail action that deletes mails after processing.\n \"\"\"\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.delete(message_uid)\n\n\nclass MarkReadMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as read after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"seen\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.SEEN], True)\n\n\nclass MoveMailAction(BaseMailAction):\n \"\"\"\n A mail action that moves mails to a different folder after processing.\n \"\"\"\n\n def post_consume(self, M, message_uid, parameter):\n M.move(message_uid, parameter)\n\n\nclass FlagMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as important (\"star\") after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"flagged\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n\nclass TagMailAction(BaseMailAction):\n \"\"\"\n A mail action that tags mails after processing.\n \"\"\"\n\n def __init__(self, parameter: str, supports_gmail_labels: bool):\n # The custom tag should look like \"apple:<color>\"\n if \"apple:\" in parameter.lower():\n _, self.color = parameter.split(\":\")\n self.color = self.color.strip()\n\n if self.color.lower() not in APPLE_MAIL_TAG_COLORS.keys():\n raise MailError(\"Not a valid AppleMail tag color.\")\n\n self.keyword = None\n\n else:\n self.keyword = parameter\n self.color = None\n self.supports_gmail_labels = supports_gmail_labels\n\n def get_criteria(self):\n # AppleMail: We only need to check if mails are \\Flagged\n if self.color:\n return {\"flagged\": False}\n elif self.keyword:\n if self.supports_gmail_labels:\n return AND(NOT(gmail_label=self.keyword), no_keyword=self.keyword)\n else:\n return {\"no_keyword\": self.keyword}\n else: # pragma: nocover\n raise ValueError(\"This should never happen.\")\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n if self.supports_gmail_labels:\n M.client.uid(\"STORE\", message_uid, \"+X-GM-LABELS\", self.keyword)\n\n # AppleMail\n elif self.color:\n # Remove all existing $MailFlagBits\n M.flag(\n message_uid,\n set(itertools.chain(*APPLE_MAIL_TAG_COLORS.values())),\n False,\n )\n\n # Set new $MailFlagBits\n M.flag(message_uid, APPLE_MAIL_TAG_COLORS.get(self.color), True)\n\n # Set the general \\Flagged\n # This defaults to the \"red\" flag in AppleMail and\n # \"stars\" in Thunderbird or GMail\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n elif self.keyword:\n M.flag(message_uid, [self.keyword], True)\n\n else:\n raise MailError(\"No keyword 
specified.\")\n\n\ndef mailbox_login(mailbox: MailBox, account: MailAccount):\n logger = logging.getLogger(\"paperless_mail\")\n\n try:\n if account.is_token:\n mailbox.xoauth2(account.username, account.password)\n else:\n try:\n _ = account.password.encode(\"ascii\")\n use_ascii_login = True\n except UnicodeEncodeError:\n use_ascii_login = False\n\n if use_ascii_login:\n mailbox.login(account.username, account.password)\n else:\n logger.debug(\"Falling back to AUTH=PLAIN\")\n mailbox.login_utf8(account.username, account.password)\n\n except Exception as e:\n logger.error(\n f\"Error while authenticating account {account}: {e}\",\n exc_info=False,\n )\n raise MailError(\n f\"Error while authenticating account {account}\",\n ) from e\n\n\n@shared_task\ndef apply_mail_action(\n result: List[str],\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n This shared task applies the mail action of a particular mail rule to the\n given mail. Creates a ProcessedMail object, so that the mail won't be\n processed in the future.\n \"\"\"\n\n rule = MailRule.objects.get(pk=rule_id)\n account = MailAccount.objects.get(pk=rule.account.pk)\n\n # Ensure the date is properly timezone aware\n if is_naive(message_date):\n message_date = make_aware(message_date)\n\n try:\n with get_mailbox(\n server=account.imap_server,\n port=account.imap_port,\n security=account.imap_security,\n ) as M:\n # Need to know the support for the possible tagging\n supports_gmail_labels = \"X-GM-EXT-1\" in M.client.capabilities\n\n mailbox_login(M, account)\n M.folder.set(rule.folder)\n\n action = get_rule_action(rule, supports_gmail_labels)\n action.post_consume(M, message_uid, rule.action_parameter)\n\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"SUCCESS\",\n )\n\n except Exception:\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n raise\n\n\n@shared_task\ndef error_callback(\n request,\n exc,\n tb,\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n A shared task that is called whenever something goes wrong during\n consumption of a file. 
See queue_consumption_tasks.\n \"\"\"\n rule = MailRule.objects.get(pk=rule_id)\n\n ProcessedMail.objects.create(\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n\n\ndef queue_consumption_tasks(\n *,\n consume_tasks: List[Signature],\n rule: MailRule,\n message: MailMessage,\n):\n \"\"\"\n Queue a list of consumption tasks (Signatures for the consume_file shared\n task) with celery.\n \"\"\"\n\n mail_action_task = apply_mail_action.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n )\n chord(header=consume_tasks, body=mail_action_task).on_error(\n error_callback.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n ),\n ).delay()\n\n\ndef get_rule_action(rule: MailRule, supports_gmail_labels: bool) -> BaseMailAction:\n \"\"\"\n Returns a BaseMailAction instance for the given rule.\n \"\"\"\n\n if rule.action == MailRule.MailAction.FLAG:\n return FlagMailAction()\n elif rule.action == MailRule.MailAction.DELETE:\n return DeleteMailAction()\n elif rule.action == MailRule.MailAction.MOVE:\n return MoveMailAction()\n elif rule.action == MailRule.MailAction.MARK_READ:\n return MarkReadMailAction()\n elif rule.action == MailRule.MailAction.TAG:\n return TagMailAction(rule.action_parameter, supports_gmail_labels)\n else:\n raise NotImplementedError(\"Unknown action.\") # pragma: nocover\n\n\ndef make_criterias(rule: MailRule, supports_gmail_labels: bool):\n \"\"\"\n Returns criteria to be applied to MailBox.fetch for the given rule.\n \"\"\"\n\n maximum_age = date.today() - timedelta(days=rule.maximum_age)\n criterias = {}\n if rule.maximum_age > 0:\n criterias[\"date_gte\"] = maximum_age\n if rule.filter_from:\n criterias[\"from_\"] = rule.filter_from\n if rule.filter_to:\n criterias[\"to\"] = rule.filter_to\n if rule.filter_subject:\n criterias[\"subject\"] = rule.filter_subject\n if rule.filter_body:\n criterias[\"body\"] = rule.filter_body\n\n rule_query = get_rule_action(rule, supports_gmail_labels).get_criteria()\n if isinstance(rule_query, dict):\n if len(rule_query) or len(criterias):\n return AND(**rule_query, **criterias)\n else:\n return AND(rule_query, **criterias)\n\n\ndef get_mailbox(server, port, security) -> MailBox:\n \"\"\"\n Returns the correct MailBox instance for the given configuration.\n \"\"\"\n\n if security == MailAccount.ImapSecurity.NONE:\n mailbox = MailBoxUnencrypted(server, port)\n elif security == MailAccount.ImapSecurity.STARTTLS:\n mailbox = MailBoxTls(server, port)\n elif security == MailAccount.ImapSecurity.SSL:\n mailbox = MailBox(server, port)\n else:\n raise NotImplementedError(\"Unknown IMAP security\") # pragma: nocover\n return mailbox\n\n\nclass MailAccountHandler(LoggingMixin):\n \"\"\"\n The main class that handles mail accounts.\n\n * processes all rules for a given mail account\n * for each mail rule, fetches relevant mails, and queues documents from\n matching mails for consumption\n * marks processed mails in the database, so that they won't be processed\n again\n * runs mail actions on the mail server, when consumption is completed\n \"\"\"\n\n logging_name = \"paperless_mail\"\n\n def _correspondent_from_name(self, name):\n try:\n return Correspondent.objects.get_or_create(name=name)[0]\n except DatabaseError as e:\n self.log.error(f\"Error while retrieving correspondent {name}: {e}\")\n return None\n\n def 
_get_title(self, message, att, rule):\n if rule.assign_title_from == MailRule.TitleSource.FROM_SUBJECT:\n return message.subject\n\n elif rule.assign_title_from == MailRule.TitleSource.FROM_FILENAME:\n return os.path.splitext(os.path.basename(att.filename))[0]\n\n else:\n raise NotImplementedError(\n \"Unknown title selector.\",\n ) # pragma: nocover\n\n def _get_correspondent(self, message: MailMessage, rule):\n c_from = rule.assign_correspondent_from\n\n if c_from == MailRule.CorrespondentSource.FROM_NOTHING:\n return None\n\n elif c_from == MailRule.CorrespondentSource.FROM_EMAIL:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_NAME:\n from_values = message.from_values\n if from_values is not None and len(from_values.name) > 0:\n return self._correspondent_from_name(from_values.name)\n else:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_CUSTOM:\n return rule.assign_correspondent\n\n else:\n raise NotImplementedError(\n \"Unknown correspondent selector\",\n ) # pragma: nocover\n\n def handle_mail_account(self, account: MailAccount):\n \"\"\"\n Main entry method to handle a specific mail account.\n \"\"\"\n\n self.renew_logging_group()\n\n self.log.debug(f\"Processing mail account {account}\")\n\n total_processed_files = 0\n try:\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n supports_gmail_labels = \"X-GM-EXT-1\" in M.client.capabilities\n supports_auth_plain = \"AUTH=PLAIN\" in M.client.capabilities\n\n self.log.debug(f\"GMAIL Label Support: {supports_gmail_labels}\")\n self.log.debug(f\"AUTH=PLAIN Support: {supports_auth_plain}\")\n\n mailbox_login(M, account)\n\n self.log.debug(\n f\"Account {account}: Processing \"\n f\"{account.rules.count()} rule(s)\",\n )\n\n for rule in account.rules.order_by(\"order\"):\n try:\n total_processed_files += self._handle_mail_rule(\n M,\n rule,\n supports_gmail_labels,\n )\n except Exception as e:\n self.log.exception(\n f\"Rule {rule}: Error while processing rule: {e}\",\n )\n except MailError:\n raise\n except Exception as e:\n self.log.error(\n f\"Error while retrieving mailbox {account}: {e}\",\n exc_info=False,\n )\n\n return total_processed_files\n\n def _handle_mail_rule(\n self,\n M: MailBox,\n rule: MailRule,\n supports_gmail_labels: bool,\n ):\n self.log.debug(f\"Rule {rule}: Selecting folder {rule.folder}\")\n\n try:\n M.folder.set(rule.folder)\n except MailboxFolderSelectError as err:\n self.log.error(\n f\"Unable to access folder {rule.folder}, attempting folder listing\",\n )\n try:\n for folder_info in M.folder.list():\n self.log.info(f\"Located folder: {folder_info.name}\")\n except Exception as e:\n self.log.error(\n \"Exception during folder listing, unable to provide list folders: \"\n + str(e),\n )\n\n raise MailError(\n f\"Rule {rule}: Folder {rule.folder} \"\n f\"does not exist in account {rule.account}\",\n ) from err\n\n criterias = make_criterias(rule, supports_gmail_labels)\n\n self.log.debug(\n f\"Rule {rule}: Searching folder with criteria {str(criterias)}\",\n )\n\n try:\n messages = M.fetch(\n criteria=criterias,\n mark_seen=False,\n charset=rule.account.character_set,\n )\n except Exception as err:\n raise MailError(\n f\"Rule {rule}: Error while fetching folder {rule.folder}\",\n ) from err\n\n mails_processed = 0\n total_processed_files = 0\n\n for message in messages:\n if ProcessedMail.objects.filter(\n rule=rule,\n uid=message.uid,\n 
folder=rule.folder,\n ).exists():\n self.log.debug(f\"Skipping mail {message}, already processed.\")\n continue\n\n try:\n processed_files = self._handle_message(message, rule)\n\n total_processed_files += processed_files\n mails_processed += 1\n except Exception as e:\n self.log.exception(\n f\"Rule {rule}: Error while processing mail {message.uid}: {e}\",\n )\n\n self.log.debug(f\"Rule {rule}: Processed {mails_processed} matching mail(s)\")\n\n return total_processed_files\n\n def _handle_message(self, message, rule: MailRule) -> int:\n processed_elements = 0\n\n # Skip Message handling when only attachments are to be processed but\n # message doesn't have any.\n if (\n not message.attachments\n and rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n ):\n return processed_elements\n\n self.log.debug(\n f\"Rule {rule}: \"\n f\"Processing mail {message.subject} from {message.from_} with \"\n f\"{len(message.attachments)} attachment(s)\",\n )\n\n correspondent = self._get_correspondent(message, rule)\n tag_ids = [tag.id for tag in rule.assign_tags.all()]\n doc_type = rule.assign_document_type\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.EML_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_eml(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_attachments(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n return processed_elements\n\n def _process_attachments(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n processed_attachments = 0\n\n consume_tasks = list()\n\n for att in message.attachments:\n if (\n att.content_disposition != \"attachment\"\n and rule.attachment_type\n == MailRule.AttachmentProcessing.ATTACHMENTS_ONLY\n ):\n self.log.debug(\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"with content disposition {att.content_disposition}\",\n )\n continue\n\n if rule.filter_attachment_filename and not fnmatch(\n att.filename.lower(),\n rule.filter_attachment_filename.lower(),\n ):\n # Force the filename and pattern to the lowercase\n # as this is system dependent otherwise\n continue\n\n title = self._get_title(message, att, rule)\n\n # don't trust the content type of the attachment. 
Could be\n # generic application/octet-stream.\n mime_type = magic.from_buffer(att.payload, mime=True)\n\n if is_mime_type_supported(mime_type):\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n )\n with open(temp_filename, \"wb\") as f:\n f.write(att.payload)\n\n self.log.info(\n f\"Rule {rule}: \"\n f\"Consuming attachment {att.filename} from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=title,\n filename=pathvalidate.sanitize_filename(att.filename),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n consume_tasks.append(consume_task)\n\n processed_attachments += 1\n else:\n self.log.debug(\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"since guessed mime type {mime_type} is not supported \"\n f\"by paperless\",\n )\n\n if len(consume_tasks) > 0:\n queue_consumption_tasks(\n consume_tasks=consume_tasks,\n rule=rule,\n message=message,\n )\n else:\n # No files to consume, just mark as processed if it wasnt by .eml processing\n if not ProcessedMail.objects.filter(\n rule=rule,\n uid=message.uid,\n folder=rule.folder,\n ).exists():\n ProcessedMail.objects.create(\n rule=rule,\n folder=rule.folder,\n uid=message.uid,\n subject=message.subject,\n received=message.date,\n status=\"PROCESSED_WO_CONSUMPTION\",\n )\n\n return processed_attachments\n\n def _process_eml(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n suffix=\".eml\",\n )\n with open(temp_filename, \"wb\") as f:\n # Move \"From\"-header to beginning of file\n # TODO: This ugly workaround is needed because the parser is\n # chosen only by the mime_type detected via magic\n # (see documents/consumer.py \"mime_type = magic.from_file\")\n # Unfortunately magic sometimes fails to detect the mime\n # type of .eml files correctly as message/rfc822 and instead\n # detects text/plain.\n # This also effects direct file consumption of .eml files\n # which are not treated with this workaround.\n from_element = None\n for i, header in enumerate(message.obj._headers):\n if header[0] == \"From\":\n from_element = i\n if from_element:\n new_headers = [message.obj._headers.pop(from_element)]\n new_headers += message.obj._headers\n message.obj._headers = new_headers\n\n f.write(message.obj.as_bytes())\n\n self.log.info(\n f\"Rule {rule}: \"\n f\"Consuming eml from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=message.subject,\n filename=pathvalidate.sanitize_filename(f\"{message.subject}.eml\"),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n queue_consumption_tasks(\n 
consume_tasks=[consume_task],\n rule=rule,\n message=message,\n )\n\n processed_elements = 1\n return processed_elements\n", "path": "src/paperless_mail/mail.py" } ]
[ { "content": "import datetime\nimport itertools\nimport logging\nimport os\nimport tempfile\nimport traceback\nfrom datetime import date\nfrom datetime import timedelta\nfrom fnmatch import fnmatch\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\n\nimport magic\nimport pathvalidate\nfrom celery import chord\nfrom celery import shared_task\nfrom celery.canvas import Signature\nfrom django.conf import settings\nfrom django.db import DatabaseError\nfrom django.utils.timezone import is_naive\nfrom django.utils.timezone import make_aware\nfrom imap_tools import AND\nfrom imap_tools import NOT\nfrom imap_tools import MailBox\nfrom imap_tools import MailboxFolderSelectError\nfrom imap_tools import MailBoxUnencrypted\nfrom imap_tools import MailMessage\nfrom imap_tools import MailMessageFlags\nfrom imap_tools.mailbox import MailBoxTls\nfrom imap_tools.query import LogicOperator\n\nfrom documents.data_models import ConsumableDocument\nfrom documents.data_models import DocumentMetadataOverrides\nfrom documents.data_models import DocumentSource\nfrom documents.loggers import LoggingMixin\nfrom documents.models import Correspondent\nfrom documents.parsers import is_mime_type_supported\nfrom documents.tasks import consume_file\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.models import ProcessedMail\n\n# Apple Mail sets multiple IMAP KEYWORD and the general \"\\Flagged\" FLAG\n# imaplib => conn.fetch(b\"<message_id>\", \"FLAGS\")\n\n# no flag - (FLAGS (\\\\Seen $NotJunk NotJunk))'\n# red - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk))'\n# orange - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0))'\n# yellow - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1))'\n# blue - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit2))'\n# green - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit1))'\n# violet - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit0 $MailFlagBit2))'\n# grey - (FLAGS (\\\\Flagged \\\\Seen $NotJunk NotJunk $MailFlagBit1 $MailFlagBit2))'\n\nAPPLE_MAIL_TAG_COLORS = {\n \"red\": [],\n \"orange\": [\"$MailFlagBit0\"],\n \"yellow\": [\"$MailFlagBit1\"],\n \"blue\": [\"$MailFlagBit2\"],\n \"green\": [\"$MailFlagBit0\", \"$MailFlagBit1\"],\n \"violet\": [\"$MailFlagBit0\", \"$MailFlagBit2\"],\n \"grey\": [\"$MailFlagBit1\", \"$MailFlagBit2\"],\n}\n\n\nclass MailError(Exception):\n pass\n\n\nclass BaseMailAction:\n \"\"\"\n Base class for mail actions. 
A mail action is performed on a mail after\n consumption of the document is complete and is used to signal to the user\n that this mail was processed by paperless via the mail client.\n\n Furthermore, mail actions reduce the amount of mails to be analyzed by\n excluding mails on which the action was already performed (i.e., excluding\n read mails when the action is to mark mails as read).\n \"\"\"\n\n def get_criteria(self) -> Union[Dict, LogicOperator]:\n \"\"\"\n Returns filtering criteria/query for this mail action.\n \"\"\"\n return {}\n\n def post_consume(\n self,\n M: MailBox,\n message_uid: str,\n parameter: str,\n ): # pragma: nocover\n \"\"\"\n Perform mail action on the given mail uid in the mailbox.\n \"\"\"\n raise NotImplementedError\n\n\nclass DeleteMailAction(BaseMailAction):\n \"\"\"\n A mail action that deletes mails after processing.\n \"\"\"\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.delete(message_uid)\n\n\nclass MarkReadMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as read after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"seen\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.SEEN], True)\n\n\nclass MoveMailAction(BaseMailAction):\n \"\"\"\n A mail action that moves mails to a different folder after processing.\n \"\"\"\n\n def post_consume(self, M, message_uid, parameter):\n M.move(message_uid, parameter)\n\n\nclass FlagMailAction(BaseMailAction):\n \"\"\"\n A mail action that marks mails as important (\"star\") after processing.\n \"\"\"\n\n def get_criteria(self):\n return {\"flagged\": False}\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n\nclass TagMailAction(BaseMailAction):\n \"\"\"\n A mail action that tags mails after processing.\n \"\"\"\n\n def __init__(self, parameter: str, supports_gmail_labels: bool):\n # The custom tag should look like \"apple:<color>\"\n if \"apple:\" in parameter.lower():\n _, self.color = parameter.split(\":\")\n self.color = self.color.strip()\n\n if self.color.lower() not in APPLE_MAIL_TAG_COLORS.keys():\n raise MailError(\"Not a valid AppleMail tag color.\")\n\n self.keyword = None\n\n else:\n self.keyword = parameter\n self.color = None\n self.supports_gmail_labels = supports_gmail_labels\n\n def get_criteria(self):\n # AppleMail: We only need to check if mails are \\Flagged\n if self.color:\n return {\"flagged\": False}\n elif self.keyword:\n if self.supports_gmail_labels:\n return AND(NOT(gmail_label=self.keyword), no_keyword=self.keyword)\n else:\n return {\"no_keyword\": self.keyword}\n else: # pragma: nocover\n raise ValueError(\"This should never happen.\")\n\n def post_consume(self, M: MailBox, message_uid: str, parameter: str):\n if self.supports_gmail_labels:\n M.client.uid(\"STORE\", message_uid, \"+X-GM-LABELS\", self.keyword)\n\n # AppleMail\n elif self.color:\n # Remove all existing $MailFlagBits\n M.flag(\n message_uid,\n set(itertools.chain(*APPLE_MAIL_TAG_COLORS.values())),\n False,\n )\n\n # Set new $MailFlagBits\n M.flag(message_uid, APPLE_MAIL_TAG_COLORS.get(self.color), True)\n\n # Set the general \\Flagged\n # This defaults to the \"red\" flag in AppleMail and\n # \"stars\" in Thunderbird or GMail\n M.flag(message_uid, [MailMessageFlags.FLAGGED], True)\n\n elif self.keyword:\n M.flag(message_uid, [self.keyword], True)\n\n else:\n raise MailError(\"No keyword 
specified.\")\n\n\ndef mailbox_login(mailbox: MailBox, account: MailAccount):\n logger = logging.getLogger(\"paperless_mail\")\n\n try:\n if account.is_token:\n mailbox.xoauth2(account.username, account.password)\n else:\n try:\n _ = account.password.encode(\"ascii\")\n use_ascii_login = True\n except UnicodeEncodeError:\n use_ascii_login = False\n\n if use_ascii_login:\n mailbox.login(account.username, account.password)\n else:\n logger.debug(\"Falling back to AUTH=PLAIN\")\n mailbox.login_utf8(account.username, account.password)\n\n except Exception as e:\n logger.error(\n f\"Error while authenticating account {account}: {e}\",\n exc_info=False,\n )\n raise MailError(\n f\"Error while authenticating account {account}\",\n ) from e\n\n\n@shared_task\ndef apply_mail_action(\n result: List[str],\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n This shared task applies the mail action of a particular mail rule to the\n given mail. Creates a ProcessedMail object, so that the mail won't be\n processed in the future.\n \"\"\"\n\n rule = MailRule.objects.get(pk=rule_id)\n account = MailAccount.objects.get(pk=rule.account.pk)\n\n # Ensure the date is properly timezone aware\n if is_naive(message_date):\n message_date = make_aware(message_date)\n\n try:\n with get_mailbox(\n server=account.imap_server,\n port=account.imap_port,\n security=account.imap_security,\n ) as M:\n # Need to know the support for the possible tagging\n supports_gmail_labels = \"X-GM-EXT-1\" in M.client.capabilities\n\n mailbox_login(M, account)\n M.folder.set(rule.folder)\n\n action = get_rule_action(rule, supports_gmail_labels)\n action.post_consume(M, message_uid, rule.action_parameter)\n\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"SUCCESS\",\n )\n\n except Exception:\n ProcessedMail.objects.create(\n owner=rule.owner,\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n raise\n\n\n@shared_task\ndef error_callback(\n request,\n exc,\n tb,\n rule_id: int,\n message_uid: str,\n message_subject: str,\n message_date: datetime.datetime,\n):\n \"\"\"\n A shared task that is called whenever something goes wrong during\n consumption of a file. 
See queue_consumption_tasks.\n \"\"\"\n rule = MailRule.objects.get(pk=rule_id)\n\n ProcessedMail.objects.create(\n rule=rule,\n folder=rule.folder,\n uid=message_uid,\n subject=message_subject,\n received=message_date,\n status=\"FAILED\",\n error=traceback.format_exc(),\n )\n\n\ndef queue_consumption_tasks(\n *,\n consume_tasks: List[Signature],\n rule: MailRule,\n message: MailMessage,\n):\n \"\"\"\n Queue a list of consumption tasks (Signatures for the consume_file shared\n task) with celery.\n \"\"\"\n\n mail_action_task = apply_mail_action.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n )\n chord(header=consume_tasks, body=mail_action_task).on_error(\n error_callback.s(\n rule_id=rule.pk,\n message_uid=message.uid,\n message_subject=message.subject,\n message_date=message.date,\n ),\n ).delay()\n\n\ndef get_rule_action(rule: MailRule, supports_gmail_labels: bool) -> BaseMailAction:\n \"\"\"\n Returns a BaseMailAction instance for the given rule.\n \"\"\"\n\n if rule.action == MailRule.MailAction.FLAG:\n return FlagMailAction()\n elif rule.action == MailRule.MailAction.DELETE:\n return DeleteMailAction()\n elif rule.action == MailRule.MailAction.MOVE:\n return MoveMailAction()\n elif rule.action == MailRule.MailAction.MARK_READ:\n return MarkReadMailAction()\n elif rule.action == MailRule.MailAction.TAG:\n return TagMailAction(rule.action_parameter, supports_gmail_labels)\n else:\n raise NotImplementedError(\"Unknown action.\") # pragma: nocover\n\n\ndef make_criterias(rule: MailRule, supports_gmail_labels: bool):\n \"\"\"\n Returns criteria to be applied to MailBox.fetch for the given rule.\n \"\"\"\n\n maximum_age = date.today() - timedelta(days=rule.maximum_age)\n criterias = {}\n if rule.maximum_age > 0:\n criterias[\"date_gte\"] = maximum_age\n if rule.filter_from:\n criterias[\"from_\"] = rule.filter_from\n if rule.filter_to:\n criterias[\"to\"] = rule.filter_to\n if rule.filter_subject:\n criterias[\"subject\"] = rule.filter_subject\n if rule.filter_body:\n criterias[\"body\"] = rule.filter_body\n\n rule_query = get_rule_action(rule, supports_gmail_labels).get_criteria()\n if isinstance(rule_query, dict):\n if len(rule_query) or len(criterias):\n return AND(**rule_query, **criterias)\n else:\n return \"ALL\"\n else:\n return AND(rule_query, **criterias)\n\n\ndef get_mailbox(server, port, security) -> MailBox:\n \"\"\"\n Returns the correct MailBox instance for the given configuration.\n \"\"\"\n\n if security == MailAccount.ImapSecurity.NONE:\n mailbox = MailBoxUnencrypted(server, port)\n elif security == MailAccount.ImapSecurity.STARTTLS:\n mailbox = MailBoxTls(server, port)\n elif security == MailAccount.ImapSecurity.SSL:\n mailbox = MailBox(server, port)\n else:\n raise NotImplementedError(\"Unknown IMAP security\") # pragma: nocover\n return mailbox\n\n\nclass MailAccountHandler(LoggingMixin):\n \"\"\"\n The main class that handles mail accounts.\n\n * processes all rules for a given mail account\n * for each mail rule, fetches relevant mails, and queues documents from\n matching mails for consumption\n * marks processed mails in the database, so that they won't be processed\n again\n * runs mail actions on the mail server, when consumption is completed\n \"\"\"\n\n logging_name = \"paperless_mail\"\n\n def _correspondent_from_name(self, name):\n try:\n return Correspondent.objects.get_or_create(name=name)[0]\n except DatabaseError as e:\n self.log.error(f\"Error while retrieving correspondent {name}: 
{e}\")\n return None\n\n def _get_title(self, message, att, rule):\n if rule.assign_title_from == MailRule.TitleSource.FROM_SUBJECT:\n return message.subject\n\n elif rule.assign_title_from == MailRule.TitleSource.FROM_FILENAME:\n return os.path.splitext(os.path.basename(att.filename))[0]\n\n else:\n raise NotImplementedError(\n \"Unknown title selector.\",\n ) # pragma: nocover\n\n def _get_correspondent(self, message: MailMessage, rule):\n c_from = rule.assign_correspondent_from\n\n if c_from == MailRule.CorrespondentSource.FROM_NOTHING:\n return None\n\n elif c_from == MailRule.CorrespondentSource.FROM_EMAIL:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_NAME:\n from_values = message.from_values\n if from_values is not None and len(from_values.name) > 0:\n return self._correspondent_from_name(from_values.name)\n else:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CorrespondentSource.FROM_CUSTOM:\n return rule.assign_correspondent\n\n else:\n raise NotImplementedError(\n \"Unknown correspondent selector\",\n ) # pragma: nocover\n\n def handle_mail_account(self, account: MailAccount):\n \"\"\"\n Main entry method to handle a specific mail account.\n \"\"\"\n\n self.renew_logging_group()\n\n self.log.debug(f\"Processing mail account {account}\")\n\n total_processed_files = 0\n try:\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n supports_gmail_labels = \"X-GM-EXT-1\" in M.client.capabilities\n supports_auth_plain = \"AUTH=PLAIN\" in M.client.capabilities\n\n self.log.debug(f\"GMAIL Label Support: {supports_gmail_labels}\")\n self.log.debug(f\"AUTH=PLAIN Support: {supports_auth_plain}\")\n\n mailbox_login(M, account)\n\n self.log.debug(\n f\"Account {account}: Processing \"\n f\"{account.rules.count()} rule(s)\",\n )\n\n for rule in account.rules.order_by(\"order\"):\n try:\n total_processed_files += self._handle_mail_rule(\n M,\n rule,\n supports_gmail_labels,\n )\n except Exception as e:\n self.log.exception(\n f\"Rule {rule}: Error while processing rule: {e}\",\n )\n except MailError:\n raise\n except Exception as e:\n self.log.error(\n f\"Error while retrieving mailbox {account}: {e}\",\n exc_info=False,\n )\n\n return total_processed_files\n\n def _handle_mail_rule(\n self,\n M: MailBox,\n rule: MailRule,\n supports_gmail_labels: bool,\n ):\n self.log.debug(f\"Rule {rule}: Selecting folder {rule.folder}\")\n\n try:\n M.folder.set(rule.folder)\n except MailboxFolderSelectError as err:\n self.log.error(\n f\"Unable to access folder {rule.folder}, attempting folder listing\",\n )\n try:\n for folder_info in M.folder.list():\n self.log.info(f\"Located folder: {folder_info.name}\")\n except Exception as e:\n self.log.error(\n \"Exception during folder listing, unable to provide list folders: \"\n + str(e),\n )\n\n raise MailError(\n f\"Rule {rule}: Folder {rule.folder} \"\n f\"does not exist in account {rule.account}\",\n ) from err\n\n criterias = make_criterias(rule, supports_gmail_labels)\n\n self.log.debug(\n f\"Rule {rule}: Searching folder with criteria {str(criterias)}\",\n )\n\n try:\n messages = M.fetch(\n criteria=criterias,\n mark_seen=False,\n charset=rule.account.character_set,\n )\n except Exception as err:\n raise MailError(\n f\"Rule {rule}: Error while fetching folder {rule.folder}\",\n ) from err\n\n mails_processed = 0\n total_processed_files = 0\n\n for message in messages:\n if ProcessedMail.objects.filter(\n rule=rule,\n 
uid=message.uid,\n folder=rule.folder,\n ).exists():\n self.log.debug(f\"Skipping mail {message}, already processed.\")\n continue\n\n try:\n processed_files = self._handle_message(message, rule)\n\n total_processed_files += processed_files\n mails_processed += 1\n except Exception as e:\n self.log.exception(\n f\"Rule {rule}: Error while processing mail {message.uid}: {e}\",\n )\n\n self.log.debug(f\"Rule {rule}: Processed {mails_processed} matching mail(s)\")\n\n return total_processed_files\n\n def _handle_message(self, message, rule: MailRule) -> int:\n processed_elements = 0\n\n # Skip Message handling when only attachments are to be processed but\n # message doesn't have any.\n if (\n not message.attachments\n and rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n ):\n return processed_elements\n\n self.log.debug(\n f\"Rule {rule}: \"\n f\"Processing mail {message.subject} from {message.from_} with \"\n f\"{len(message.attachments)} attachment(s)\",\n )\n\n correspondent = self._get_correspondent(message, rule)\n tag_ids = [tag.id for tag in rule.assign_tags.all()]\n doc_type = rule.assign_document_type\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.EML_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_eml(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n if (\n rule.consumption_scope == MailRule.ConsumptionScope.ATTACHMENTS_ONLY\n or rule.consumption_scope == MailRule.ConsumptionScope.EVERYTHING\n ):\n processed_elements += self._process_attachments(\n message,\n rule,\n correspondent,\n tag_ids,\n doc_type,\n )\n\n return processed_elements\n\n def _process_attachments(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n processed_attachments = 0\n\n consume_tasks = list()\n\n for att in message.attachments:\n if (\n att.content_disposition != \"attachment\"\n and rule.attachment_type\n == MailRule.AttachmentProcessing.ATTACHMENTS_ONLY\n ):\n self.log.debug(\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"with content disposition {att.content_disposition}\",\n )\n continue\n\n if rule.filter_attachment_filename and not fnmatch(\n att.filename.lower(),\n rule.filter_attachment_filename.lower(),\n ):\n # Force the filename and pattern to the lowercase\n # as this is system dependent otherwise\n continue\n\n title = self._get_title(message, att, rule)\n\n # don't trust the content type of the attachment. 
Could be\n # generic application/octet-stream.\n mime_type = magic.from_buffer(att.payload, mime=True)\n\n if is_mime_type_supported(mime_type):\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n )\n with open(temp_filename, \"wb\") as f:\n f.write(att.payload)\n\n self.log.info(\n f\"Rule {rule}: \"\n f\"Consuming attachment {att.filename} from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=title,\n filename=pathvalidate.sanitize_filename(att.filename),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n consume_tasks.append(consume_task)\n\n processed_attachments += 1\n else:\n self.log.debug(\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"since guessed mime type {mime_type} is not supported \"\n f\"by paperless\",\n )\n\n if len(consume_tasks) > 0:\n queue_consumption_tasks(\n consume_tasks=consume_tasks,\n rule=rule,\n message=message,\n )\n else:\n # No files to consume, just mark as processed if it wasnt by .eml processing\n if not ProcessedMail.objects.filter(\n rule=rule,\n uid=message.uid,\n folder=rule.folder,\n ).exists():\n ProcessedMail.objects.create(\n rule=rule,\n folder=rule.folder,\n uid=message.uid,\n subject=message.subject,\n received=message.date,\n status=\"PROCESSED_WO_CONSUMPTION\",\n )\n\n return processed_attachments\n\n def _process_eml(\n self,\n message: MailMessage,\n rule: MailRule,\n correspondent,\n tag_ids,\n doc_type,\n ):\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n suffix=\".eml\",\n )\n with open(temp_filename, \"wb\") as f:\n # Move \"From\"-header to beginning of file\n # TODO: This ugly workaround is needed because the parser is\n # chosen only by the mime_type detected via magic\n # (see documents/consumer.py \"mime_type = magic.from_file\")\n # Unfortunately magic sometimes fails to detect the mime\n # type of .eml files correctly as message/rfc822 and instead\n # detects text/plain.\n # This also effects direct file consumption of .eml files\n # which are not treated with this workaround.\n from_element = None\n for i, header in enumerate(message.obj._headers):\n if header[0] == \"From\":\n from_element = i\n if from_element:\n new_headers = [message.obj._headers.pop(from_element)]\n new_headers += message.obj._headers\n message.obj._headers = new_headers\n\n f.write(message.obj.as_bytes())\n\n self.log.info(\n f\"Rule {rule}: \"\n f\"Consuming eml from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n input_doc = ConsumableDocument(\n source=DocumentSource.MailFetch,\n original_file=temp_filename,\n )\n doc_overrides = DocumentMetadataOverrides(\n title=message.subject,\n filename=pathvalidate.sanitize_filename(f\"{message.subject}.eml\"),\n correspondent_id=correspondent.id if correspondent else None,\n document_type_id=doc_type.id if doc_type else None,\n tag_ids=tag_ids,\n owner_id=rule.owner.id if rule.owner else None,\n )\n\n consume_task = consume_file.s(\n input_doc,\n doc_overrides,\n )\n\n queue_consumption_tasks(\n 
consume_tasks=[consume_task],\n rule=rule,\n message=message,\n )\n\n processed_elements = 1\n return processed_elements\n", "path": "src/paperless_mail/mail.py" } ]
diff --git a/src/paperless_mail/mail.py b/src/paperless_mail/mail.py index b525ef91d3f..bfb306e5abc 100644 --- a/src/paperless_mail/mail.py +++ b/src/paperless_mail/mail.py @@ -384,6 +384,8 @@ def make_criterias(rule: MailRule, supports_gmail_labels: bool): if isinstance(rule_query, dict): if len(rule_query) or len(criterias): return AND(**rule_query, **criterias) + else: + return "ALL" else: return AND(rule_query, **criterias) diff --git a/src/paperless_mail/tests/test_mail.py b/src/paperless_mail/tests/test_mail.py index e69dbbef8ff..82b874fd808 100644 --- a/src/paperless_mail/tests/test_mail.py +++ b/src/paperless_mail/tests/test_mail.py @@ -721,6 +721,31 @@ def test_handle_mail_account_move(self): self.assertEqual(len(self.bogus_mailbox.messages), 2) self.assertEqual(len(self.bogus_mailbox.messages_spam), 1) + def test_handle_mail_account_move_no_filters(self): + account = MailAccount.objects.create( + name="test", + imap_server="", + username="admin", + password="secret", + ) + + _ = MailRule.objects.create( + name="testrule", + account=account, + action=MailRule.MailAction.MOVE, + action_parameter="spam", + maximum_age=0, + ) + + self.assertEqual(len(self.bogus_mailbox.messages), 3) + self.assertEqual(len(self.bogus_mailbox.messages_spam), 0) + + self.mail_account_handler.handle_mail_account(account) + self.apply_mail_actions() + + self.assertEqual(len(self.bogus_mailbox.messages), 0) + self.assertEqual(len(self.bogus_mailbox.messages_spam), 3) + def test_handle_mail_account_tag(self): account = MailAccount.objects.create( name="test",
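A note on the diff above: it makes `make_criterias` return the IMAP `"ALL"` search key when neither the rule's filters nor its action contribute any criteria (for example the MOVE rule with `maximum_age=0` and no filters added in the new test), instead of falling through and returning `None` as fetch criteria. The snippet below is a minimal stand-alone sketch of that fallback, not the project's function; `combine_criteria` and its parameters are invented names, and only `imap_tools.AND` is assumed to be available, as it is imported in the module above.

```python
# Minimal sketch (illustrative names) of the fallback behaviour added above:
# with no filters and no action criteria, fetch with the IMAP "ALL" key.
from typing import Dict, Union

from imap_tools import AND
from imap_tools.query import LogicOperator


def combine_criteria(
    action_criteria: Dict,
    rule_filters: Dict,
) -> Union[LogicOperator, str]:
    """Merge action criteria with rule filters for a MailBox.fetch() call."""
    if action_criteria or rule_filters:
        # Same pattern as the module: expand both dicts into AND(...).
        return AND(**action_criteria, **rule_filters)
    # Nothing constrains the search, so fall back to the "ALL" search key
    # rather than returning no criteria at all.
    return "ALL"


# A MOVE rule with maximum_age=0 and no filters ends up in the first case:
print(combine_criteria({}, {}))  # -> "ALL"
# A mark-as-read rule with a subject filter ends up in the second case:
print(combine_criteria({"seen": False}, {"subject": "invoice"}))
```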
ansible__ansible-modules-core-2723
pip module "requirements" parameter documentation is incomplete. For the "requirements" parameter, the comment "The path to a pip requirements file" is incomplete. I am left with the following questions (I am a very new Ansible user): - Is this a local or remote path? - If local, is there a way to refer to the path relatively? There doesn't appear to be, and if there is, it is not documented. - If the path is local and must be absolute, that should be clearly stated instead of being inferred by the example (which uses an absolute path, making the role unmoveable which seems broken).
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Matt Wright <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport tempfile\nimport re\nimport os\n\nDOCUMENTATION = '''\n---\nmodule: pip\nshort_description: Manages Python library dependencies.\ndescription:\n - \"Manage Python library dependencies. To use this module, one of the following keys is required: C(name)\n or C(requirements).\"\nversion_added: \"0.7\"\noptions:\n name:\n description:\n - The name of a Python library to install or the url of the remote package.\n required: false\n default: null\n version:\n description:\n - The version number to install of the Python library specified in the I(name) parameter\n required: false\n default: null\n requirements:\n description:\n - The path to a pip requirements file\n required: false\n default: null\n virtualenv:\n description:\n - An optional path to a I(virtualenv) directory to install into\n required: false\n default: null\n virtualenv_site_packages:\n version_added: \"1.0\"\n description:\n - Whether the virtual environment will inherit packages from the\n global site-packages directory. Note that if this setting is\n changed on an already existing virtual environment it will not\n have any effect, the environment must be deleted and newly\n created.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n virtualenv_command:\n version_added: \"1.1\"\n description:\n - The command or a pathname to the command to create the virtual\n environment with. For example C(pyvenv), C(virtualenv),\n C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).\n required: false\n default: virtualenv\n virtualenv_python:\n version_added: \"2.0\"\n description:\n - The Python executable used for creating the virtual environment.\n For example C(python3.4), C(python2.7). When not specified, the\n system Python version is used.\n required: false\n default: null\n state:\n description:\n - The state of module\n required: false\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n extra_args:\n description:\n - Extra arguments passed to pip.\n required: false\n default: null\n version_added: \"1.0\"\n editable:\n description:\n - Pass the editable flag for versioning URLs.\n required: false\n default: yes\n version_added: \"2.0\"\n chdir:\n description:\n - cd into this directory before running the command\n version_added: \"1.3\"\n required: false\n default: null\n executable:\n description:\n - The explicit executable or a pathname to the executable to be used to\n run pip for a specific version of Python installed in the system. 
For\n example C(pip-3.3), if there are both Python 2.7 and 3.3 installations\n in the system and you want to run pip for the Python 3.3 installation.\n version_added: \"1.3\"\n required: false\n default: null\nnotes:\n - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized.\nrequirements: [ \"virtualenv\", \"pip\" ]\nauthor: \"Matt Wright (@mattupstate)\"\n'''\n\nEXAMPLES = '''\n# Install (Bottle) python package.\n- pip: name=bottle\n\n# Install (Bottle) python package on version 0.11.\n- pip: name=bottle version=0.11\n\n# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.\n- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp'\n\n# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way.\n- pip: name='git+http://myrepo/app/MyApp' editable=false\n\n# Install (MyApp) from local tarball\n- pip: name='file:///path/to/MyApp.tar.gz'\n\n# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules\n- pip: name=bottle virtualenv=/my_app/venv\n\n# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules\n- pip: name=bottle virtualenv=/my_app/venv virtualenv_site_packages=yes\n\n# Install (Bottle) into the specified (virtualenv), using Python 2.7\n- pip: name=bottle virtualenv=/my_app/venv virtualenv_command=virtualenv-2.7\n\n# Install specified python requirements.\n- pip: requirements=/my_app/requirements.txt\n\n# Install specified python requirements in indicated (virtualenv).\n- pip: requirements=/my_app/requirements.txt virtualenv=/my_app/venv\n\n# Install specified python requirements and custom Index URL.\n- pip: requirements=/my_app/requirements.txt extra_args='-i https://example.com/pypi/simple'\n\n# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.\n- pip: name=bottle executable=pip-3.3\n'''\n\ndef _get_cmd_options(module, cmd):\n thiscmd = cmd + \" --help\"\n rc, stdout, stderr = module.run_command(thiscmd)\n if rc != 0:\n module.fail_json(msg=\"Could not get output from %s: %s\" % (thiscmd, stdout + stderr))\n\n words = stdout.strip().split()\n cmd_options = [ x for x in words if x.startswith('--') ]\n return cmd_options\n\n\ndef _get_full_name(name, version=None):\n if version is None:\n resp = name\n else:\n resp = name + '==' + version\n return resp\n\ndef _is_present(name, version, installed_pkgs):\n for pkg in installed_pkgs:\n if '==' not in pkg:\n continue\n\n [pkg_name, pkg_version] = pkg.split('==')\n\n if pkg_name == name and (version is None or version == pkg_version):\n return True\n\n return False\n\n\n\ndef _get_pip(module, env=None, executable=None):\n # On Debian and Ubuntu, pip is pip.\n # On Fedora18 and up, pip is python-pip.\n # On Fedora17 and below, CentOS and RedHat 6 and 5, pip is pip-python.\n # On Fedora, CentOS, and RedHat, the exception is in the virtualenv.\n # There, pip is just pip.\n candidate_pip_basenames = ['pip', 'python-pip', 'pip-python']\n pip = None\n if executable is not None:\n executable = os.path.expanduser(executable)\n if os.path.isabs(executable):\n pip = executable\n else:\n # If you define your own executable that executable should be the only candidate.\n candidate_pip_basenames = [executable]\n if pip is None:\n if env is None:\n opt_dirs = []\n else:\n # Try pip with the virtualenv directory first.\n opt_dirs = 
['%s/bin' % env]\n for basename in candidate_pip_basenames:\n pip = module.get_bin_path(basename, False, opt_dirs)\n if pip is not None:\n break\n # pip should have been found by now. The final call to get_bin_path will\n # trigger fail_json.\n if pip is None:\n basename = candidate_pip_basenames[0]\n pip = module.get_bin_path(basename, True, opt_dirs)\n return pip\n\n\ndef _fail(module, cmd, out, err):\n msg = ''\n if out:\n msg += \"stdout: %s\" % (out, )\n if err:\n msg += \"\\n:stderr: %s\" % (err, )\n module.fail_json(cmd=cmd, msg=msg)\n\n\ndef main():\n state_map = dict(\n present='install',\n absent='uninstall -y',\n latest='install -U',\n )\n\n module = AnsibleModule(\n argument_spec=dict(\n state=dict(default='present', choices=state_map.keys()),\n name=dict(default=None, required=False),\n version=dict(default=None, required=False, type='str'),\n requirements=dict(default=None, required=False),\n virtualenv=dict(default=None, required=False),\n virtualenv_site_packages=dict(default='no', type='bool'),\n virtualenv_command=dict(default='virtualenv', required=False),\n virtualenv_python=dict(default=None, required=False, type='str'),\n use_mirrors=dict(default='yes', type='bool'),\n extra_args=dict(default=None, required=False),\n editable=dict(default='yes', type='bool', required=False),\n chdir=dict(default=None, required=False, type='path'),\n executable=dict(default=None, required=False),\n ),\n required_one_of=[['name', 'requirements']],\n mutually_exclusive=[['name', 'requirements']],\n supports_check_mode=True\n )\n\n state = module.params['state']\n name = module.params['name']\n version = module.params['version']\n requirements = module.params['requirements']\n extra_args = module.params['extra_args']\n virtualenv_python = module.params['virtualenv_python']\n chdir = module.params['chdir']\n\n if state == 'latest' and version is not None:\n module.fail_json(msg='version is incompatible with state=latest')\n\n if chdir is None:\n # this is done to avoid permissions issues with privilege escalation and virtualenvs\n chdir = tempfile.gettempdir()\n\n err = ''\n out = ''\n\n env = module.params['virtualenv']\n virtualenv_command = module.params['virtualenv_command']\n\n if env:\n env = os.path.expanduser(env)\n if not os.path.exists(os.path.join(env, 'bin', 'activate')):\n if module.check_mode:\n module.exit_json(changed=True)\n\n cmd = os.path.expanduser(virtualenv_command)\n if os.path.basename(cmd) == cmd:\n cmd = module.get_bin_path(virtualenv_command, True)\n\n if module.params['virtualenv_site_packages']:\n cmd += ' --system-site-packages'\n else:\n cmd_opts = _get_cmd_options(module, cmd)\n if '--no-site-packages' in cmd_opts:\n cmd += ' --no-site-packages'\n\n if virtualenv_python:\n cmd += ' -p%s' % virtualenv_python\n\n cmd = \"%s %s\" % (cmd, env)\n rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)\n out += out_venv\n err += err_venv\n if rc != 0:\n _fail(module, cmd, out, err)\n\n pip = _get_pip(module, env, module.params['executable'])\n\n cmd = '%s %s' % (pip, state_map[state])\n\n # If there's a virtualenv we want things we install to be able to use other\n # installations that exist as binaries within this virtualenv. 
Example: we\n # install cython and then gevent -- gevent needs to use the cython binary,\n # not just a python package that will be found by calling the right python.\n # So if there's a virtualenv, we add that bin/ to the beginning of the PATH\n # in run_command by setting path_prefix here.\n path_prefix = None\n if env:\n path_prefix = \"/\".join(pip.split('/')[:-1])\n\n # Automatically apply -e option to extra_args when source is a VCS url. VCS\n # includes those beginning with svn+, git+, hg+ or bzr+\n has_vcs = bool(name and re.match(r'(svn|git|hg|bzr)\\+', name))\n if has_vcs and module.params['editable']:\n args_list = [] # used if extra_args is not used at all\n if extra_args:\n args_list = extra_args.split(' ')\n if '-e' not in args_list:\n args_list.append('-e')\n # Ok, we will reconstruct the option string\n extra_args = ' '.join(args_list)\n\n if extra_args:\n cmd += ' %s' % extra_args\n if name:\n cmd += ' %s' % _get_full_name(name, version)\n elif requirements:\n cmd += ' -r %s' % requirements\n\n\n if module.check_mode:\n if extra_args or requirements or state == 'latest' or not name:\n module.exit_json(changed=True)\n elif has_vcs:\n module.exit_json(changed=True)\n\n freeze_cmd = '%s freeze' % pip\n\n rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=chdir)\n\n if rc != 0:\n module.exit_json(changed=True)\n\n out += out_pip\n err += err_pip\n\n is_present = _is_present(name, version, out.split())\n\n changed = (state == 'present' and not is_present) or (state == 'absent' and is_present)\n module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err)\n\n if requirements or has_vcs:\n freeze_cmd = '%s freeze' % pip\n out_freeze_before = module.run_command(freeze_cmd, cwd=chdir)[1]\n else:\n out_freeze_before = None\n\n rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)\n out += out_pip\n err += err_pip\n if rc == 1 and state == 'absent' and \\\n ('not installed' in out_pip or 'not installed' in err_pip):\n pass # rc is 1 when attempting to uninstall non-installed package\n elif rc != 0:\n _fail(module, cmd, out, err)\n\n if state == 'absent':\n changed = 'Successfully uninstalled' in out_pip\n else:\n if out_freeze_before is None:\n changed = 'Successfully installed' in out_pip\n else:\n out_freeze_after = module.run_command(freeze_cmd, cwd=chdir)[1]\n changed = out_freeze_before != out_freeze_after\n\n module.exit_json(changed=changed, cmd=cmd, name=name, version=version,\n state=state, requirements=requirements, virtualenv=env,\n stdout=out, stderr=err)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\n\nmain()\n", "path": "packaging/language/pip.py" } ]
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Matt Wright <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport tempfile\nimport re\nimport os\n\nDOCUMENTATION = '''\n---\nmodule: pip\nshort_description: Manages Python library dependencies.\ndescription:\n - \"Manage Python library dependencies. To use this module, one of the following keys is required: C(name)\n or C(requirements).\"\nversion_added: \"0.7\"\noptions:\n name:\n description:\n - The name of a Python library to install or the url of the remote package.\n required: false\n default: null\n version:\n description:\n - The version number to install of the Python library specified in the I(name) parameter\n required: false\n default: null\n requirements:\n description:\n - The path to a pip requirements file, which should be local to the remote system. \n File can be specified as a relative path if using the chdir option. \n required: false\n default: null\n virtualenv:\n description:\n - An optional path to a I(virtualenv) directory to install into\n required: false\n default: null\n virtualenv_site_packages:\n version_added: \"1.0\"\n description:\n - Whether the virtual environment will inherit packages from the\n global site-packages directory. Note that if this setting is\n changed on an already existing virtual environment it will not\n have any effect, the environment must be deleted and newly\n created.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n virtualenv_command:\n version_added: \"1.1\"\n description:\n - The command or a pathname to the command to create the virtual\n environment with. For example C(pyvenv), C(virtualenv),\n C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).\n required: false\n default: virtualenv\n virtualenv_python:\n version_added: \"2.0\"\n description:\n - The Python executable used for creating the virtual environment.\n For example C(python3.4), C(python2.7). When not specified, the\n system Python version is used.\n required: false\n default: null\n state:\n description:\n - The state of module\n required: false\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n extra_args:\n description:\n - Extra arguments passed to pip.\n required: false\n default: null\n version_added: \"1.0\"\n editable:\n description:\n - Pass the editable flag for versioning URLs.\n required: false\n default: yes\n version_added: \"2.0\"\n chdir:\n description:\n - cd into this directory before running the command\n version_added: \"1.3\"\n required: false\n default: null\n executable:\n description:\n - The explicit executable or a pathname to the executable to be used to\n run pip for a specific version of Python installed in the system. 
For\n example C(pip-3.3), if there are both Python 2.7 and 3.3 installations\n in the system and you want to run pip for the Python 3.3 installation.\n version_added: \"1.3\"\n required: false\n default: null\nnotes:\n - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized.\nrequirements: [ \"virtualenv\", \"pip\" ]\nauthor: \"Matt Wright (@mattupstate)\"\n'''\n\nEXAMPLES = '''\n# Install (Bottle) python package.\n- pip: name=bottle\n\n# Install (Bottle) python package on version 0.11.\n- pip: name=bottle version=0.11\n\n# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.\n- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp'\n\n# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way.\n- pip: name='git+http://myrepo/app/MyApp' editable=false\n\n# Install (MyApp) from local tarball\n- pip: name='file:///path/to/MyApp.tar.gz'\n\n# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules\n- pip: name=bottle virtualenv=/my_app/venv\n\n# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules\n- pip: name=bottle virtualenv=/my_app/venv virtualenv_site_packages=yes\n\n# Install (Bottle) into the specified (virtualenv), using Python 2.7\n- pip: name=bottle virtualenv=/my_app/venv virtualenv_command=virtualenv-2.7\n\n# Install specified python requirements.\n- pip: requirements=/my_app/requirements.txt\n\n# Install specified python requirements in indicated (virtualenv).\n- pip: requirements=/my_app/requirements.txt virtualenv=/my_app/venv\n\n# Install specified python requirements and custom Index URL.\n- pip: requirements=/my_app/requirements.txt extra_args='-i https://example.com/pypi/simple'\n\n# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.\n- pip: name=bottle executable=pip-3.3\n'''\n\ndef _get_cmd_options(module, cmd):\n thiscmd = cmd + \" --help\"\n rc, stdout, stderr = module.run_command(thiscmd)\n if rc != 0:\n module.fail_json(msg=\"Could not get output from %s: %s\" % (thiscmd, stdout + stderr))\n\n words = stdout.strip().split()\n cmd_options = [ x for x in words if x.startswith('--') ]\n return cmd_options\n\n\ndef _get_full_name(name, version=None):\n if version is None:\n resp = name\n else:\n resp = name + '==' + version\n return resp\n\ndef _is_present(name, version, installed_pkgs):\n for pkg in installed_pkgs:\n if '==' not in pkg:\n continue\n\n [pkg_name, pkg_version] = pkg.split('==')\n\n if pkg_name == name and (version is None or version == pkg_version):\n return True\n\n return False\n\n\n\ndef _get_pip(module, env=None, executable=None):\n # On Debian and Ubuntu, pip is pip.\n # On Fedora18 and up, pip is python-pip.\n # On Fedora17 and below, CentOS and RedHat 6 and 5, pip is pip-python.\n # On Fedora, CentOS, and RedHat, the exception is in the virtualenv.\n # There, pip is just pip.\n candidate_pip_basenames = ['pip', 'python-pip', 'pip-python']\n pip = None\n if executable is not None:\n executable = os.path.expanduser(executable)\n if os.path.isabs(executable):\n pip = executable\n else:\n # If you define your own executable that executable should be the only candidate.\n candidate_pip_basenames = [executable]\n if pip is None:\n if env is None:\n opt_dirs = []\n else:\n # Try pip with the virtualenv directory first.\n opt_dirs = 
['%s/bin' % env]\n for basename in candidate_pip_basenames:\n pip = module.get_bin_path(basename, False, opt_dirs)\n if pip is not None:\n break\n # pip should have been found by now. The final call to get_bin_path will\n # trigger fail_json.\n if pip is None:\n basename = candidate_pip_basenames[0]\n pip = module.get_bin_path(basename, True, opt_dirs)\n return pip\n\n\ndef _fail(module, cmd, out, err):\n msg = ''\n if out:\n msg += \"stdout: %s\" % (out, )\n if err:\n msg += \"\\n:stderr: %s\" % (err, )\n module.fail_json(cmd=cmd, msg=msg)\n\n\ndef main():\n state_map = dict(\n present='install',\n absent='uninstall -y',\n latest='install -U',\n )\n\n module = AnsibleModule(\n argument_spec=dict(\n state=dict(default='present', choices=state_map.keys()),\n name=dict(default=None, required=False),\n version=dict(default=None, required=False, type='str'),\n requirements=dict(default=None, required=False),\n virtualenv=dict(default=None, required=False),\n virtualenv_site_packages=dict(default='no', type='bool'),\n virtualenv_command=dict(default='virtualenv', required=False),\n virtualenv_python=dict(default=None, required=False, type='str'),\n use_mirrors=dict(default='yes', type='bool'),\n extra_args=dict(default=None, required=False),\n editable=dict(default='yes', type='bool', required=False),\n chdir=dict(default=None, required=False, type='path'),\n executable=dict(default=None, required=False),\n ),\n required_one_of=[['name', 'requirements']],\n mutually_exclusive=[['name', 'requirements']],\n supports_check_mode=True\n )\n\n state = module.params['state']\n name = module.params['name']\n version = module.params['version']\n requirements = module.params['requirements']\n extra_args = module.params['extra_args']\n virtualenv_python = module.params['virtualenv_python']\n chdir = module.params['chdir']\n\n if state == 'latest' and version is not None:\n module.fail_json(msg='version is incompatible with state=latest')\n\n if chdir is None:\n # this is done to avoid permissions issues with privilege escalation and virtualenvs\n chdir = tempfile.gettempdir()\n\n err = ''\n out = ''\n\n env = module.params['virtualenv']\n virtualenv_command = module.params['virtualenv_command']\n\n if env:\n env = os.path.expanduser(env)\n if not os.path.exists(os.path.join(env, 'bin', 'activate')):\n if module.check_mode:\n module.exit_json(changed=True)\n\n cmd = os.path.expanduser(virtualenv_command)\n if os.path.basename(cmd) == cmd:\n cmd = module.get_bin_path(virtualenv_command, True)\n\n if module.params['virtualenv_site_packages']:\n cmd += ' --system-site-packages'\n else:\n cmd_opts = _get_cmd_options(module, cmd)\n if '--no-site-packages' in cmd_opts:\n cmd += ' --no-site-packages'\n\n if virtualenv_python:\n cmd += ' -p%s' % virtualenv_python\n\n cmd = \"%s %s\" % (cmd, env)\n rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)\n out += out_venv\n err += err_venv\n if rc != 0:\n _fail(module, cmd, out, err)\n\n pip = _get_pip(module, env, module.params['executable'])\n\n cmd = '%s %s' % (pip, state_map[state])\n\n # If there's a virtualenv we want things we install to be able to use other\n # installations that exist as binaries within this virtualenv. 
Example: we\n # install cython and then gevent -- gevent needs to use the cython binary,\n # not just a python package that will be found by calling the right python.\n # So if there's a virtualenv, we add that bin/ to the beginning of the PATH\n # in run_command by setting path_prefix here.\n path_prefix = None\n if env:\n path_prefix = \"/\".join(pip.split('/')[:-1])\n\n # Automatically apply -e option to extra_args when source is a VCS url. VCS\n # includes those beginning with svn+, git+, hg+ or bzr+\n has_vcs = bool(name and re.match(r'(svn|git|hg|bzr)\\+', name))\n if has_vcs and module.params['editable']:\n args_list = [] # used if extra_args is not used at all\n if extra_args:\n args_list = extra_args.split(' ')\n if '-e' not in args_list:\n args_list.append('-e')\n # Ok, we will reconstruct the option string\n extra_args = ' '.join(args_list)\n\n if extra_args:\n cmd += ' %s' % extra_args\n if name:\n cmd += ' %s' % _get_full_name(name, version)\n elif requirements:\n cmd += ' -r %s' % requirements\n\n\n if module.check_mode:\n if extra_args or requirements or state == 'latest' or not name:\n module.exit_json(changed=True)\n elif has_vcs:\n module.exit_json(changed=True)\n\n freeze_cmd = '%s freeze' % pip\n\n rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=chdir)\n\n if rc != 0:\n module.exit_json(changed=True)\n\n out += out_pip\n err += err_pip\n\n is_present = _is_present(name, version, out.split())\n\n changed = (state == 'present' and not is_present) or (state == 'absent' and is_present)\n module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err)\n\n if requirements or has_vcs:\n freeze_cmd = '%s freeze' % pip\n out_freeze_before = module.run_command(freeze_cmd, cwd=chdir)[1]\n else:\n out_freeze_before = None\n\n rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)\n out += out_pip\n err += err_pip\n if rc == 1 and state == 'absent' and \\\n ('not installed' in out_pip or 'not installed' in err_pip):\n pass # rc is 1 when attempting to uninstall non-installed package\n elif rc != 0:\n _fail(module, cmd, out, err)\n\n if state == 'absent':\n changed = 'Successfully uninstalled' in out_pip\n else:\n if out_freeze_before is None:\n changed = 'Successfully installed' in out_pip\n else:\n out_freeze_after = module.run_command(freeze_cmd, cwd=chdir)[1]\n changed = out_freeze_before != out_freeze_after\n\n module.exit_json(changed=changed, cmd=cmd, name=name, version=version,\n state=state, requirements=requirements, virtualenv=env,\n stdout=out, stderr=err)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\n\nmain()\n", "path": "packaging/language/pip.py" } ]
diff --git a/packaging/language/pip.py b/packaging/language/pip.py
index 6d325282770..d896c5b9ed5 100755
--- a/packaging/language/pip.py
+++ b/packaging/language/pip.py
@@ -44,7 +44,8 @@
     default: null
   requirements:
     description:
-      - The path to a pip requirements file
+      - The path to a pip requirements file, which should be local to the remote system.
+        File can be specified as a relative path if using the chdir option.
     required: false
     default: null
   virtualenv:
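The documentation change above relies on how the module resolves a relative requirements path: pip is executed through `run_command(cmd, cwd=chdir)`, so a relative `-r` path is resolved against the `chdir` directory. A minimal sketch of that interaction, with hypothetical paths, follows.

```python
# Minimal sketch (hypothetical paths) of why a relative requirements file works
# together with chdir: the module runs pip with the working directory set to
# chdir, so "-r requirements.txt" is resolved against that directory.
import os

chdir = "/my_app"                          # value of the chdir option (hypothetical)
requirements = "requirements.txt"          # relative path from the requirements option

cmd = "pip install -r %s" % requirements   # command string the module builds
print(cmd)                                 # pip install -r requirements.txt
# With the working directory set to chdir, pip effectively reads:
print(os.path.join(chdir, requirements))   # /my_app/requirements.txt
```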
lutris__lutris-1723
Saving game settings causes a traceback

```
Traceback (most recent call last):
  File "/mnt/extrastorage/lutris/lutris/gui/lutriswindow.py", line 666, in on_game_updated
    self.view.set_selected_game(game.id)
  File "/mnt/extrastorage/lutris/lutris/gui/views/list.py", line 123, in set_selected_game
    row = self.get_row_by_id(game_id, filtered=True)
AttributeError: 'GameListView' object has no attribute 'get_row_by_id'
```

`GameListView` does not seem to provide that method. After the traceback occurs, Lutris will try to update the default wine prefix (`~/.wine`) rather than the correct one and must be restarted.
[ { "content": "# pylint: disable=no-member\nfrom gi.repository import Gtk, Pango\nfrom lutris import settings\nfrom lutris.gui.views.base import GameView\nfrom lutris.gui.views import (\n COL_NAME,\n COL_ICON,\n COL_YEAR,\n COL_RUNNER_HUMAN_NAME,\n COL_PLATFORM,\n COL_LASTPLAYED,\n COL_LASTPLAYED_TEXT,\n COL_INSTALLED_AT,\n COL_INSTALLED_AT_TEXT,\n COL_PLAYTIME_TEXT,\n COLUMN_NAMES\n)\n\n\nclass GameListView(Gtk.TreeView, GameView):\n \"\"\"Show the main list of games.\"\"\"\n\n __gsignals__ = GameView.__gsignals__\n\n def __init__(self, store):\n self.game_store = store\n self.model = self.game_store.modelsort\n super().__init__(self.model)\n self.set_rules_hint(True)\n\n # Icon column\n image_cell = Gtk.CellRendererPixbuf()\n column = Gtk.TreeViewColumn(\"\", image_cell, pixbuf=COL_ICON)\n column.set_reorderable(True)\n column.set_sort_indicator(False)\n self.append_column(column)\n\n # Text columns\n default_text_cell = self.set_text_cell()\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n\n self.set_column(name_cell, \"Name\", COL_NAME, 200)\n self.set_column(default_text_cell, \"Year\", COL_YEAR, 60)\n self.set_column(default_text_cell, \"Runner\", COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, \"Platform\", COL_PLATFORM, 120)\n self.set_column(default_text_cell, \"Last Played\", COL_LASTPLAYED_TEXT, 120)\n self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)\n self.set_column(default_text_cell, \"Installed At\", COL_INSTALLED_AT_TEXT, 120)\n self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)\n self.set_column(default_text_cell, \"Play Time\", COL_PLAYTIME_TEXT, 100)\n\n self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)\n\n self.connect_signals()\n self.connect(\"row-activated\", self.on_row_activated)\n self.get_selection().connect(\"changed\", self.on_cursor_changed)\n\n @staticmethod\n def set_text_cell():\n text_cell = Gtk.CellRendererText()\n text_cell.set_padding(10, 0)\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n\n def set_column(self, cell, header, column_id, default_width, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n self.set_column_sort(column_id if sort_id is None else sort_id)\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\n \"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\"\n )\n column.set_fixed_width(int(width) if width else default_width)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n return column\n\n def set_column_sort(self, col):\n \"\"\"Sort a column and fallback to sorting by name and runner.\"\"\"\n\n def sort_func(model, row1, row2, user_data):\n v1 = model.get_value(row1, col)\n v2 = model.get_value(row2, col)\n diff = -1 if v1 < v2 else 0 if v1 == v2 else 1\n if diff is 0:\n v1 = model.get_value(row1, COL_NAME)\n v2 = model.get_value(row2, COL_NAME)\n diff = -1 if v1 < v2 else 0 if v1 == v2 else 1\n if diff is 0:\n v1 = model.get_value(row1, COL_RUNNER_HUMAN_NAME)\n v2 = model.get_value(row2, COL_RUNNER_HUMAN_NAME)\n diff = -1 if v1 < v2 else 0 if v1 == v2 else 1\n return diff\n\n self.model.set_sort_func(col, sort_func)\n\n def set_sort_with_column(self, col, sort_col):\n \"\"\"Set to sort a column by using another column\"\"\"\n\n def sort_func(model, row1, row2, _user_data):\n value1 = model.get_value(row1, 
sort_col)\n value2 = model.get_value(row2, sort_col)\n return -1 if value1 < value2 else 0 if value1 == value2 else 1\n\n self.model.set_sort_func(col, sort_func)\n\n def get_selected_item(self):\n \"\"\"Return the currently selected game's id.\"\"\"\n selection = self.get_selection()\n if not selection:\n return None\n model, select_iter = selection.get_selected()\n if select_iter:\n return select_iter\n\n def select(self):\n self.set_cursor(self.current_path[0])\n\n def set_selected_game(self, game_id):\n row = self.get_row_by_id(game_id, filtered=True)\n if row:\n self.set_cursor(row.path)\n\n def on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n if selected_item:\n selected_game = self.get_selected_game(selected_item)\n else:\n selected_game = None\n self.emit(\"game-activated\", selected_game)\n\n def on_cursor_changed(self, widget, line=None, column=None):\n selected_item = self.get_selected_item()\n if selected_item:\n self.selected_game = self.get_selected_game(selected_item)\n else:\n self.selected_game = None\n self.emit(\"game-selected\", self.selected_game)\n\n @staticmethod\n def on_column_width_changed(col, *args):\n col_name = col.get_title()\n if col_name:\n settings.write_setting(\n col_name.replace(\" \", \"\") + \"_column_width\",\n col.get_fixed_width(),\n \"list view\",\n )\n", "path": "lutris/gui/views/list.py" } ]
[ { "content": "# pylint: disable=no-member\nfrom gi.repository import Gtk, Pango\nfrom lutris import settings\nfrom lutris.gui.views.base import GameView\nfrom lutris.gui.views import (\n COL_NAME,\n COL_ICON,\n COL_YEAR,\n COL_RUNNER_HUMAN_NAME,\n COL_PLATFORM,\n COL_LASTPLAYED,\n COL_LASTPLAYED_TEXT,\n COL_INSTALLED_AT,\n COL_INSTALLED_AT_TEXT,\n COL_PLAYTIME_TEXT,\n COLUMN_NAMES\n)\n\n\nclass GameListView(Gtk.TreeView, GameView):\n \"\"\"Show the main list of games.\"\"\"\n\n __gsignals__ = GameView.__gsignals__\n\n def __init__(self, store):\n self.game_store = store\n self.model = self.game_store.modelsort\n super().__init__(self.model)\n self.set_rules_hint(True)\n\n # Icon column\n image_cell = Gtk.CellRendererPixbuf()\n column = Gtk.TreeViewColumn(\"\", image_cell, pixbuf=COL_ICON)\n column.set_reorderable(True)\n column.set_sort_indicator(False)\n self.append_column(column)\n\n # Text columns\n default_text_cell = self.set_text_cell()\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n\n self.set_column(name_cell, \"Name\", COL_NAME, 200)\n self.set_column(default_text_cell, \"Year\", COL_YEAR, 60)\n self.set_column(default_text_cell, \"Runner\", COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, \"Platform\", COL_PLATFORM, 120)\n self.set_column(default_text_cell, \"Last Played\", COL_LASTPLAYED_TEXT, 120)\n self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)\n self.set_column(default_text_cell, \"Installed At\", COL_INSTALLED_AT_TEXT, 120)\n self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)\n self.set_column(default_text_cell, \"Play Time\", COL_PLAYTIME_TEXT, 100)\n\n self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)\n\n self.connect_signals()\n self.connect(\"row-activated\", self.on_row_activated)\n self.get_selection().connect(\"changed\", self.on_cursor_changed)\n\n @staticmethod\n def set_text_cell():\n text_cell = Gtk.CellRendererText()\n text_cell.set_padding(10, 0)\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n\n def set_column(self, cell, header, column_id, default_width, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n self.set_column_sort(column_id if sort_id is None else sort_id)\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\n \"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\"\n )\n column.set_fixed_width(int(width) if width else default_width)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n return column\n\n def set_column_sort(self, col):\n \"\"\"Sort a column and fallback to sorting by name and runner.\"\"\"\n\n def sort_func(model, row1, row2, user_data):\n v1 = model.get_value(row1, col)\n v2 = model.get_value(row2, col)\n diff = -1 if v1 < v2 else 0 if v1 == v2 else 1\n if diff is 0:\n v1 = model.get_value(row1, COL_NAME)\n v2 = model.get_value(row2, COL_NAME)\n diff = -1 if v1 < v2 else 0 if v1 == v2 else 1\n if diff is 0:\n v1 = model.get_value(row1, COL_RUNNER_HUMAN_NAME)\n v2 = model.get_value(row2, COL_RUNNER_HUMAN_NAME)\n diff = -1 if v1 < v2 else 0 if v1 == v2 else 1\n return diff\n\n self.model.set_sort_func(col, sort_func)\n\n def set_sort_with_column(self, col, sort_col):\n \"\"\"Set to sort a column by using another column\"\"\"\n\n def sort_func(model, row1, row2, _user_data):\n value1 = model.get_value(row1, 
sort_col)\n value2 = model.get_value(row2, sort_col)\n return -1 if value1 < value2 else 0 if value1 == value2 else 1\n\n self.model.set_sort_func(col, sort_func)\n\n def get_selected_item(self):\n \"\"\"Return the currently selected game's id.\"\"\"\n selection = self.get_selection()\n if not selection:\n return None\n model, select_iter = selection.get_selected()\n if select_iter:\n return select_iter\n\n def select(self):\n self.set_cursor(self.current_path[0])\n\n def set_selected_game(self, game_id):\n row = self.game_store.get_row_by_id(game_id, filtered=True)\n if row:\n self.set_cursor(row.path)\n\n def on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n if selected_item:\n selected_game = self.get_selected_game(selected_item)\n else:\n selected_game = None\n self.emit(\"game-activated\", selected_game)\n\n def on_cursor_changed(self, widget, line=None, column=None):\n selected_item = self.get_selected_item()\n if selected_item:\n self.selected_game = self.get_selected_game(selected_item)\n else:\n self.selected_game = None\n self.emit(\"game-selected\", self.selected_game)\n\n @staticmethod\n def on_column_width_changed(col, *args):\n col_name = col.get_title()\n if col_name:\n settings.write_setting(\n col_name.replace(\" \", \"\") + \"_column_width\",\n col.get_fixed_width(),\n \"list view\",\n )\n", "path": "lutris/gui/views/list.py" } ]
diff --git a/lutris/gui/views/list.py b/lutris/gui/views/list.py
index 5f7e9b7f81..14ff5bd340 100644
--- a/lutris/gui/views/list.py
+++ b/lutris/gui/views/list.py
@@ -120,7 +120,7 @@ def select(self):
         self.set_cursor(self.current_path[0])
 
     def set_selected_game(self, game_id):
-        row = self.get_row_by_id(game_id, filtered=True)
+        row = self.game_store.get_row_by_id(game_id, filtered=True)
         if row:
             self.set_cursor(row.path)
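For quick reference, here is a condensed, self-contained sketch of the failure and the one-line delegation fix shown in the diff above. The store internals are invented for illustration; only the `get_row_by_id(game_id, filtered=True)` signature mirrors the real store API.

```python
# Condensed sketch of the change: the row lookup lives on the store, not on the
# view, so the view must delegate instead of calling a method it never defined.
class FakeStore:
    def __init__(self):
        self._rows = {}                     # game_id -> row (hypothetical internals)

    def get_row_by_id(self, game_id, filtered=True):
        return self._rows.get(game_id)


class View:
    def __init__(self, store):
        self.game_store = store

    def set_selected_game(self, game_id):
        # old: self.get_row_by_id(...) -> AttributeError, View has no such method
        return self.game_store.get_row_by_id(game_id, filtered=True)


print(View(FakeStore()).set_selected_game(42))   # None: no row found, but no traceback
```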
bridgecrewio__checkov-3226
Parsing does not work with Terraform files encoded as UTF-8 with a BOM

**Example**
Create an empty file with Unix line endings and save it in UTF-8 encoding with a BOM. Running checkov will fail; removing the BOM resolves the issue.

**Stacktrace**
```
Traceback (most recent call last):
  File "/usr/local/lib/python3.8/dist-packages/lark/lexer.py", line 536, in lex
    token = self.root_lexer.next_token(lexer_state, parser_state)
  File "/usr/local/lib/python3.8/dist-packages/lark/lexer.py", line 466, in next_token
    raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column,
lark.exceptions.UnexpectedCharacters: <unprintable UnexpectedCharacters object>

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/local/lib/python3.8/dist-packages/checkov/terraform/parser.py", line 742, in _load_or_die_quietly
    raw_data = hcl2.load(f)
```
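The failure mode is easy to reproduce outside checkov: a BOM read in plain UTF-8 text mode reaches the lark-based HCL lexer as the character `'\ufeff'`, which its grammar rejects. The sketch below demonstrates this and shows Python's `utf-8-sig` codec, one common way to tolerate a leading BOM; it is an illustration, not necessarily the exact change the fix adopts.

```python
# Illustrative sketch: a UTF-8 BOM survives a plain text-mode open() as the
# character '\ufeff'; the "utf-8-sig" codec strips a leading BOM and behaves
# like plain UTF-8 when none is present.
import codecs
import tempfile

with tempfile.NamedTemporaryFile(suffix=".tf", delete=False) as tmp:
    tmp.write(codecs.BOM_UTF8)            # an "empty" .tf file that only holds a BOM
    path = tmp.name

with open(path, "r", encoding="utf-8") as f:
    print(repr(f.read()))                 # '\ufeff' -> unexpected character for the parser

with open(path, "r", encoding="utf-8-sig") as f:
    print(repr(f.read()))                 # ''      -> parses like a truly empty file
```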
[ { "content": "from __future__ import annotations\n\nimport json\nimport logging\nimport os\nimport re\nfrom collections.abc import Sequence\nfrom copy import deepcopy\nfrom json import dumps, loads\nfrom pathlib import Path\nfrom typing import Optional, Dict, Mapping, Set, Tuple, Callable, Any, List, Type\n\nimport deep_merge\nimport hcl2\nfrom lark import Tree\nfrom typing_extensions import TypeAlias\n\nfrom checkov.common.runners.base_runner import filter_ignored_paths, IGNORE_HIDDEN_DIRECTORY_ENV\nfrom checkov.common.util.config_utils import should_scan_hcl_files\nfrom checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR, RESOLVED_MODULE_ENTRY_NAME\nfrom checkov.common.util.json_utils import CustomJSONEncoder\nfrom checkov.common.variables.context import EvaluationContext\nfrom checkov.terraform.checks.utils.dependency_path_handler import unify_dependency_path\nfrom checkov.terraform.graph_builder.graph_components.block_types import BlockType\nfrom checkov.terraform.graph_builder.graph_components.module import Module\nfrom checkov.terraform.graph_builder.utils import remove_module_dependency_in_path\nfrom checkov.terraform.module_loading.content import ModuleContent\nfrom checkov.terraform.module_loading.module_finder import load_tf_modules\nfrom checkov.terraform.module_loading.registry import module_loader_registry as default_ml_registry, \\\n ModuleLoaderRegistry\nfrom checkov.common.util.parser_utils import eval_string, find_var_blocks\n\n_Hcl2Payload: TypeAlias = \"dict[str, list[dict[str, Any]]]\"\n\nexternal_modules_download_path = os.environ.get('EXTERNAL_MODULES_DIR', DEFAULT_EXTERNAL_MODULES_DIR)\nGOOD_BLOCK_TYPES = {BlockType.LOCALS, BlockType.TERRAFORM} # used for cleaning bad tf definitions\n\nENTITY_NAME_PATTERN = re.compile(r\"[^\\W0-9][\\w-]*\")\nRESOLVED_MODULE_PATTERN = re.compile(r\"\\[.+\\#.+\\]\")\n\n\ndef _filter_ignored_paths(root, paths, excluded_paths):\n filter_ignored_paths(root, paths, excluded_paths)\n [paths.remove(path) for path in list(paths) if path in [default_ml_registry.external_modules_folder_name]]\n\n\nclass Parser:\n def __init__(self, module_class: Type[Module] = Module):\n self.module_class = module_class\n self._parsed_directories: set[str] = set()\n self.external_modules_source_map: Dict[Tuple[str, str], str] = {}\n self.module_address_map: Dict[Tuple[str, str], str] = {}\n self.loaded_files_map = {}\n\n # This ensures that we don't try to double-load modules\n # Tuple is <file>, <module_index>, <name> (see _load_modules)\n self._loaded_modules: Set[Tuple[str, int, str]] = set()\n self.external_variables_data = []\n\n def _init(self, directory: str, out_definitions: Optional[Dict],\n out_evaluations_context: Dict[str, Dict[str, EvaluationContext]],\n out_parsing_errors: Dict[str, Exception],\n env_vars: Mapping[str, str],\n download_external_modules: bool,\n external_modules_download_path: str,\n excluded_paths: Optional[List[str]] = None,\n tf_var_files: Optional[List[str]] = None):\n self.directory = directory\n self.out_definitions = out_definitions\n self.out_evaluations_context = out_evaluations_context\n self.out_parsing_errors = out_parsing_errors\n self.env_vars = env_vars\n self.download_external_modules = download_external_modules\n self.external_modules_download_path = external_modules_download_path\n self.external_modules_source_map = {}\n self.module_address_map = {}\n self.tf_var_files = tf_var_files\n self.scan_hcl = should_scan_hcl_files()\n self.dirname_cache = {}\n\n if self.out_evaluations_context is None:\n 
self.out_evaluations_context = {}\n if self.out_parsing_errors is None:\n self.out_parsing_errors = {}\n if self.env_vars is None:\n self.env_vars = dict(os.environ)\n self.excluded_paths = excluded_paths\n\n def _check_process_dir(self, directory: str) -> bool:\n if directory not in self._parsed_directories:\n self._parsed_directories.add(directory)\n return True\n else:\n return False\n\n def parse_directory(self, directory: str, out_definitions: Optional[Dict],\n out_evaluations_context: Dict[str, Dict[str, EvaluationContext]] = None,\n out_parsing_errors: Dict[str, Exception] = None,\n env_vars: Mapping[str, str] = None,\n download_external_modules: bool = False,\n external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,\n excluded_paths: Optional[List[str]] = None,\n vars_files: Optional[List[str]] = None,\n external_modules_content_cache: Optional[Dict[str, ModuleContent]] = None):\n self._init(directory, out_definitions, out_evaluations_context, out_parsing_errors, env_vars,\n download_external_modules, external_modules_download_path, excluded_paths)\n self._parsed_directories.clear()\n default_ml_registry.root_dir = directory\n default_ml_registry.download_external_modules = download_external_modules\n default_ml_registry.external_modules_folder_name = external_modules_download_path\n default_ml_registry.module_content_cache = external_modules_content_cache if external_modules_content_cache else {}\n load_tf_modules(directory)\n self._parse_directory(dir_filter=lambda d: self._check_process_dir(d), vars_files=vars_files)\n\n def parse_file(\n self, file: str, parsing_errors: Optional[Dict[str, Exception]] = None, scan_hcl: bool = False\n ) -> Optional[Dict[str, Any]]:\n if file.endswith(\".tf\") or file.endswith(\".tf.json\") or (scan_hcl and file.endswith(\".hcl\")):\n parse_result = _load_or_die_quietly(file, parsing_errors)\n if parse_result:\n parse_result = self._serialize_definitions(parse_result)\n parse_result = self._clean_parser_types(parse_result)\n return parse_result\n\n return None\n\n def _parse_directory(self, include_sub_dirs: bool = True,\n module_loader_registry: ModuleLoaderRegistry = default_ml_registry,\n dir_filter: Callable[[str], bool] = lambda _: True,\n vars_files: Optional[List[str]] = None) -> None:\n \"\"\"\n Load and resolve configuration files starting in the given directory, merging the\n resulting data into `tf_definitions`. This loads data according to the Terraform Code Organization\n specification (https://www.terraform.io/docs/configuration/index.html#code-organization), starting\n in the given directory and possibly moving out from there.\n\n The resulting data dictionary generally follows the layout of HCL parsing with a couple distinctions:\n - Data is broken out by file from which the data was loaded. So: <file>: <data>\n - Loaded modules will also be keyed by referrer info: <file>[<referring_file>#<index>]: <data>\n - Module block will included a \"__resolved__\" key with a list of the file/referrer names under\n which data for the file was loaded. For example: \"__resolved__\": [\"main.tf#0\"]. The values will\n correspond to the file names mentioned in the first bullet.\n - All variables that can be resolved will be resolved.\n\n\n :param include_sub_dirs: If true, subdirectories will be walked.\n\n :param module_loader_registry: Registry used for resolving modules. 
This allows customization of how\n much resolution is performed (and easier testing) by using a manually\n constructed registry rather than the default.\n :param dir_filter: Determines whether or not a directory should be processed. Returning\n True will allow processing. The argument will be the absolute path of\n the directory.\n \"\"\"\n keys_referenced_as_modules: Set[str] = set()\n\n if include_sub_dirs:\n for sub_dir, d_names, f_names in os.walk(self.directory):\n # filter subdirectories for future iterations (we filter files while iterating the directory)\n _filter_ignored_paths(sub_dir, d_names, self.excluded_paths)\n if dir_filter(os.path.abspath(sub_dir)):\n self._internal_dir_load(sub_dir, module_loader_registry, dir_filter,\n keys_referenced_as_modules, vars_files=vars_files,\n root_dir=self.directory, excluded_paths=self.excluded_paths)\n else:\n self._internal_dir_load(self.directory, module_loader_registry, dir_filter,\n keys_referenced_as_modules, vars_files=vars_files)\n\n # Ensure anything that was referenced as a module is removed\n for key in keys_referenced_as_modules:\n if key in self.out_definitions:\n del self.out_definitions[key]\n\n def _internal_dir_load(self, directory: str,\n module_loader_registry: ModuleLoaderRegistry,\n dir_filter: Callable[[str], bool],\n keys_referenced_as_modules: Set[str],\n specified_vars: Optional[Mapping[str, str]] = None,\n vars_files: Optional[List[str]] = None,\n root_dir: Optional[str] = None,\n excluded_paths: Optional[List[str]] = None):\n \"\"\"\n See `parse_directory` docs.\n :param directory: Directory in which .tf and .tfvars files will be loaded.\n :param module_loader_registry: Registry used for resolving modules. This allows customization of how\n much resolution is performed (and easier testing) by using a manually\n constructed registry rather than the default.\n :param dir_filter: Determines whether or not a directory should be processed. Returning\n True will allow processing. 
The argument will be the absolute path of\n the directory.\n :param specified_vars: Specifically defined variable values, overriding values from any other source.\n \"\"\"\n\n # Stage 1: Look for applicable files in the directory:\n # https://www.terraform.io/docs/configuration/index.html#code-organization\n # Load the raw data for non-variable files, but perform no processing other than loading\n # variable default values.\n # Variable files are also flagged for later processing.\n var_value_and_file_map: Dict[str, Tuple[Any, str]] = {}\n hcl_tfvars: Optional[os.DirEntry] = None\n json_tfvars: Optional[os.DirEntry] = None\n auto_vars_files: List[os.DirEntry] = [] # *.auto.tfvars / *.auto.tfvars.json\n explicit_var_files: List[os.DirEntry] = [] # files passed with --var-file; only process the ones that are in this directory\n\n dir_contents = list(os.scandir(directory))\n if excluded_paths or IGNORE_HIDDEN_DIRECTORY_ENV:\n filter_ignored_paths(root_dir, dir_contents, excluded_paths)\n\n tf_files_to_load = []\n for file in dir_contents:\n # Ignore directories and hidden files\n try:\n if not file.is_file():\n continue\n except OSError:\n # Skip files that can't be accessed\n continue\n\n # Variable files\n # See: https://www.terraform.io/docs/configuration/variables.html#variable-definitions-tfvars-files\n if file.name == \"terraform.tfvars.json\":\n json_tfvars = file\n elif file.name == \"terraform.tfvars\":\n hcl_tfvars = file\n elif file.name.endswith(\".auto.tfvars.json\") or file.name.endswith(\".auto.tfvars\"):\n auto_vars_files.append(file)\n elif vars_files and file.path in vars_files:\n explicit_var_files.append(file)\n\n # Resource files\n elif file.name.endswith(\".tf\") or (self.scan_hcl and file.name.endswith('.hcl')): # TODO: add support for .tf.json\n tf_files_to_load.append(file)\n\n files_to_data = self._load_files(tf_files_to_load)\n\n for file, data in sorted(files_to_data, key=lambda x: x[0]):\n if not data:\n continue\n self.out_definitions[file] = data\n\n # Load variable defaults\n # (see https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable)\n var_blocks = data.get(\"variable\")\n if var_blocks and isinstance(var_blocks, list):\n for var_block in var_blocks:\n if not isinstance(var_block, dict):\n continue\n for var_name, var_definition in var_block.items():\n if not isinstance(var_definition, dict):\n continue\n\n default_value = var_definition.get(\"default\")\n if default_value is not None and isinstance(default_value, list):\n self.external_variables_data.append((var_name, default_value[0], file))\n var_value_and_file_map[var_name] = default_value[0], file\n\n # Stage 2: Load vars in proper order:\n # https://www.terraform.io/docs/configuration/variables.html#variable-definition-precedence\n # Defaults are loaded in stage 1.\n # Then loading in this order with later taking precedence:\n # - Environment variables\n # - The terraform.tfvars file, if present.\n # - The terraform.tfvars.json file, if present.\n # - Any *.auto.tfvars or *.auto.tfvars.json files, processed in lexical order of\n # their filenames.\n # Overriding everything else, variables form `specified_vars`, which are considered\n # directly set.\n for key, value in self.env_vars.items(): # env vars\n if not key.startswith(\"TF_VAR_\"):\n continue\n var_value_and_file_map[key[7:]] = value, f\"env:{key}\"\n self.external_variables_data.append((key[7:], value, f\"env:{key}\"))\n if hcl_tfvars: # terraform.tfvars\n data = _load_or_die_quietly(hcl_tfvars, 
self.out_parsing_errors, clean_definitions=False)\n if data:\n var_value_and_file_map.update({k: (_safe_index(v, 0), hcl_tfvars.path) for k, v in data.items()})\n self.external_variables_data.extend([(k, _safe_index(v, 0), hcl_tfvars.path) for k, v in data.items()])\n if json_tfvars: # terraform.tfvars.json\n data = _load_or_die_quietly(json_tfvars, self.out_parsing_errors)\n if data:\n var_value_and_file_map.update({k: (v, json_tfvars.path) for k, v in data.items()})\n self.external_variables_data.extend([(k, v, json_tfvars.path) for k, v in data.items()])\n\n auto_var_files_to_data = self._load_files(auto_vars_files)\n for var_file, data in sorted(auto_var_files_to_data, key=lambda x: x[0]):\n if data:\n var_value_and_file_map.update({k: (v, var_file) for k, v in data.items()})\n self.external_variables_data.extend([(k, v, var_file) for k, v in data.items()])\n\n explicit_var_files_to_data = self._load_files(explicit_var_files)\n # it's possible that os.scandir returned the var files in a different order than they were specified\n for var_file, data in sorted(explicit_var_files_to_data, key=lambda x: vars_files.index(x[0])):\n if data:\n var_value_and_file_map.update({k: (v, var_file) for k, v in data.items()})\n self.external_variables_data.extend([(k, v, var_file) for k, v in data.items()])\n\n if specified_vars: # specified\n var_value_and_file_map.update({k: (v, \"manual specification\") for k, v in specified_vars.items()})\n self.external_variables_data.extend([(k, v, \"manual specification\") for k, v in specified_vars.items()])\n\n # Stage 4: Load modules\n # This stage needs to be done in a loop (again... alas, no DAG) because modules might not\n # be loadable until other modules are loaded. This happens when parameters to one module\n # depend on the output of another. For such cases, the base module must be loaded, then\n # a parameter resolution pass needs to happen, then the second module can be loaded.\n #\n # One gotcha is that we need to make sure we load all modules at some point, even if their\n # parameters don't resolve. 
So, if we hit a spot where resolution doesn't change anything\n # and there are still modules to be loaded, they will be forced on the next pass.\n force_final_module_load = False\n for i in range(0, 10): # circuit breaker - no more than 10 loops\n logging.debug(\"Module load loop %d\", i)\n\n # Stage 4a: Load eligible modules\n has_more_modules = self._load_modules(directory, module_loader_registry, dir_filter,\n keys_referenced_as_modules, force_final_module_load)\n\n # Stage 4b: Variable resolution round 2 - now with (possibly more) modules\n made_var_changes = False\n if not has_more_modules:\n break # nothing more to do\n elif not made_var_changes:\n # If there are more modules to load but no variables were resolved, then to a final module\n # load, forcing things through without complete resolution.\n force_final_module_load = True\n\n def _load_files(self, files: list[os.DirEntry]):\n def _load_file(file: os.DirEntry):\n parsing_errors = {}\n result = _load_or_die_quietly(file, parsing_errors)\n # the exceptions type can un-pickleable\n for path, e in parsing_errors.items():\n parsing_errors[path] = e\n\n return (file.path, result), parsing_errors\n\n files_to_data = []\n files_to_parse = []\n for file in files:\n data = self.loaded_files_map.get(file.path)\n if data:\n files_to_data.append((file.path, data))\n else:\n files_to_parse.append(file)\n\n results = [_load_file(f) for f in files_to_parse]\n for result, parsing_errors in results:\n self.out_parsing_errors.update(parsing_errors)\n files_to_data.append(result)\n if result[0] not in self.loaded_files_map:\n self.loaded_files_map[result[0]] = result[1]\n return files_to_data\n\n def _load_modules(self, root_dir: str, module_loader_registry: ModuleLoaderRegistry,\n dir_filter: Callable[[str], bool],\n keys_referenced_as_modules: Set[str], ignore_unresolved_params: bool = False) -> bool:\n \"\"\"\n Load modules which have not already been loaded and can be loaded (don't have unresolved parameters).\n\n :param ignore_unresolved_params: If true, not-yet-loaded modules will be loaded even if they are\n passed parameters that are not fully resolved.\n :return: True if there were modules that were not loaded due to unresolved\n parameters.\n \"\"\"\n all_module_definitions = {}\n all_module_evaluations_context = {}\n skipped_a_module = False\n for file in list(self.out_definitions.keys()):\n # Don't process a file in a directory other than the directory we're processing. For example,\n # if we're down dealing with <top_dir>/<module>/something.tf, we don't want to rescan files\n # up in <top_dir>.\n if self.get_dirname(file) != root_dir:\n continue\n # Don't process a file reference which has already been processed\n if file.endswith(\"]\"):\n continue\n\n file_data = self.out_definitions.get(file)\n if file_data is None:\n continue\n module_calls = file_data.get(\"module\")\n if not module_calls or not isinstance(module_calls, list):\n continue\n\n for module_index, module_call in enumerate(module_calls):\n\n if not isinstance(module_call, dict):\n continue\n\n # There should only be one module reference per outer dict, but... 
safety first\n for module_call_name, module_call_data in module_call.items():\n if not isinstance(module_call_data, dict):\n continue\n\n module_address = (file, module_index, module_call_name)\n if module_address in self._loaded_modules:\n continue\n\n # Variables being passed to module, \"source\" and \"version\" are reserved\n specified_vars = {k: v[0] if isinstance(v, list) else v for k, v in module_call_data.items()\n if k != \"source\" and k != \"version\"}\n\n if not ignore_unresolved_params:\n has_unresolved_params = False\n for k, v in specified_vars.items():\n if not is_acceptable_module_param(v) or not is_acceptable_module_param(k):\n has_unresolved_params = True\n break\n if has_unresolved_params:\n skipped_a_module = True\n continue\n self._loaded_modules.add(module_address)\n\n source = module_call_data.get(\"source\")\n if not source or not isinstance(source, list):\n continue\n source = source[0]\n if not isinstance(source, str):\n logging.debug(f\"Skipping loading of {module_call_name} as source is not a string, it is: {source}\")\n continue\n\n # Special handling for local sources to make sure we aren't double-parsing\n if source.startswith(\"./\") or source.startswith(\"../\"):\n source = os.path.normpath(\n os.path.join(os.path.dirname(_remove_module_dependency_in_path(file)), source))\n\n version = module_call_data.get(\"version\", \"latest\")\n if version and isinstance(version, list):\n version = version[0]\n try:\n content = module_loader_registry.load(root_dir, source, version)\n if not content.loaded():\n logging.info(f'Got no content for {source}:{version}')\n continue\n\n self._internal_dir_load(directory=content.path(),\n module_loader_registry=module_loader_registry,\n dir_filter=dir_filter, specified_vars=specified_vars,\n keys_referenced_as_modules=keys_referenced_as_modules)\n\n module_definitions = {path: self.out_definitions[path] for path in\n list(self.out_definitions.keys()) if\n self.get_dirname(path) == content.path()}\n\n if not module_definitions:\n continue\n\n # NOTE: Modules are put into the main TF definitions structure \"as normal\" with the\n # notable exception of the file name. For loaded modules referrer information is\n # appended to the file name to create this format:\n # <file_name>[<referred_file>#<referrer_index>]\n # For example:\n # /the/path/module/my_module.tf[/the/path/main.tf#0]\n # The referrer and index allow a module allow a module to be loaded multiple\n # times with differing data.\n #\n # In addition, the referring block will have a \"__resolved__\" key added with a\n # list pointing to the location of the module data that was resolved. 
For example:\n # \"__resolved__\": [\"/the/path/module/my_module.tf[/the/path/main.tf#0]\"]\n\n resolved_loc_list = module_call_data.get(RESOLVED_MODULE_ENTRY_NAME)\n if resolved_loc_list is None:\n resolved_loc_list = []\n module_call_data[RESOLVED_MODULE_ENTRY_NAME] = resolved_loc_list\n\n # NOTE: Modules can load other modules, so only append referrer information where it\n # has not already been added.\n keys = list(module_definitions.keys())\n for key in keys:\n if key.endswith(\"]\") or file.endswith(\"]\"):\n continue\n keys_referenced_as_modules.add(key)\n new_key = f\"{key}[{file}#{module_index}]\"\n module_definitions[new_key] = module_definitions[key]\n del module_definitions[key]\n del self.out_definitions[key]\n if new_key not in resolved_loc_list:\n resolved_loc_list.append(new_key)\n if (file, module_call_name) not in self.module_address_map:\n self.module_address_map[(file, module_call_name)] = str(module_index)\n resolved_loc_list.sort() # For testing, need predictable ordering\n\n if all_module_definitions:\n deep_merge.merge(all_module_definitions, module_definitions)\n else:\n all_module_definitions = module_definitions\n\n self.external_modules_source_map[(source, version)] = content.path()\n except Exception as e:\n logging.warning(\"Unable to load module (source=\\\"%s\\\" version=\\\"%s\\\"): %s\",\n source, version, e)\n\n if all_module_definitions:\n deep_merge.merge(self.out_definitions, all_module_definitions)\n if all_module_evaluations_context:\n deep_merge.merge(self.out_evaluations_context, all_module_evaluations_context)\n return skipped_a_module\n\n def parse_hcl_module(\n self,\n source_dir: str,\n source: str,\n download_external_modules: bool = False,\n external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,\n parsing_errors: dict[str, Exception] | None = None,\n excluded_paths: list[str] | None = None,\n vars_files: list[str] | None = None,\n external_modules_content_cache: dict[str, ModuleContent] | None = None,\n create_graph: bool = True,\n ) -> tuple[Module | None, dict[str, dict[str, Any]]]:\n tf_definitions: dict[str, dict[str, Any]] = {}\n self.parse_directory(directory=source_dir, out_definitions=tf_definitions, out_evaluations_context={},\n out_parsing_errors=parsing_errors if parsing_errors is not None else {},\n download_external_modules=download_external_modules,\n external_modules_download_path=external_modules_download_path, excluded_paths=excluded_paths,\n vars_files=vars_files, external_modules_content_cache=external_modules_content_cache)\n tf_definitions = self._clean_parser_types(tf_definitions)\n tf_definitions = self._serialize_definitions(tf_definitions)\n\n module = None\n if create_graph:\n module, tf_definitions = self.parse_hcl_module_from_tf_definitions(tf_definitions, source_dir, source)\n\n return module, tf_definitions\n\n def parse_hcl_module_from_tf_definitions(\n self,\n tf_definitions: Dict[str, Dict[str, Any]],\n source_dir: str,\n source: str,\n ) -> Tuple[Module, Dict[str, Dict[str, Any]]]:\n module_dependency_map, tf_definitions, dep_index_mapping = self.get_module_dependency_map(tf_definitions)\n module = self.get_new_module(\n source_dir=source_dir,\n module_dependency_map=module_dependency_map,\n module_address_map=self.module_address_map,\n external_modules_source_map=self.external_modules_source_map,\n dep_index_mapping=dep_index_mapping,\n )\n self.add_tfvars(module, source)\n copy_of_tf_definitions = deepcopy(tf_definitions)\n for file_path, blocks in copy_of_tf_definitions.items():\n for 
block_type in blocks:\n try:\n module.add_blocks(block_type, blocks[block_type], file_path, source)\n except Exception as e:\n logging.warning(f'Failed to add block {blocks[block_type]}. Error:')\n logging.warning(e, exc_info=False)\n return module, tf_definitions\n\n @staticmethod\n def _clean_parser_types(conf: dict[str, Any]) -> dict[str, Any]:\n if not conf:\n return conf\n\n sorted_keys = list(conf.keys())\n first_key_type = type(sorted_keys[0])\n if first_key_type is None:\n return {}\n\n if all(isinstance(x, first_key_type) for x in sorted_keys):\n sorted_keys.sort()\n\n # Create a new dict where the keys are sorted alphabetically\n sorted_conf = {key: conf[key] for key in sorted_keys}\n for attribute, values in sorted_conf.items():\n if attribute == 'alias':\n continue\n if isinstance(values, list):\n sorted_conf[attribute] = Parser._clean_parser_types_lst(values)\n elif isinstance(values, dict):\n sorted_conf[attribute] = Parser._clean_parser_types(values)\n elif isinstance(values, str) and values in ('true', 'false'):\n sorted_conf[attribute] = True if values == 'true' else False\n elif isinstance(values, set):\n sorted_conf[attribute] = Parser._clean_parser_types_lst(list(values))\n elif isinstance(values, Tree):\n sorted_conf[attribute] = str(values)\n return sorted_conf\n\n @staticmethod\n def _clean_parser_types_lst(values: list[Any]) -> list[Any]:\n for idx, val in enumerate(values):\n if isinstance(val, dict):\n values[idx] = Parser._clean_parser_types(val)\n elif isinstance(val, list):\n values[idx] = Parser._clean_parser_types_lst(val)\n elif isinstance(val, str):\n if val == 'true':\n values[idx] = True\n elif val == 'false':\n values[idx] = False\n elif isinstance(val, set):\n values[idx] = Parser._clean_parser_types_lst(list(val))\n str_values_in_lst = [val for val in values if isinstance(val, str)]\n str_values_in_lst.sort()\n result_values = [val for val in values if not isinstance(val, str)]\n result_values.extend(str_values_in_lst)\n return result_values\n\n @staticmethod\n def _serialize_definitions(tf_definitions: dict[str, _Hcl2Payload]) -> dict[str, _Hcl2Payload]:\n return loads(dumps(tf_definitions, cls=CustomJSONEncoder))\n\n @staticmethod\n def get_next_vertices(evaluated_files: list[str], unevaluated_files: list[str]) -> tuple[list[str], list[str]]:\n \"\"\"\n This function implements a lazy separation of levels for the evaluated files. It receives the evaluated\n files, and returns 2 lists:\n 1. The next level of files - files from the unevaluated_files which have no unresolved dependency (either\n no dependency or all dependencies were evaluated).\n 2. 
unevaluated - files which have yet to be evaluated, and still have pending dependencies\n\n Let's say we have this dependency tree:\n a -> b\n x -> b\n y -> c\n z -> b\n b -> c\n c -> d\n\n The first run will return [a, y, x, z] as the next level since all of them have no dependencies\n The second run with the evaluated being [a, y, x, z] will return [b] as the next level.\n Please mind that [c] has some resolved dependencies (from y), but has unresolved dependencies from [b].\n The third run will return [c], and the fourth will return [d].\n \"\"\"\n next_level, unevaluated, do_not_eval_yet = [], [], []\n for key in unevaluated_files:\n found = False\n for eval_key in evaluated_files:\n if eval_key in key:\n found = True\n break\n if not found:\n do_not_eval_yet.append(key.split('[')[0])\n unevaluated.append(key)\n else:\n next_level.append(key)\n\n move_to_uneval = list(filter(lambda k: k.split('[')[0] in do_not_eval_yet, next_level))\n for k in move_to_uneval:\n next_level.remove(k)\n unevaluated.append(k)\n return next_level, unevaluated\n\n @staticmethod\n def get_module_dependency_map(tf_definitions):\n \"\"\"\n :param tf_definitions, with paths in format 'dir/main.tf[module_dir/main.tf#0]'\n :return module_dependency_map: mapping between directories and the location of its module definition:\n {'dir': 'module_dir/main.tf'}\n :return tf_definitions: with paths in format 'dir/main.tf'\n \"\"\"\n module_dependency_map = {}\n copy_of_tf_definitions = {}\n dep_index_mapping: Dict[Tuple[str, str], List[str]] = {}\n origin_keys = list(filter(lambda k: not k.endswith(']'), tf_definitions.keys()))\n unevaluated_keys = list(filter(lambda k: k.endswith(']'), tf_definitions.keys()))\n for file_path in origin_keys:\n dir_name = os.path.dirname(file_path)\n module_dependency_map[dir_name] = [[]]\n copy_of_tf_definitions[file_path] = deepcopy(tf_definitions[file_path])\n\n next_level, unevaluated_keys = Parser.get_next_vertices(origin_keys, unevaluated_keys)\n while next_level:\n for file_path in next_level:\n path, module_dependency, module_dependency_num = remove_module_dependency_in_path(file_path)\n dir_name = os.path.dirname(path)\n current_deps = deepcopy(module_dependency_map[os.path.dirname(module_dependency)])\n for dep in current_deps:\n dep.append(module_dependency)\n if dir_name not in module_dependency_map:\n module_dependency_map[dir_name] = current_deps\n elif current_deps not in module_dependency_map[dir_name]:\n module_dependency_map[dir_name] += current_deps\n copy_of_tf_definitions[path] = deepcopy(tf_definitions[file_path])\n origin_keys.append(path)\n dep_index_mapping.setdefault((path, module_dependency), []).append(module_dependency_num)\n next_level, unevaluated_keys = Parser.get_next_vertices(origin_keys, unevaluated_keys)\n for key, dep_trails in module_dependency_map.items():\n hashes = set()\n deduped = []\n for trail in dep_trails:\n trail_hash = unify_dependency_path(trail)\n if trail_hash in hashes:\n continue\n hashes.add(trail_hash)\n deduped.append(trail)\n module_dependency_map[key] = deduped\n return module_dependency_map, copy_of_tf_definitions, dep_index_mapping\n\n @staticmethod\n def get_new_module(\n source_dir: str,\n module_dependency_map: Dict[str, List[List[str]]],\n module_address_map: Dict[Tuple[str, str], str],\n external_modules_source_map: Dict[Tuple[str, str], str],\n dep_index_mapping: Dict[Tuple[str, str], List[str]],\n ) -> Module:\n return Module(\n source_dir=source_dir,\n module_dependency_map=module_dependency_map,\n 
module_address_map=module_address_map,\n external_modules_source_map=external_modules_source_map,\n dep_index_mapping=dep_index_mapping\n )\n\n def add_tfvars(self, module: Module, source: str) -> None:\n if not self.external_variables_data:\n return\n for (var_name, default, path) in self.external_variables_data:\n if \".tfvars\" in path:\n block = {var_name: {\"default\": default}}\n module.add_blocks(BlockType.TF_VARIABLE, block, path, source)\n\n def get_dirname(self, path: str) -> str:\n dirname_path = self.dirname_cache.get(path)\n if not dirname_path:\n dirname_path = os.path.dirname(path)\n self.dirname_cache[path] = dirname_path\n return dirname_path\n\n\ndef _load_or_die_quietly(\n file: str | Path, parsing_errors: dict[str, Exception], clean_definitions: bool = True\n) -> _Hcl2Payload | None:\n \"\"\"\nLoad JSON or HCL, depending on filename.\n :return: None if the file can't be loaded\n \"\"\"\n\n file_path = os.fspath(file)\n file_name = os.path.basename(file_path)\n\n try:\n logging.debug(f\"Parsing {file_path}\")\n\n with open(file_path, \"r\") as f:\n if file_name.endswith(\".json\"):\n return json.load(f)\n else:\n raw_data = hcl2.load(f)\n non_malformed_definitions = validate_malformed_definitions(raw_data)\n if clean_definitions:\n return clean_bad_definitions(non_malformed_definitions)\n else:\n return non_malformed_definitions\n except Exception as e:\n logging.debug(f'failed while parsing file {file_path}', exc_info=True)\n parsing_errors[file_path] = e\n return None\n\n\ndef _is_valid_block(block: Any) -> bool:\n if not isinstance(block, dict):\n return True\n\n # if the block is empty, there's no need to process it further\n if not block:\n return False\n\n entity_name = next(iter(block.keys()))\n if re.fullmatch(ENTITY_NAME_PATTERN, entity_name):\n return True\n return False\n\n\ndef validate_malformed_definitions(raw_data: _Hcl2Payload) -> _Hcl2Payload:\n return {\n block_type: [block for block in blocks if _is_valid_block(block)]\n for block_type, blocks in raw_data.items()\n }\n\n\ndef clean_bad_definitions(tf_definition_list: _Hcl2Payload) -> _Hcl2Payload:\n return {\n block_type: [\n definition\n for definition in definition_list\n if block_type in GOOD_BLOCK_TYPES\n or not isinstance(definition, dict)\n or len(definition) == 1\n ]\n for block_type, definition_list in tf_definition_list.items()\n }\n\n\ndef _to_native_value(value: str) -> Any:\n if value.startswith('\"') or value.startswith(\"'\"):\n return value[1:-1]\n else:\n return eval_string(value)\n\n\ndef _remove_module_dependency_in_path(path: str) -> str:\n \"\"\"\n :param path: path that looks like \"dir/main.tf[other_dir/x.tf#0]\n :return: only the outer path: dir/main.tf\n \"\"\"\n if re.findall(RESOLVED_MODULE_PATTERN, path):\n path = re.sub(RESOLVED_MODULE_PATTERN, '', path)\n return path\n\n\ndef _safe_index(sequence_hopefully: Sequence[Any], index: int) -> Any:\n try:\n return sequence_hopefully[index]\n except IndexError:\n logging.debug(f'Failed to parse index int ({index}) out of {sequence_hopefully}', exc_info=True)\n return None\n\n\ndef is_acceptable_module_param(value: Any) -> bool:\n \"\"\"\n This function determines if a value should be passed to a module as a parameter. 
We don't want to pass\n unresolved var, local or module references because they can't be resolved from the module, so they need\n to be resolved prior to being passed down.\n \"\"\"\n value_type = type(value)\n if value_type is dict:\n for k, v in value.items():\n if not is_acceptable_module_param(v) or not is_acceptable_module_param(k):\n return False\n return True\n if value_type is set or value_type is list:\n for v in value:\n if not is_acceptable_module_param(v):\n return False\n return True\n\n if value_type is not str:\n return True\n\n for vbm in find_var_blocks(value):\n if vbm.is_simple_var():\n return False\n return True\n", "path": "checkov/terraform/parser.py" } ]
[ { "content": "from __future__ import annotations\n\nimport json\nimport logging\nimport os\nimport re\nfrom collections.abc import Sequence\nfrom copy import deepcopy\nfrom json import dumps, loads\nfrom pathlib import Path\nfrom typing import Optional, Dict, Mapping, Set, Tuple, Callable, Any, List, Type\n\nimport deep_merge\nimport hcl2\nfrom lark import Tree\nfrom typing_extensions import TypeAlias\n\nfrom checkov.common.runners.base_runner import filter_ignored_paths, IGNORE_HIDDEN_DIRECTORY_ENV\nfrom checkov.common.util.config_utils import should_scan_hcl_files\nfrom checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR, RESOLVED_MODULE_ENTRY_NAME\nfrom checkov.common.util.json_utils import CustomJSONEncoder\nfrom checkov.common.variables.context import EvaluationContext\nfrom checkov.terraform.checks.utils.dependency_path_handler import unify_dependency_path\nfrom checkov.terraform.graph_builder.graph_components.block_types import BlockType\nfrom checkov.terraform.graph_builder.graph_components.module import Module\nfrom checkov.terraform.graph_builder.utils import remove_module_dependency_in_path\nfrom checkov.terraform.module_loading.content import ModuleContent\nfrom checkov.terraform.module_loading.module_finder import load_tf_modules\nfrom checkov.terraform.module_loading.registry import module_loader_registry as default_ml_registry, \\\n ModuleLoaderRegistry\nfrom checkov.common.util.parser_utils import eval_string, find_var_blocks\n\n_Hcl2Payload: TypeAlias = \"dict[str, list[dict[str, Any]]]\"\n\nexternal_modules_download_path = os.environ.get('EXTERNAL_MODULES_DIR', DEFAULT_EXTERNAL_MODULES_DIR)\nGOOD_BLOCK_TYPES = {BlockType.LOCALS, BlockType.TERRAFORM} # used for cleaning bad tf definitions\n\nENTITY_NAME_PATTERN = re.compile(r\"[^\\W0-9][\\w-]*\")\nRESOLVED_MODULE_PATTERN = re.compile(r\"\\[.+\\#.+\\]\")\n\n\ndef _filter_ignored_paths(root, paths, excluded_paths):\n filter_ignored_paths(root, paths, excluded_paths)\n [paths.remove(path) for path in list(paths) if path in [default_ml_registry.external_modules_folder_name]]\n\n\nclass Parser:\n def __init__(self, module_class: Type[Module] = Module):\n self.module_class = module_class\n self._parsed_directories: set[str] = set()\n self.external_modules_source_map: Dict[Tuple[str, str], str] = {}\n self.module_address_map: Dict[Tuple[str, str], str] = {}\n self.loaded_files_map = {}\n\n # This ensures that we don't try to double-load modules\n # Tuple is <file>, <module_index>, <name> (see _load_modules)\n self._loaded_modules: Set[Tuple[str, int, str]] = set()\n self.external_variables_data = []\n\n def _init(self, directory: str, out_definitions: Optional[Dict],\n out_evaluations_context: Dict[str, Dict[str, EvaluationContext]],\n out_parsing_errors: Dict[str, Exception],\n env_vars: Mapping[str, str],\n download_external_modules: bool,\n external_modules_download_path: str,\n excluded_paths: Optional[List[str]] = None,\n tf_var_files: Optional[List[str]] = None):\n self.directory = directory\n self.out_definitions = out_definitions\n self.out_evaluations_context = out_evaluations_context\n self.out_parsing_errors = out_parsing_errors\n self.env_vars = env_vars\n self.download_external_modules = download_external_modules\n self.external_modules_download_path = external_modules_download_path\n self.external_modules_source_map = {}\n self.module_address_map = {}\n self.tf_var_files = tf_var_files\n self.scan_hcl = should_scan_hcl_files()\n self.dirname_cache = {}\n\n if self.out_evaluations_context is None:\n 
self.out_evaluations_context = {}\n if self.out_parsing_errors is None:\n self.out_parsing_errors = {}\n if self.env_vars is None:\n self.env_vars = dict(os.environ)\n self.excluded_paths = excluded_paths\n\n def _check_process_dir(self, directory: str) -> bool:\n if directory not in self._parsed_directories:\n self._parsed_directories.add(directory)\n return True\n else:\n return False\n\n def parse_directory(self, directory: str, out_definitions: Optional[Dict],\n out_evaluations_context: Dict[str, Dict[str, EvaluationContext]] = None,\n out_parsing_errors: Dict[str, Exception] = None,\n env_vars: Mapping[str, str] = None,\n download_external_modules: bool = False,\n external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,\n excluded_paths: Optional[List[str]] = None,\n vars_files: Optional[List[str]] = None,\n external_modules_content_cache: Optional[Dict[str, ModuleContent]] = None):\n self._init(directory, out_definitions, out_evaluations_context, out_parsing_errors, env_vars,\n download_external_modules, external_modules_download_path, excluded_paths)\n self._parsed_directories.clear()\n default_ml_registry.root_dir = directory\n default_ml_registry.download_external_modules = download_external_modules\n default_ml_registry.external_modules_folder_name = external_modules_download_path\n default_ml_registry.module_content_cache = external_modules_content_cache if external_modules_content_cache else {}\n load_tf_modules(directory)\n self._parse_directory(dir_filter=lambda d: self._check_process_dir(d), vars_files=vars_files)\n\n def parse_file(\n self, file: str, parsing_errors: Optional[Dict[str, Exception]] = None, scan_hcl: bool = False\n ) -> Optional[Dict[str, Any]]:\n if file.endswith(\".tf\") or file.endswith(\".tf.json\") or (scan_hcl and file.endswith(\".hcl\")):\n parse_result = _load_or_die_quietly(file, parsing_errors)\n if parse_result:\n parse_result = self._serialize_definitions(parse_result)\n parse_result = self._clean_parser_types(parse_result)\n return parse_result\n\n return None\n\n def _parse_directory(self, include_sub_dirs: bool = True,\n module_loader_registry: ModuleLoaderRegistry = default_ml_registry,\n dir_filter: Callable[[str], bool] = lambda _: True,\n vars_files: Optional[List[str]] = None) -> None:\n \"\"\"\n Load and resolve configuration files starting in the given directory, merging the\n resulting data into `tf_definitions`. This loads data according to the Terraform Code Organization\n specification (https://www.terraform.io/docs/configuration/index.html#code-organization), starting\n in the given directory and possibly moving out from there.\n\n The resulting data dictionary generally follows the layout of HCL parsing with a couple distinctions:\n - Data is broken out by file from which the data was loaded. So: <file>: <data>\n - Loaded modules will also be keyed by referrer info: <file>[<referring_file>#<index>]: <data>\n - Module block will included a \"__resolved__\" key with a list of the file/referrer names under\n which data for the file was loaded. For example: \"__resolved__\": [\"main.tf#0\"]. The values will\n correspond to the file names mentioned in the first bullet.\n - All variables that can be resolved will be resolved.\n\n\n :param include_sub_dirs: If true, subdirectories will be walked.\n\n :param module_loader_registry: Registry used for resolving modules. 
This allows customization of how\n much resolution is performed (and easier testing) by using a manually\n constructed registry rather than the default.\n :param dir_filter: Determines whether or not a directory should be processed. Returning\n True will allow processing. The argument will be the absolute path of\n the directory.\n \"\"\"\n keys_referenced_as_modules: Set[str] = set()\n\n if include_sub_dirs:\n for sub_dir, d_names, f_names in os.walk(self.directory):\n # filter subdirectories for future iterations (we filter files while iterating the directory)\n _filter_ignored_paths(sub_dir, d_names, self.excluded_paths)\n if dir_filter(os.path.abspath(sub_dir)):\n self._internal_dir_load(sub_dir, module_loader_registry, dir_filter,\n keys_referenced_as_modules, vars_files=vars_files,\n root_dir=self.directory, excluded_paths=self.excluded_paths)\n else:\n self._internal_dir_load(self.directory, module_loader_registry, dir_filter,\n keys_referenced_as_modules, vars_files=vars_files)\n\n # Ensure anything that was referenced as a module is removed\n for key in keys_referenced_as_modules:\n if key in self.out_definitions:\n del self.out_definitions[key]\n\n def _internal_dir_load(self, directory: str,\n module_loader_registry: ModuleLoaderRegistry,\n dir_filter: Callable[[str], bool],\n keys_referenced_as_modules: Set[str],\n specified_vars: Optional[Mapping[str, str]] = None,\n vars_files: Optional[List[str]] = None,\n root_dir: Optional[str] = None,\n excluded_paths: Optional[List[str]] = None):\n \"\"\"\n See `parse_directory` docs.\n :param directory: Directory in which .tf and .tfvars files will be loaded.\n :param module_loader_registry: Registry used for resolving modules. This allows customization of how\n much resolution is performed (and easier testing) by using a manually\n constructed registry rather than the default.\n :param dir_filter: Determines whether or not a directory should be processed. Returning\n True will allow processing. 
The argument will be the absolute path of\n the directory.\n :param specified_vars: Specifically defined variable values, overriding values from any other source.\n \"\"\"\n\n # Stage 1: Look for applicable files in the directory:\n # https://www.terraform.io/docs/configuration/index.html#code-organization\n # Load the raw data for non-variable files, but perform no processing other than loading\n # variable default values.\n # Variable files are also flagged for later processing.\n var_value_and_file_map: Dict[str, Tuple[Any, str]] = {}\n hcl_tfvars: Optional[os.DirEntry] = None\n json_tfvars: Optional[os.DirEntry] = None\n auto_vars_files: List[os.DirEntry] = [] # *.auto.tfvars / *.auto.tfvars.json\n explicit_var_files: List[os.DirEntry] = [] # files passed with --var-file; only process the ones that are in this directory\n\n dir_contents = list(os.scandir(directory))\n if excluded_paths or IGNORE_HIDDEN_DIRECTORY_ENV:\n filter_ignored_paths(root_dir, dir_contents, excluded_paths)\n\n tf_files_to_load = []\n for file in dir_contents:\n # Ignore directories and hidden files\n try:\n if not file.is_file():\n continue\n except OSError:\n # Skip files that can't be accessed\n continue\n\n # Variable files\n # See: https://www.terraform.io/docs/configuration/variables.html#variable-definitions-tfvars-files\n if file.name == \"terraform.tfvars.json\":\n json_tfvars = file\n elif file.name == \"terraform.tfvars\":\n hcl_tfvars = file\n elif file.name.endswith(\".auto.tfvars.json\") or file.name.endswith(\".auto.tfvars\"):\n auto_vars_files.append(file)\n elif vars_files and file.path in vars_files:\n explicit_var_files.append(file)\n\n # Resource files\n elif file.name.endswith(\".tf\") or (self.scan_hcl and file.name.endswith('.hcl')): # TODO: add support for .tf.json\n tf_files_to_load.append(file)\n\n files_to_data = self._load_files(tf_files_to_load)\n\n for file, data in sorted(files_to_data, key=lambda x: x[0]):\n if not data:\n continue\n self.out_definitions[file] = data\n\n # Load variable defaults\n # (see https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable)\n var_blocks = data.get(\"variable\")\n if var_blocks and isinstance(var_blocks, list):\n for var_block in var_blocks:\n if not isinstance(var_block, dict):\n continue\n for var_name, var_definition in var_block.items():\n if not isinstance(var_definition, dict):\n continue\n\n default_value = var_definition.get(\"default\")\n if default_value is not None and isinstance(default_value, list):\n self.external_variables_data.append((var_name, default_value[0], file))\n var_value_and_file_map[var_name] = default_value[0], file\n\n # Stage 2: Load vars in proper order:\n # https://www.terraform.io/docs/configuration/variables.html#variable-definition-precedence\n # Defaults are loaded in stage 1.\n # Then loading in this order with later taking precedence:\n # - Environment variables\n # - The terraform.tfvars file, if present.\n # - The terraform.tfvars.json file, if present.\n # - Any *.auto.tfvars or *.auto.tfvars.json files, processed in lexical order of\n # their filenames.\n # Overriding everything else, variables form `specified_vars`, which are considered\n # directly set.\n for key, value in self.env_vars.items(): # env vars\n if not key.startswith(\"TF_VAR_\"):\n continue\n var_value_and_file_map[key[7:]] = value, f\"env:{key}\"\n self.external_variables_data.append((key[7:], value, f\"env:{key}\"))\n if hcl_tfvars: # terraform.tfvars\n data = _load_or_die_quietly(hcl_tfvars, 
self.out_parsing_errors, clean_definitions=False)\n if data:\n var_value_and_file_map.update({k: (_safe_index(v, 0), hcl_tfvars.path) for k, v in data.items()})\n self.external_variables_data.extend([(k, _safe_index(v, 0), hcl_tfvars.path) for k, v in data.items()])\n if json_tfvars: # terraform.tfvars.json\n data = _load_or_die_quietly(json_tfvars, self.out_parsing_errors)\n if data:\n var_value_and_file_map.update({k: (v, json_tfvars.path) for k, v in data.items()})\n self.external_variables_data.extend([(k, v, json_tfvars.path) for k, v in data.items()])\n\n auto_var_files_to_data = self._load_files(auto_vars_files)\n for var_file, data in sorted(auto_var_files_to_data, key=lambda x: x[0]):\n if data:\n var_value_and_file_map.update({k: (v, var_file) for k, v in data.items()})\n self.external_variables_data.extend([(k, v, var_file) for k, v in data.items()])\n\n explicit_var_files_to_data = self._load_files(explicit_var_files)\n # it's possible that os.scandir returned the var files in a different order than they were specified\n for var_file, data in sorted(explicit_var_files_to_data, key=lambda x: vars_files.index(x[0])):\n if data:\n var_value_and_file_map.update({k: (v, var_file) for k, v in data.items()})\n self.external_variables_data.extend([(k, v, var_file) for k, v in data.items()])\n\n if specified_vars: # specified\n var_value_and_file_map.update({k: (v, \"manual specification\") for k, v in specified_vars.items()})\n self.external_variables_data.extend([(k, v, \"manual specification\") for k, v in specified_vars.items()])\n\n # Stage 4: Load modules\n # This stage needs to be done in a loop (again... alas, no DAG) because modules might not\n # be loadable until other modules are loaded. This happens when parameters to one module\n # depend on the output of another. For such cases, the base module must be loaded, then\n # a parameter resolution pass needs to happen, then the second module can be loaded.\n #\n # One gotcha is that we need to make sure we load all modules at some point, even if their\n # parameters don't resolve. 
So, if we hit a spot where resolution doesn't change anything\n # and there are still modules to be loaded, they will be forced on the next pass.\n force_final_module_load = False\n for i in range(0, 10): # circuit breaker - no more than 10 loops\n logging.debug(\"Module load loop %d\", i)\n\n # Stage 4a: Load eligible modules\n has_more_modules = self._load_modules(directory, module_loader_registry, dir_filter,\n keys_referenced_as_modules, force_final_module_load)\n\n # Stage 4b: Variable resolution round 2 - now with (possibly more) modules\n made_var_changes = False\n if not has_more_modules:\n break # nothing more to do\n elif not made_var_changes:\n # If there are more modules to load but no variables were resolved, then to a final module\n # load, forcing things through without complete resolution.\n force_final_module_load = True\n\n def _load_files(self, files: list[os.DirEntry]):\n def _load_file(file: os.DirEntry):\n parsing_errors = {}\n result = _load_or_die_quietly(file, parsing_errors)\n # the exceptions type can un-pickleable\n for path, e in parsing_errors.items():\n parsing_errors[path] = e\n\n return (file.path, result), parsing_errors\n\n files_to_data = []\n files_to_parse = []\n for file in files:\n data = self.loaded_files_map.get(file.path)\n if data:\n files_to_data.append((file.path, data))\n else:\n files_to_parse.append(file)\n\n results = [_load_file(f) for f in files_to_parse]\n for result, parsing_errors in results:\n self.out_parsing_errors.update(parsing_errors)\n files_to_data.append(result)\n if result[0] not in self.loaded_files_map:\n self.loaded_files_map[result[0]] = result[1]\n return files_to_data\n\n def _load_modules(self, root_dir: str, module_loader_registry: ModuleLoaderRegistry,\n dir_filter: Callable[[str], bool],\n keys_referenced_as_modules: Set[str], ignore_unresolved_params: bool = False) -> bool:\n \"\"\"\n Load modules which have not already been loaded and can be loaded (don't have unresolved parameters).\n\n :param ignore_unresolved_params: If true, not-yet-loaded modules will be loaded even if they are\n passed parameters that are not fully resolved.\n :return: True if there were modules that were not loaded due to unresolved\n parameters.\n \"\"\"\n all_module_definitions = {}\n all_module_evaluations_context = {}\n skipped_a_module = False\n for file in list(self.out_definitions.keys()):\n # Don't process a file in a directory other than the directory we're processing. For example,\n # if we're down dealing with <top_dir>/<module>/something.tf, we don't want to rescan files\n # up in <top_dir>.\n if self.get_dirname(file) != root_dir:\n continue\n # Don't process a file reference which has already been processed\n if file.endswith(\"]\"):\n continue\n\n file_data = self.out_definitions.get(file)\n if file_data is None:\n continue\n module_calls = file_data.get(\"module\")\n if not module_calls or not isinstance(module_calls, list):\n continue\n\n for module_index, module_call in enumerate(module_calls):\n\n if not isinstance(module_call, dict):\n continue\n\n # There should only be one module reference per outer dict, but... 
safety first\n for module_call_name, module_call_data in module_call.items():\n if not isinstance(module_call_data, dict):\n continue\n\n module_address = (file, module_index, module_call_name)\n if module_address in self._loaded_modules:\n continue\n\n # Variables being passed to module, \"source\" and \"version\" are reserved\n specified_vars = {k: v[0] if isinstance(v, list) else v for k, v in module_call_data.items()\n if k != \"source\" and k != \"version\"}\n\n if not ignore_unresolved_params:\n has_unresolved_params = False\n for k, v in specified_vars.items():\n if not is_acceptable_module_param(v) or not is_acceptable_module_param(k):\n has_unresolved_params = True\n break\n if has_unresolved_params:\n skipped_a_module = True\n continue\n self._loaded_modules.add(module_address)\n\n source = module_call_data.get(\"source\")\n if not source or not isinstance(source, list):\n continue\n source = source[0]\n if not isinstance(source, str):\n logging.debug(f\"Skipping loading of {module_call_name} as source is not a string, it is: {source}\")\n continue\n\n # Special handling for local sources to make sure we aren't double-parsing\n if source.startswith(\"./\") or source.startswith(\"../\"):\n source = os.path.normpath(\n os.path.join(os.path.dirname(_remove_module_dependency_in_path(file)), source))\n\n version = module_call_data.get(\"version\", \"latest\")\n if version and isinstance(version, list):\n version = version[0]\n try:\n content = module_loader_registry.load(root_dir, source, version)\n if not content.loaded():\n logging.info(f'Got no content for {source}:{version}')\n continue\n\n self._internal_dir_load(directory=content.path(),\n module_loader_registry=module_loader_registry,\n dir_filter=dir_filter, specified_vars=specified_vars,\n keys_referenced_as_modules=keys_referenced_as_modules)\n\n module_definitions = {path: self.out_definitions[path] for path in\n list(self.out_definitions.keys()) if\n self.get_dirname(path) == content.path()}\n\n if not module_definitions:\n continue\n\n # NOTE: Modules are put into the main TF definitions structure \"as normal\" with the\n # notable exception of the file name. For loaded modules referrer information is\n # appended to the file name to create this format:\n # <file_name>[<referred_file>#<referrer_index>]\n # For example:\n # /the/path/module/my_module.tf[/the/path/main.tf#0]\n # The referrer and index allow a module allow a module to be loaded multiple\n # times with differing data.\n #\n # In addition, the referring block will have a \"__resolved__\" key added with a\n # list pointing to the location of the module data that was resolved. 
For example:\n # \"__resolved__\": [\"/the/path/module/my_module.tf[/the/path/main.tf#0]\"]\n\n resolved_loc_list = module_call_data.get(RESOLVED_MODULE_ENTRY_NAME)\n if resolved_loc_list is None:\n resolved_loc_list = []\n module_call_data[RESOLVED_MODULE_ENTRY_NAME] = resolved_loc_list\n\n # NOTE: Modules can load other modules, so only append referrer information where it\n # has not already been added.\n keys = list(module_definitions.keys())\n for key in keys:\n if key.endswith(\"]\") or file.endswith(\"]\"):\n continue\n keys_referenced_as_modules.add(key)\n new_key = f\"{key}[{file}#{module_index}]\"\n module_definitions[new_key] = module_definitions[key]\n del module_definitions[key]\n del self.out_definitions[key]\n if new_key not in resolved_loc_list:\n resolved_loc_list.append(new_key)\n if (file, module_call_name) not in self.module_address_map:\n self.module_address_map[(file, module_call_name)] = str(module_index)\n resolved_loc_list.sort() # For testing, need predictable ordering\n\n if all_module_definitions:\n deep_merge.merge(all_module_definitions, module_definitions)\n else:\n all_module_definitions = module_definitions\n\n self.external_modules_source_map[(source, version)] = content.path()\n except Exception as e:\n logging.warning(\"Unable to load module (source=\\\"%s\\\" version=\\\"%s\\\"): %s\",\n source, version, e)\n\n if all_module_definitions:\n deep_merge.merge(self.out_definitions, all_module_definitions)\n if all_module_evaluations_context:\n deep_merge.merge(self.out_evaluations_context, all_module_evaluations_context)\n return skipped_a_module\n\n def parse_hcl_module(\n self,\n source_dir: str,\n source: str,\n download_external_modules: bool = False,\n external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,\n parsing_errors: dict[str, Exception] | None = None,\n excluded_paths: list[str] | None = None,\n vars_files: list[str] | None = None,\n external_modules_content_cache: dict[str, ModuleContent] | None = None,\n create_graph: bool = True,\n ) -> tuple[Module | None, dict[str, dict[str, Any]]]:\n tf_definitions: dict[str, dict[str, Any]] = {}\n self.parse_directory(directory=source_dir, out_definitions=tf_definitions, out_evaluations_context={},\n out_parsing_errors=parsing_errors if parsing_errors is not None else {},\n download_external_modules=download_external_modules,\n external_modules_download_path=external_modules_download_path, excluded_paths=excluded_paths,\n vars_files=vars_files, external_modules_content_cache=external_modules_content_cache)\n tf_definitions = self._clean_parser_types(tf_definitions)\n tf_definitions = self._serialize_definitions(tf_definitions)\n\n module = None\n if create_graph:\n module, tf_definitions = self.parse_hcl_module_from_tf_definitions(tf_definitions, source_dir, source)\n\n return module, tf_definitions\n\n def parse_hcl_module_from_tf_definitions(\n self,\n tf_definitions: Dict[str, Dict[str, Any]],\n source_dir: str,\n source: str,\n ) -> Tuple[Module, Dict[str, Dict[str, Any]]]:\n module_dependency_map, tf_definitions, dep_index_mapping = self.get_module_dependency_map(tf_definitions)\n module = self.get_new_module(\n source_dir=source_dir,\n module_dependency_map=module_dependency_map,\n module_address_map=self.module_address_map,\n external_modules_source_map=self.external_modules_source_map,\n dep_index_mapping=dep_index_mapping,\n )\n self.add_tfvars(module, source)\n copy_of_tf_definitions = deepcopy(tf_definitions)\n for file_path, blocks in copy_of_tf_definitions.items():\n for 
block_type in blocks:\n try:\n module.add_blocks(block_type, blocks[block_type], file_path, source)\n except Exception as e:\n logging.warning(f'Failed to add block {blocks[block_type]}. Error:')\n logging.warning(e, exc_info=False)\n return module, tf_definitions\n\n @staticmethod\n def _clean_parser_types(conf: dict[str, Any]) -> dict[str, Any]:\n if not conf:\n return conf\n\n sorted_keys = list(conf.keys())\n first_key_type = type(sorted_keys[0])\n if first_key_type is None:\n return {}\n\n if all(isinstance(x, first_key_type) for x in sorted_keys):\n sorted_keys.sort()\n\n # Create a new dict where the keys are sorted alphabetically\n sorted_conf = {key: conf[key] for key in sorted_keys}\n for attribute, values in sorted_conf.items():\n if attribute == 'alias':\n continue\n if isinstance(values, list):\n sorted_conf[attribute] = Parser._clean_parser_types_lst(values)\n elif isinstance(values, dict):\n sorted_conf[attribute] = Parser._clean_parser_types(values)\n elif isinstance(values, str) and values in ('true', 'false'):\n sorted_conf[attribute] = True if values == 'true' else False\n elif isinstance(values, set):\n sorted_conf[attribute] = Parser._clean_parser_types_lst(list(values))\n elif isinstance(values, Tree):\n sorted_conf[attribute] = str(values)\n return sorted_conf\n\n @staticmethod\n def _clean_parser_types_lst(values: list[Any]) -> list[Any]:\n for idx, val in enumerate(values):\n if isinstance(val, dict):\n values[idx] = Parser._clean_parser_types(val)\n elif isinstance(val, list):\n values[idx] = Parser._clean_parser_types_lst(val)\n elif isinstance(val, str):\n if val == 'true':\n values[idx] = True\n elif val == 'false':\n values[idx] = False\n elif isinstance(val, set):\n values[idx] = Parser._clean_parser_types_lst(list(val))\n str_values_in_lst = [val for val in values if isinstance(val, str)]\n str_values_in_lst.sort()\n result_values = [val for val in values if not isinstance(val, str)]\n result_values.extend(str_values_in_lst)\n return result_values\n\n @staticmethod\n def _serialize_definitions(tf_definitions: dict[str, _Hcl2Payload]) -> dict[str, _Hcl2Payload]:\n return loads(dumps(tf_definitions, cls=CustomJSONEncoder))\n\n @staticmethod\n def get_next_vertices(evaluated_files: list[str], unevaluated_files: list[str]) -> tuple[list[str], list[str]]:\n \"\"\"\n This function implements a lazy separation of levels for the evaluated files. It receives the evaluated\n files, and returns 2 lists:\n 1. The next level of files - files from the unevaluated_files which have no unresolved dependency (either\n no dependency or all dependencies were evaluated).\n 2. 
unevaluated - files which have yet to be evaluated, and still have pending dependencies\n\n Let's say we have this dependency tree:\n a -> b\n x -> b\n y -> c\n z -> b\n b -> c\n c -> d\n\n The first run will return [a, y, x, z] as the next level since all of them have no dependencies\n The second run with the evaluated being [a, y, x, z] will return [b] as the next level.\n Please mind that [c] has some resolved dependencies (from y), but has unresolved dependencies from [b].\n The third run will return [c], and the fourth will return [d].\n \"\"\"\n next_level, unevaluated, do_not_eval_yet = [], [], []\n for key in unevaluated_files:\n found = False\n for eval_key in evaluated_files:\n if eval_key in key:\n found = True\n break\n if not found:\n do_not_eval_yet.append(key.split('[')[0])\n unevaluated.append(key)\n else:\n next_level.append(key)\n\n move_to_uneval = list(filter(lambda k: k.split('[')[0] in do_not_eval_yet, next_level))\n for k in move_to_uneval:\n next_level.remove(k)\n unevaluated.append(k)\n return next_level, unevaluated\n\n @staticmethod\n def get_module_dependency_map(tf_definitions):\n \"\"\"\n :param tf_definitions, with paths in format 'dir/main.tf[module_dir/main.tf#0]'\n :return module_dependency_map: mapping between directories and the location of its module definition:\n {'dir': 'module_dir/main.tf'}\n :return tf_definitions: with paths in format 'dir/main.tf'\n \"\"\"\n module_dependency_map = {}\n copy_of_tf_definitions = {}\n dep_index_mapping: Dict[Tuple[str, str], List[str]] = {}\n origin_keys = list(filter(lambda k: not k.endswith(']'), tf_definitions.keys()))\n unevaluated_keys = list(filter(lambda k: k.endswith(']'), tf_definitions.keys()))\n for file_path in origin_keys:\n dir_name = os.path.dirname(file_path)\n module_dependency_map[dir_name] = [[]]\n copy_of_tf_definitions[file_path] = deepcopy(tf_definitions[file_path])\n\n next_level, unevaluated_keys = Parser.get_next_vertices(origin_keys, unevaluated_keys)\n while next_level:\n for file_path in next_level:\n path, module_dependency, module_dependency_num = remove_module_dependency_in_path(file_path)\n dir_name = os.path.dirname(path)\n current_deps = deepcopy(module_dependency_map[os.path.dirname(module_dependency)])\n for dep in current_deps:\n dep.append(module_dependency)\n if dir_name not in module_dependency_map:\n module_dependency_map[dir_name] = current_deps\n elif current_deps not in module_dependency_map[dir_name]:\n module_dependency_map[dir_name] += current_deps\n copy_of_tf_definitions[path] = deepcopy(tf_definitions[file_path])\n origin_keys.append(path)\n dep_index_mapping.setdefault((path, module_dependency), []).append(module_dependency_num)\n next_level, unevaluated_keys = Parser.get_next_vertices(origin_keys, unevaluated_keys)\n for key, dep_trails in module_dependency_map.items():\n hashes = set()\n deduped = []\n for trail in dep_trails:\n trail_hash = unify_dependency_path(trail)\n if trail_hash in hashes:\n continue\n hashes.add(trail_hash)\n deduped.append(trail)\n module_dependency_map[key] = deduped\n return module_dependency_map, copy_of_tf_definitions, dep_index_mapping\n\n @staticmethod\n def get_new_module(\n source_dir: str,\n module_dependency_map: Dict[str, List[List[str]]],\n module_address_map: Dict[Tuple[str, str], str],\n external_modules_source_map: Dict[Tuple[str, str], str],\n dep_index_mapping: Dict[Tuple[str, str], List[str]],\n ) -> Module:\n return Module(\n source_dir=source_dir,\n module_dependency_map=module_dependency_map,\n 
module_address_map=module_address_map,\n external_modules_source_map=external_modules_source_map,\n dep_index_mapping=dep_index_mapping\n )\n\n def add_tfvars(self, module: Module, source: str) -> None:\n if not self.external_variables_data:\n return\n for (var_name, default, path) in self.external_variables_data:\n if \".tfvars\" in path:\n block = {var_name: {\"default\": default}}\n module.add_blocks(BlockType.TF_VARIABLE, block, path, source)\n\n def get_dirname(self, path: str) -> str:\n dirname_path = self.dirname_cache.get(path)\n if not dirname_path:\n dirname_path = os.path.dirname(path)\n self.dirname_cache[path] = dirname_path\n return dirname_path\n\n\ndef _load_or_die_quietly(\n file: str | Path, parsing_errors: dict[str, Exception], clean_definitions: bool = True\n) -> _Hcl2Payload | None:\n \"\"\"\nLoad JSON or HCL, depending on filename.\n :return: None if the file can't be loaded\n \"\"\"\n\n file_path = os.fspath(file)\n file_name = os.path.basename(file_path)\n\n try:\n logging.debug(f\"Parsing {file_path}\")\n\n with open(file_path, \"r\", encoding=\"utf-8-sig\") as f:\n if file_name.endswith(\".json\"):\n return json.load(f)\n else:\n raw_data = hcl2.load(f)\n non_malformed_definitions = validate_malformed_definitions(raw_data)\n if clean_definitions:\n return clean_bad_definitions(non_malformed_definitions)\n else:\n return non_malformed_definitions\n except Exception as e:\n logging.debug(f'failed while parsing file {file_path}', exc_info=True)\n parsing_errors[file_path] = e\n return None\n\n\ndef _is_valid_block(block: Any) -> bool:\n if not isinstance(block, dict):\n return True\n\n # if the block is empty, there's no need to process it further\n if not block:\n return False\n\n entity_name = next(iter(block.keys()))\n if re.fullmatch(ENTITY_NAME_PATTERN, entity_name):\n return True\n return False\n\n\ndef validate_malformed_definitions(raw_data: _Hcl2Payload) -> _Hcl2Payload:\n return {\n block_type: [block for block in blocks if _is_valid_block(block)]\n for block_type, blocks in raw_data.items()\n }\n\n\ndef clean_bad_definitions(tf_definition_list: _Hcl2Payload) -> _Hcl2Payload:\n return {\n block_type: [\n definition\n for definition in definition_list\n if block_type in GOOD_BLOCK_TYPES\n or not isinstance(definition, dict)\n or len(definition) == 1\n ]\n for block_type, definition_list in tf_definition_list.items()\n }\n\n\ndef _to_native_value(value: str) -> Any:\n if value.startswith('\"') or value.startswith(\"'\"):\n return value[1:-1]\n else:\n return eval_string(value)\n\n\ndef _remove_module_dependency_in_path(path: str) -> str:\n \"\"\"\n :param path: path that looks like \"dir/main.tf[other_dir/x.tf#0]\n :return: only the outer path: dir/main.tf\n \"\"\"\n if re.findall(RESOLVED_MODULE_PATTERN, path):\n path = re.sub(RESOLVED_MODULE_PATTERN, '', path)\n return path\n\n\ndef _safe_index(sequence_hopefully: Sequence[Any], index: int) -> Any:\n try:\n return sequence_hopefully[index]\n except IndexError:\n logging.debug(f'Failed to parse index int ({index}) out of {sequence_hopefully}', exc_info=True)\n return None\n\n\ndef is_acceptable_module_param(value: Any) -> bool:\n \"\"\"\n This function determines if a value should be passed to a module as a parameter. 
We don't want to pass\n unresolved var, local or module references because they can't be resolved from the module, so they need\n to be resolved prior to being passed down.\n \"\"\"\n value_type = type(value)\n if value_type is dict:\n for k, v in value.items():\n if not is_acceptable_module_param(v) or not is_acceptable_module_param(k):\n return False\n return True\n if value_type is set or value_type is list:\n for v in value:\n if not is_acceptable_module_param(v):\n return False\n return True\n\n if value_type is not str:\n return True\n\n for vbm in find_var_blocks(value):\n if vbm.is_simple_var():\n return False\n return True\n", "path": "checkov/terraform/parser.py" } ]
diff --git a/checkov/terraform/parser.py b/checkov/terraform/parser.py index c5bb19dd0d..bfd0d7d036 100644 --- a/checkov/terraform/parser.py +++ b/checkov/terraform/parser.py @@ -735,7 +735,7 @@ def _load_or_die_quietly( try: logging.debug(f"Parsing {file_path}") - with open(file_path, "r") as f: + with open(file_path, "r", encoding="utf-8-sig") as f: if file_name.endswith(".json"): return json.load(f) else: diff --git a/tests/terraform/parser/resources/file_bom/with_bom.tf b/tests/terraform/parser/resources/file_bom/with_bom.tf new file mode 100644 index 0000000000..55a50a84bf --- /dev/null +++ b/tests/terraform/parser/resources/file_bom/with_bom.tf @@ -0,0 +1,3 @@ +resource "aws_s3_bucket" "example" { + bucket = "example" +} diff --git a/tests/terraform/parser/resources/file_bom/without_bom.tf b/tests/terraform/parser/resources/file_bom/without_bom.tf new file mode 100644 index 0000000000..a478590748 --- /dev/null +++ b/tests/terraform/parser/resources/file_bom/without_bom.tf @@ -0,0 +1,3 @@ +resource "aws_s3_bucket" "example" { + bucket = "example" +} diff --git a/tests/terraform/parser/test_parser_internals.py b/tests/terraform/parser/test_parser_internals.py index fd13abdda3..ea80cceb94 100644 --- a/tests/terraform/parser/test_parser_internals.py +++ b/tests/terraform/parser/test_parser_internals.py @@ -1,9 +1,56 @@ -import unittest +from pathlib import Path -from checkov.terraform import parser +from checkov.common.util.parser_utils import eval_string +from checkov.terraform.parser import _load_or_die_quietly -class TestParserInternals(unittest.TestCase): - def test_eval_string_to_list(self): - expected = ["a", "b", "c"] - assert parser.eval_string('["a", "b", "c"]') == expected +def test_eval_string_to_list(): + # given + expected = ["a", "b", "c"] + + # when + actual = eval_string('["a", "b", "c"]') + + assert actual == expected + + +def test__load_or_die_quietly_with_bom(): + # given + test_file = Path(__file__).parent / "resources/file_bom/with_bom.tf" + parsing_errors = {} + + # when + definition = _load_or_die_quietly(file=test_file, parsing_errors=parsing_errors) + + # then + assert not parsing_errors + assert definition == { + "resource": [ + { + "aws_s3_bucket": { + "example": {"bucket": ["example"], "__start_line__": 1, "__end_line__": 3}, + }, + } + ] + } + + +def test__load_or_die_quietly_without_bom(): + # given + test_file = Path(__file__).parent / "resources/file_bom/without_bom.tf" + parsing_errors = {} + + # when + definition = _load_or_die_quietly(file=test_file, parsing_errors=parsing_errors) + + # then + assert not parsing_errors + assert definition == { + "resource": [ + { + "aws_s3_bucket": { + "example": {"bucket": ["example"], "__start_line__": 1, "__end_line__": 3}, + }, + } + ] + }
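The diff above fixes BOM handling in the Checkov Terraform parser by opening files with the `utf-8-sig` codec and adds `with_bom.tf` / `without_bom.tf` fixtures to cover both cases. A minimal, self-contained sketch of what that codec change does (the resource block echoes the new fixture; the temp-file plumbing is illustrative only):

```python
# "utf-8-sig" strips a leading UTF-8 byte-order mark on read, while plain
# "utf-8" keeps it as the character "\ufeff", which can trip up the HCL parser.
import os
import tempfile

# Write a small Terraform snippet prefixed with a BOM (illustrative content).
with tempfile.NamedTemporaryFile("wb", suffix=".tf", delete=False) as tmp:
    tmp.write(b"\xef\xbb\xbf" + b'resource "aws_s3_bucket" "example" {}\n')
    path = tmp.name

with open(path, "r", encoding="utf-8") as f:
    raw = f.read()
with open(path, "r", encoding="utf-8-sig") as f:
    clean = f.read()

print(raw.startswith("\ufeff"))      # True: the BOM survives a plain utf-8 read
print(clean.startswith("resource"))  # True: utf-8-sig removed it

os.unlink(path)
```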
celery__celery-4203
-u option does not exist

## Steps to reproduce
Start -> celery -A application worker -l info

## Actual behavior
RuntimeWarning: You're running the worker with superuser privileges: this is absolutely not recommended!
Please specify a different user using the -u option.
User information: uid=0 euid=0 gid=0 egid=0
uid=uid, euid=euid, gid=gid, egid=egid

## Fixes
When displaying the help menu -> celery -A application worker -l info -h
There is currently no longer a -u option, and the warning should be changed to use the --uid / --gid options.
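The requested change is essentially a one-word edit to the warning template in `celery/platforms.py`: point users at `--uid` instead of the removed `-u`. A standalone sketch of how that template is rendered, following the corrected constant as it appears in the fixed module further below (the id values are dummies, and this is not the celery source itself):

```python
# Render the superuser warning with the corrected option name via str.format.
import warnings

ROOT_DISCOURAGED = """\
You're running the worker with superuser privileges: this is
absolutely not recommended!

Please specify a different user using the --uid option.

User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""

warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format(uid=0, euid=0, gid=0, egid=0)))
```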
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Platforms.\n\nUtilities dealing with platform specifics: signals, daemonization,\nusers, groups, and so on.\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport atexit\nimport errno\nimport math\nimport numbers\nimport os\nimport platform as _platform\nimport signal as _signal\nimport sys\nimport warnings\n\nfrom collections import namedtuple\n\nfrom billiard.compat import get_fdmax, close_open_fds\n# fileno used to be in this module\nfrom kombu.utils.compat import maybe_fileno\nfrom kombu.utils.encoding import safe_str\nfrom contextlib import contextmanager\n\nfrom .exceptions import SecurityError\nfrom .local import try_import\nfrom .five import items, reraise, string_t\n\ntry:\n from billiard.process import current_process\nexcept ImportError: # pragma: no cover\n current_process = None\n\n_setproctitle = try_import('setproctitle')\nresource = try_import('resource')\npwd = try_import('pwd')\ngrp = try_import('grp')\nmputil = try_import('multiprocessing.util')\n\n__all__ = [\n 'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',\n 'IS_macOS', 'IS_WINDOWS', 'SIGMAP', 'pyimplementation', 'LockFailed',\n 'get_fdmax', 'Pidfile', 'create_pidlock', 'close_open_fds',\n 'DaemonContext', 'detached', 'parse_uid', 'parse_gid', 'setgroups',\n 'initgroups', 'setgid', 'setuid', 'maybe_drop_privileges', 'signals',\n 'signal_name', 'set_process_title', 'set_mp_process_title',\n 'get_errno_name', 'ignore_errno', 'fd_by_path', 'isatty',\n]\n\n# exitcodes\nEX_OK = getattr(os, 'EX_OK', 0)\nEX_FAILURE = 1\nEX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)\nEX_USAGE = getattr(os, 'EX_USAGE', 64)\nEX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)\n\nSYSTEM = _platform.system()\nIS_macOS = SYSTEM == 'Darwin'\nIS_WINDOWS = SYSTEM == 'Windows'\n\nDAEMON_WORKDIR = '/'\n\nPIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY\nPIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK))\n\nPIDLOCKED = \"\"\"ERROR: Pidfile ({0}) already exists.\nSeems we're already running? 
(pid: {1})\"\"\"\n\n_range = namedtuple('_range', ('start', 'stop'))\n\nC_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False)\n\nROOT_DISALLOWED = \"\"\"\\\nRunning a worker with superuser privileges when the\nworker accepts messages serialized with pickle is a very bad idea!\n\nIf you really want to continue then you have to set the C_FORCE_ROOT\nenvironment variable (but please think about this before you do).\n\nUser information: uid={uid} euid={euid} gid={gid} egid={egid}\n\"\"\"\n\nROOT_DISCOURAGED = \"\"\"\\\nYou're running the worker with superuser privileges: this is\nabsolutely not recommended!\n\nPlease specify a different user using the -u option.\n\nUser information: uid={uid} euid={euid} gid={gid} egid={egid}\n\"\"\"\n\nSIGNAMES = {\n sig for sig in dir(_signal)\n if sig.startswith('SIG') and '_' not in sig\n}\nSIGMAP = {getattr(_signal, name): name for name in SIGNAMES}\n\n\ndef isatty(fh):\n \"\"\"Return true if the process has a controlling terminal.\"\"\"\n try:\n return fh.isatty()\n except AttributeError:\n pass\n\n\ndef pyimplementation():\n \"\"\"Return string identifying the current Python implementation.\"\"\"\n if hasattr(_platform, 'python_implementation'):\n return _platform.python_implementation()\n elif sys.platform.startswith('java'):\n return 'Jython ' + sys.platform\n elif hasattr(sys, 'pypy_version_info'):\n v = '.'.join(str(p) for p in sys.pypy_version_info[:3])\n if sys.pypy_version_info[3:]:\n v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:])\n return 'PyPy ' + v\n else:\n return 'CPython'\n\n\nclass LockFailed(Exception):\n \"\"\"Raised if a PID lock can't be acquired.\"\"\"\n\n\nclass Pidfile(object):\n \"\"\"Pidfile.\n\n This is the type returned by :func:`create_pidlock`.\n\n See Also:\n Best practice is to not use this directly but rather use\n the :func:`create_pidlock` function instead:\n more convenient and also removes stale pidfiles (when\n the process holding the lock is no longer running).\n \"\"\"\n\n #: Path to the pid lock file.\n path = None\n\n def __init__(self, path):\n self.path = os.path.abspath(path)\n\n def acquire(self):\n \"\"\"Acquire lock.\"\"\"\n try:\n self.write_pid()\n except OSError as exc:\n reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2])\n return self\n __enter__ = acquire\n\n def is_locked(self):\n \"\"\"Return true if the pid lock exists.\"\"\"\n return os.path.exists(self.path)\n\n def release(self, *args):\n \"\"\"Release lock.\"\"\"\n self.remove()\n __exit__ = release\n\n def read_pid(self):\n \"\"\"Read and return the current pid.\"\"\"\n with ignore_errno('ENOENT'):\n with open(self.path, 'r') as fh:\n line = fh.readline()\n if line.strip() == line: # must contain '\\n'\n raise ValueError(\n 'Partial or invalid pidfile {0.path}'.format(self))\n\n try:\n return int(line.strip())\n except ValueError:\n raise ValueError(\n 'pidfile {0.path} contents invalid.'.format(self))\n\n def remove(self):\n \"\"\"Remove the lock.\"\"\"\n with ignore_errno(errno.ENOENT, errno.EACCES):\n os.unlink(self.path)\n\n def remove_if_stale(self):\n \"\"\"Remove the lock if the process isn't running.\n\n I.e. 
process does not respons to signal.\n \"\"\"\n try:\n pid = self.read_pid()\n except ValueError as exc:\n print('Broken pidfile found - Removing it.', file=sys.stderr)\n self.remove()\n return True\n if not pid:\n self.remove()\n return True\n\n try:\n os.kill(pid, 0)\n except os.error as exc:\n if exc.errno == errno.ESRCH:\n print('Stale pidfile exists - Removing it.', file=sys.stderr)\n self.remove()\n return True\n return False\n\n def write_pid(self):\n pid = os.getpid()\n content = '{0}\\n'.format(pid)\n\n pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)\n pidfile = os.fdopen(pidfile_fd, 'w')\n try:\n pidfile.write(content)\n # flush and sync so that the re-read below works.\n pidfile.flush()\n try:\n os.fsync(pidfile_fd)\n except AttributeError: # pragma: no cover\n pass\n finally:\n pidfile.close()\n\n rfh = open(self.path)\n try:\n if rfh.read() != content:\n raise LockFailed(\n \"Inconsistency: Pidfile content doesn't match at re-read\")\n finally:\n rfh.close()\nPIDFile = Pidfile # noqa: E305 XXX compat alias\n\n\ndef create_pidlock(pidfile):\n \"\"\"Create and verify pidfile.\n\n If the pidfile already exists the program exits with an error message,\n however if the process it refers to isn't running anymore, the pidfile\n is deleted and the program continues.\n\n This function will automatically install an :mod:`atexit` handler\n to release the lock at exit, you can skip this by calling\n :func:`_create_pidlock` instead.\n\n Returns:\n Pidfile: used to manage the lock.\n\n Example:\n >>> pidlock = create_pidlock('/var/run/app.pid')\n \"\"\"\n pidlock = _create_pidlock(pidfile)\n atexit.register(pidlock.release)\n return pidlock\n\n\ndef _create_pidlock(pidfile):\n pidlock = Pidfile(pidfile)\n if pidlock.is_locked() and not pidlock.remove_if_stale():\n print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)\n raise SystemExit(EX_CANTCREAT)\n pidlock.acquire()\n return pidlock\n\n\ndef fd_by_path(paths):\n \"\"\"Return a list of file descriptors.\n\n This method returns list of file descriptors corresponding to\n file paths passed in paths variable.\n\n Arguments:\n paths: List[str]: List of file paths.\n\n Returns:\n List[int]: List of file descriptors.\n\n Example:\n >>> keep = fd_by_path(['/dev/urandom', '/my/precious/'])\n \"\"\"\n stats = set()\n for path in paths:\n try:\n fd = os.open(path, os.O_RDONLY)\n except OSError:\n continue\n try:\n stats.add(os.fstat(fd)[1:3])\n finally:\n os.close(fd)\n\n def fd_in_stats(fd):\n try:\n return os.fstat(fd)[1:3] in stats\n except OSError:\n return False\n\n return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)]\n\n\nclass DaemonContext(object):\n \"\"\"Context manager daemonizing the process.\"\"\"\n\n _is_open = False\n\n def __init__(self, pidfile=None, workdir=None, umask=None,\n fake=False, after_chdir=None, after_forkers=True,\n **kwargs):\n if isinstance(umask, string_t):\n # octal or decimal, depending on initial zero.\n umask = int(umask, 8 if umask.startswith('0') else 10)\n self.workdir = workdir or DAEMON_WORKDIR\n self.umask = umask\n self.fake = fake\n self.after_chdir = after_chdir\n self.after_forkers = after_forkers\n self.stdfds = (sys.stdin, sys.stdout, sys.stderr)\n\n def redirect_to_null(self, fd):\n if fd is not None:\n dest = os.open(os.devnull, os.O_RDWR)\n os.dup2(dest, fd)\n\n def open(self):\n if not self._is_open:\n if not self.fake:\n self._detach()\n\n os.chdir(self.workdir)\n if self.umask is not None:\n os.umask(self.umask)\n\n if self.after_chdir:\n 
self.after_chdir()\n\n if not self.fake:\n # We need to keep /dev/urandom from closing because\n # shelve needs it, and Beat needs shelve to start.\n keep = list(self.stdfds) + fd_by_path(['/dev/urandom'])\n close_open_fds(keep)\n for fd in self.stdfds:\n self.redirect_to_null(maybe_fileno(fd))\n if self.after_forkers and mputil is not None:\n mputil._run_after_forkers()\n\n self._is_open = True\n __enter__ = open\n\n def close(self, *args):\n if self._is_open:\n self._is_open = False\n __exit__ = close\n\n def _detach(self):\n if os.fork() == 0: # first child\n os.setsid() # create new session\n if os.fork() > 0: # pragma: no cover\n # second child\n os._exit(0)\n else:\n os._exit(0)\n return self\n\n\ndef detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,\n workdir=None, fake=False, **opts):\n \"\"\"Detach the current process in the background (daemonize).\n\n Arguments:\n logfile (str): Optional log file.\n The ability to write to this file\n will be verified before the process is detached.\n pidfile (str): Optional pid file.\n The pidfile won't be created,\n as this is the responsibility of the child. But the process will\n exit if the pid lock exists and the pid written is still running.\n uid (int, str): Optional user id or user name to change\n effective privileges to.\n gid (int, str): Optional group id or group name to change\n effective privileges to.\n umask (str, int): Optional umask that'll be effective in\n the child process.\n workdir (str): Optional new working directory.\n fake (bool): Don't actually detach, intended for debugging purposes.\n **opts (Any): Ignored.\n\n Example:\n >>> from celery.platforms import detached, create_pidlock\n >>> with detached(\n ... logfile='/var/log/app.log',\n ... pidfile='/var/run/app.pid',\n ... uid='nobody'):\n ... # Now in detached child process with effective user set to nobody,\n ... # and we know that our logfile can be written to, and that\n ... # the pidfile isn't locked.\n ... pidlock = create_pidlock('/var/run/app.pid')\n ...\n ... # Run the program\n ... 
program.run(logfile='/var/log/app.log')\n \"\"\"\n if not resource:\n raise RuntimeError('This platform does not support detach.')\n workdir = os.getcwd() if workdir is None else workdir\n\n signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler.\n maybe_drop_privileges(uid=uid, gid=gid)\n\n def after_chdir_do():\n # Since without stderr any errors will be silently suppressed,\n # we need to know that we have access to the logfile.\n logfile and open(logfile, 'a').close()\n # Doesn't actually create the pidfile, but makes sure it's not stale.\n if pidfile:\n _create_pidlock(pidfile).release()\n\n return DaemonContext(\n umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,\n )\n\n\ndef parse_uid(uid):\n \"\"\"Parse user id.\n\n Arguments:\n uid (str, int): Actual uid, or the username of a user.\n Returns:\n int: The actual uid.\n \"\"\"\n try:\n return int(uid)\n except ValueError:\n try:\n return pwd.getpwnam(uid).pw_uid\n except (AttributeError, KeyError):\n raise KeyError('User does not exist: {0}'.format(uid))\n\n\ndef parse_gid(gid):\n \"\"\"Parse group id.\n\n Arguments:\n gid (str, int): Actual gid, or the name of a group.\n Returns:\n int: The actual gid of the group.\n \"\"\"\n try:\n return int(gid)\n except ValueError:\n try:\n return grp.getgrnam(gid).gr_gid\n except (AttributeError, KeyError):\n raise KeyError('Group does not exist: {0}'.format(gid))\n\n\ndef _setgroups_hack(groups):\n # :fun:`setgroups` may have a platform-dependent limit,\n # and it's not always possible to know in advance what this limit\n # is, so we use this ugly hack stolen from glibc.\n groups = groups[:]\n\n while 1:\n try:\n return os.setgroups(groups)\n except ValueError: # error from Python's check.\n if len(groups) <= 1:\n raise\n groups[:] = groups[:-1]\n except OSError as exc: # error from the OS.\n if exc.errno != errno.EINVAL or len(groups) <= 1:\n raise\n groups[:] = groups[:-1]\n\n\ndef setgroups(groups):\n \"\"\"Set active groups from a list of group ids.\"\"\"\n max_groups = None\n try:\n max_groups = os.sysconf('SC_NGROUPS_MAX')\n except Exception: # pylint: disable=broad-except\n pass\n try:\n return _setgroups_hack(groups[:max_groups])\n except OSError as exc:\n if exc.errno != errno.EPERM:\n raise\n if any(group not in groups for group in os.getgroups()):\n # we shouldn't be allowed to change to this group.\n raise\n\n\ndef initgroups(uid, gid):\n \"\"\"Init process group permissions.\n\n Compat version of :func:`os.initgroups` that was first\n added to Python 2.7.\n \"\"\"\n if not pwd: # pragma: no cover\n return\n username = pwd.getpwuid(uid)[0]\n if hasattr(os, 'initgroups'): # Python 2.7+\n return os.initgroups(username, gid)\n groups = [gr.gr_gid for gr in grp.getgrall()\n if username in gr.gr_mem]\n setgroups(groups)\n\n\ndef setgid(gid):\n \"\"\"Version of :func:`os.setgid` supporting group names.\"\"\"\n os.setgid(parse_gid(gid))\n\n\ndef setuid(uid):\n \"\"\"Version of :func:`os.setuid` supporting usernames.\"\"\"\n os.setuid(parse_uid(uid))\n\n\ndef maybe_drop_privileges(uid=None, gid=None):\n \"\"\"Change process privileges to new user/group.\n\n If UID and GID is specified, the real user/group is changed.\n\n If only UID is specified, the real user is changed, and the group is\n changed to the users primary group.\n\n If only GID is specified, only the group is changed.\n \"\"\"\n if sys.platform == 'win32':\n return\n if os.geteuid():\n # no point trying to setuid unless we're root.\n if not os.getuid():\n raise SecurityError('contact 
support')\n uid = uid and parse_uid(uid)\n gid = gid and parse_gid(gid)\n\n if uid:\n _setuid(uid, gid)\n else:\n gid and setgid(gid)\n\n if uid and not os.getuid() and not os.geteuid():\n raise SecurityError('Still root uid after drop privileges!')\n if gid and not os.getgid() and not os.getegid():\n raise SecurityError('Still root gid after drop privileges!')\n\n\ndef _setuid(uid, gid):\n # If GID isn't defined, get the primary GID of the user.\n if not gid and pwd:\n gid = pwd.getpwuid(uid).pw_gid\n # Must set the GID before initgroups(), as setgid()\n # is known to zap the group list on some platforms.\n\n # setgid must happen before setuid (otherwise the setgid operation\n # may fail because of insufficient privileges and possibly stay\n # in a privileged group).\n setgid(gid)\n initgroups(uid, gid)\n\n # at last:\n setuid(uid)\n # ... and make sure privileges cannot be restored:\n try:\n setuid(0)\n except OSError as exc:\n if exc.errno != errno.EPERM:\n raise\n # we should get here: cannot restore privileges,\n # everything was fine.\n else:\n raise SecurityError(\n 'non-root user able to restore privileges after setuid.')\n\n\nclass Signals(object):\n \"\"\"Convenience interface to :mod:`signals`.\n\n If the requested signal isn't supported on the current platform,\n the operation will be ignored.\n\n Example:\n >>> from celery.platforms import signals\n\n >>> from proj.handlers import my_handler\n >>> signals['INT'] = my_handler\n\n >>> signals['INT']\n my_handler\n\n >>> signals.supported('INT')\n True\n\n >>> signals.signum('INT')\n 2\n\n >>> signals.ignore('USR1')\n >>> signals['USR1'] == signals.ignored\n True\n\n >>> signals.reset('USR1')\n >>> signals['USR1'] == signals.default\n True\n\n >>> from proj.handlers import exit_handler, hup_handler\n >>> signals.update(INT=exit_handler,\n ... TERM=exit_handler,\n ... 
HUP=hup_handler)\n \"\"\"\n\n ignored = _signal.SIG_IGN\n default = _signal.SIG_DFL\n\n if hasattr(_signal, 'setitimer'):\n\n def arm_alarm(self, seconds):\n _signal.setitimer(_signal.ITIMER_REAL, seconds)\n else: # pragma: no cover\n try:\n from itimer import alarm as _itimer_alarm # noqa\n except ImportError:\n\n def arm_alarm(self, seconds): # noqa\n _signal.alarm(math.ceil(seconds))\n else: # pragma: no cover\n\n def arm_alarm(self, seconds): # noqa\n return _itimer_alarm(seconds) # noqa\n\n def reset_alarm(self):\n return _signal.alarm(0)\n\n def supported(self, name):\n \"\"\"Return true value if signal by ``name`` exists on this platform.\"\"\"\n try:\n self.signum(name)\n except AttributeError:\n return False\n else:\n return True\n\n def signum(self, name):\n \"\"\"Get signal number by name.\"\"\"\n if isinstance(name, numbers.Integral):\n return name\n if not isinstance(name, string_t) \\\n or not name.isupper():\n raise TypeError('signal name must be uppercase string.')\n if not name.startswith('SIG'):\n name = 'SIG' + name\n return getattr(_signal, name)\n\n def reset(self, *signal_names):\n \"\"\"Reset signals to the default signal handler.\n\n Does nothing if the platform has no support for signals,\n or the specified signal in particular.\n \"\"\"\n self.update((sig, self.default) for sig in signal_names)\n\n def ignore(self, *names):\n \"\"\"Ignore signal using :const:`SIG_IGN`.\n\n Does nothing if the platform has no support for signals,\n or the specified signal in particular.\n \"\"\"\n self.update((sig, self.ignored) for sig in names)\n\n def __getitem__(self, name):\n return _signal.getsignal(self.signum(name))\n\n def __setitem__(self, name, handler):\n \"\"\"Install signal handler.\n\n Does nothing if the current platform has no support for signals,\n or the specified signal in particular.\n \"\"\"\n try:\n _signal.signal(self.signum(name), handler)\n except (AttributeError, ValueError):\n pass\n\n def update(self, _d_=None, **sigmap):\n \"\"\"Set signal handlers from a mapping.\"\"\"\n for name, handler in items(dict(_d_ or {}, **sigmap)):\n self[name] = handler\n\n\nsignals = Signals()\nget_signal = signals.signum # compat\ninstall_signal_handler = signals.__setitem__ # compat\nreset_signal = signals.reset # compat\nignore_signal = signals.ignore # compat\n\n\ndef signal_name(signum):\n \"\"\"Return name of signal from signal number.\"\"\"\n return SIGMAP[signum][3:]\n\n\ndef strargv(argv):\n arg_start = 2 if 'manage' in argv[0] else 1\n if len(argv) > arg_start:\n return ' '.join(argv[arg_start:])\n return ''\n\n\ndef set_process_title(progname, info=None):\n \"\"\"Set the :command:`ps` name for the currently running process.\n\n Only works if :pypi:`setproctitle` is installed.\n \"\"\"\n proctitle = '[{0}]'.format(progname)\n proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle\n if _setproctitle:\n _setproctitle.setproctitle(safe_str(proctitle))\n return proctitle\n\n\nif os.environ.get('NOSETPS'): # pragma: no cover\n\n def set_mp_process_title(*a, **k):\n \"\"\"Disabled feature.\"\"\"\n pass\nelse:\n\n def set_mp_process_title(progname, info=None, hostname=None): # noqa\n \"\"\"Set the :command:`ps` name from the current process name.\n\n Only works if :pypi:`setproctitle` is installed.\n \"\"\"\n if hostname:\n progname = '{0}: {1}'.format(progname, hostname)\n name = current_process().name if current_process else 'MainProcess'\n return set_process_title('{0}:{1}'.format(progname, name), info=info)\n\n\ndef get_errno_name(n):\n \"\"\"Get errno 
for string (e.g., ``ENOENT``).\"\"\"\n if isinstance(n, string_t):\n return getattr(errno, n)\n return n\n\n\n@contextmanager\ndef ignore_errno(*errnos, **kwargs):\n \"\"\"Context manager to ignore specific POSIX error codes.\n\n Takes a list of error codes to ignore: this can be either\n the name of the code, or the code integer itself::\n\n >>> with ignore_errno('ENOENT'):\n ... with open('foo', 'r') as fh:\n ... return fh.read()\n\n >>> with ignore_errno(errno.ENOENT, errno.EPERM):\n ... pass\n\n Arguments:\n types (Tuple[Exception]): A tuple of exceptions to ignore\n (when the errno matches). Defaults to :exc:`Exception`.\n \"\"\"\n types = kwargs.get('types') or (Exception,)\n errnos = [get_errno_name(errno) for errno in errnos]\n try:\n yield\n except types as exc:\n if not hasattr(exc, 'errno'):\n raise\n if exc.errno not in errnos:\n raise\n\n\ndef check_privileges(accept_content):\n uid = os.getuid() if hasattr(os, 'getuid') else 65535\n gid = os.getgid() if hasattr(os, 'getgid') else 65535\n euid = os.geteuid() if hasattr(os, 'geteuid') else 65535\n egid = os.getegid() if hasattr(os, 'getegid') else 65535\n\n if hasattr(os, 'fchown'):\n if not all(hasattr(os, attr)\n for attr in ['getuid', 'getgid', 'geteuid', 'getegid']):\n raise SecurityError('suspicious platform, contact support')\n\n if not uid or not gid or not euid or not egid:\n if ('pickle' in accept_content or\n 'application/x-python-serialize' in accept_content):\n if not C_FORCE_ROOT:\n try:\n print(ROOT_DISALLOWED.format(\n uid=uid, euid=euid, gid=gid, egid=egid,\n ), file=sys.stderr)\n finally:\n os._exit(1)\n warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format(\n uid=uid, euid=euid, gid=gid, egid=egid,\n )))\n", "path": "celery/platforms.py" } ]
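For context on what a `--uid` / `--gid` value may look like, the module above resolves either a numeric id or a name through `parse_uid` / `parse_gid`. A minimal POSIX-only sketch mirroring those helpers; the `nobody` name is only an example and may not exist on every system:

```python
# Mirrors parse_uid from celery/platforms.py: accept a numeric id or a user
# name and resolve it via the standard pwd module (POSIX only).
import pwd


def parse_uid(uid):
    try:
        return int(uid)
    except ValueError:
        return pwd.getpwnam(uid).pw_uid  # KeyError if the user does not exist


for value in ("0", "nobody"):
    try:
        print(value, "->", parse_uid(value))
    except KeyError:
        print(value, "-> no such user on this system")
```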
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Platforms.\n\nUtilities dealing with platform specifics: signals, daemonization,\nusers, groups, and so on.\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport atexit\nimport errno\nimport math\nimport numbers\nimport os\nimport platform as _platform\nimport signal as _signal\nimport sys\nimport warnings\n\nfrom collections import namedtuple\n\nfrom billiard.compat import get_fdmax, close_open_fds\n# fileno used to be in this module\nfrom kombu.utils.compat import maybe_fileno\nfrom kombu.utils.encoding import safe_str\nfrom contextlib import contextmanager\n\nfrom .exceptions import SecurityError\nfrom .local import try_import\nfrom .five import items, reraise, string_t\n\ntry:\n from billiard.process import current_process\nexcept ImportError: # pragma: no cover\n current_process = None\n\n_setproctitle = try_import('setproctitle')\nresource = try_import('resource')\npwd = try_import('pwd')\ngrp = try_import('grp')\nmputil = try_import('multiprocessing.util')\n\n__all__ = [\n 'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',\n 'IS_macOS', 'IS_WINDOWS', 'SIGMAP', 'pyimplementation', 'LockFailed',\n 'get_fdmax', 'Pidfile', 'create_pidlock', 'close_open_fds',\n 'DaemonContext', 'detached', 'parse_uid', 'parse_gid', 'setgroups',\n 'initgroups', 'setgid', 'setuid', 'maybe_drop_privileges', 'signals',\n 'signal_name', 'set_process_title', 'set_mp_process_title',\n 'get_errno_name', 'ignore_errno', 'fd_by_path', 'isatty',\n]\n\n# exitcodes\nEX_OK = getattr(os, 'EX_OK', 0)\nEX_FAILURE = 1\nEX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)\nEX_USAGE = getattr(os, 'EX_USAGE', 64)\nEX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)\n\nSYSTEM = _platform.system()\nIS_macOS = SYSTEM == 'Darwin'\nIS_WINDOWS = SYSTEM == 'Windows'\n\nDAEMON_WORKDIR = '/'\n\nPIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY\nPIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK))\n\nPIDLOCKED = \"\"\"ERROR: Pidfile ({0}) already exists.\nSeems we're already running? 
(pid: {1})\"\"\"\n\n_range = namedtuple('_range', ('start', 'stop'))\n\nC_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False)\n\nROOT_DISALLOWED = \"\"\"\\\nRunning a worker with superuser privileges when the\nworker accepts messages serialized with pickle is a very bad idea!\n\nIf you really want to continue then you have to set the C_FORCE_ROOT\nenvironment variable (but please think about this before you do).\n\nUser information: uid={uid} euid={euid} gid={gid} egid={egid}\n\"\"\"\n\nROOT_DISCOURAGED = \"\"\"\\\nYou're running the worker with superuser privileges: this is\nabsolutely not recommended!\n\nPlease specify a different user using the --uid option.\n\nUser information: uid={uid} euid={euid} gid={gid} egid={egid}\n\"\"\"\n\nSIGNAMES = {\n sig for sig in dir(_signal)\n if sig.startswith('SIG') and '_' not in sig\n}\nSIGMAP = {getattr(_signal, name): name for name in SIGNAMES}\n\n\ndef isatty(fh):\n \"\"\"Return true if the process has a controlling terminal.\"\"\"\n try:\n return fh.isatty()\n except AttributeError:\n pass\n\n\ndef pyimplementation():\n \"\"\"Return string identifying the current Python implementation.\"\"\"\n if hasattr(_platform, 'python_implementation'):\n return _platform.python_implementation()\n elif sys.platform.startswith('java'):\n return 'Jython ' + sys.platform\n elif hasattr(sys, 'pypy_version_info'):\n v = '.'.join(str(p) for p in sys.pypy_version_info[:3])\n if sys.pypy_version_info[3:]:\n v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:])\n return 'PyPy ' + v\n else:\n return 'CPython'\n\n\nclass LockFailed(Exception):\n \"\"\"Raised if a PID lock can't be acquired.\"\"\"\n\n\nclass Pidfile(object):\n \"\"\"Pidfile.\n\n This is the type returned by :func:`create_pidlock`.\n\n See Also:\n Best practice is to not use this directly but rather use\n the :func:`create_pidlock` function instead:\n more convenient and also removes stale pidfiles (when\n the process holding the lock is no longer running).\n \"\"\"\n\n #: Path to the pid lock file.\n path = None\n\n def __init__(self, path):\n self.path = os.path.abspath(path)\n\n def acquire(self):\n \"\"\"Acquire lock.\"\"\"\n try:\n self.write_pid()\n except OSError as exc:\n reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2])\n return self\n __enter__ = acquire\n\n def is_locked(self):\n \"\"\"Return true if the pid lock exists.\"\"\"\n return os.path.exists(self.path)\n\n def release(self, *args):\n \"\"\"Release lock.\"\"\"\n self.remove()\n __exit__ = release\n\n def read_pid(self):\n \"\"\"Read and return the current pid.\"\"\"\n with ignore_errno('ENOENT'):\n with open(self.path, 'r') as fh:\n line = fh.readline()\n if line.strip() == line: # must contain '\\n'\n raise ValueError(\n 'Partial or invalid pidfile {0.path}'.format(self))\n\n try:\n return int(line.strip())\n except ValueError:\n raise ValueError(\n 'pidfile {0.path} contents invalid.'.format(self))\n\n def remove(self):\n \"\"\"Remove the lock.\"\"\"\n with ignore_errno(errno.ENOENT, errno.EACCES):\n os.unlink(self.path)\n\n def remove_if_stale(self):\n \"\"\"Remove the lock if the process isn't running.\n\n I.e. 
process does not respons to signal.\n \"\"\"\n try:\n pid = self.read_pid()\n except ValueError as exc:\n print('Broken pidfile found - Removing it.', file=sys.stderr)\n self.remove()\n return True\n if not pid:\n self.remove()\n return True\n\n try:\n os.kill(pid, 0)\n except os.error as exc:\n if exc.errno == errno.ESRCH:\n print('Stale pidfile exists - Removing it.', file=sys.stderr)\n self.remove()\n return True\n return False\n\n def write_pid(self):\n pid = os.getpid()\n content = '{0}\\n'.format(pid)\n\n pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)\n pidfile = os.fdopen(pidfile_fd, 'w')\n try:\n pidfile.write(content)\n # flush and sync so that the re-read below works.\n pidfile.flush()\n try:\n os.fsync(pidfile_fd)\n except AttributeError: # pragma: no cover\n pass\n finally:\n pidfile.close()\n\n rfh = open(self.path)\n try:\n if rfh.read() != content:\n raise LockFailed(\n \"Inconsistency: Pidfile content doesn't match at re-read\")\n finally:\n rfh.close()\nPIDFile = Pidfile # noqa: E305 XXX compat alias\n\n\ndef create_pidlock(pidfile):\n \"\"\"Create and verify pidfile.\n\n If the pidfile already exists the program exits with an error message,\n however if the process it refers to isn't running anymore, the pidfile\n is deleted and the program continues.\n\n This function will automatically install an :mod:`atexit` handler\n to release the lock at exit, you can skip this by calling\n :func:`_create_pidlock` instead.\n\n Returns:\n Pidfile: used to manage the lock.\n\n Example:\n >>> pidlock = create_pidlock('/var/run/app.pid')\n \"\"\"\n pidlock = _create_pidlock(pidfile)\n atexit.register(pidlock.release)\n return pidlock\n\n\ndef _create_pidlock(pidfile):\n pidlock = Pidfile(pidfile)\n if pidlock.is_locked() and not pidlock.remove_if_stale():\n print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)\n raise SystemExit(EX_CANTCREAT)\n pidlock.acquire()\n return pidlock\n\n\ndef fd_by_path(paths):\n \"\"\"Return a list of file descriptors.\n\n This method returns list of file descriptors corresponding to\n file paths passed in paths variable.\n\n Arguments:\n paths: List[str]: List of file paths.\n\n Returns:\n List[int]: List of file descriptors.\n\n Example:\n >>> keep = fd_by_path(['/dev/urandom', '/my/precious/'])\n \"\"\"\n stats = set()\n for path in paths:\n try:\n fd = os.open(path, os.O_RDONLY)\n except OSError:\n continue\n try:\n stats.add(os.fstat(fd)[1:3])\n finally:\n os.close(fd)\n\n def fd_in_stats(fd):\n try:\n return os.fstat(fd)[1:3] in stats\n except OSError:\n return False\n\n return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)]\n\n\nclass DaemonContext(object):\n \"\"\"Context manager daemonizing the process.\"\"\"\n\n _is_open = False\n\n def __init__(self, pidfile=None, workdir=None, umask=None,\n fake=False, after_chdir=None, after_forkers=True,\n **kwargs):\n if isinstance(umask, string_t):\n # octal or decimal, depending on initial zero.\n umask = int(umask, 8 if umask.startswith('0') else 10)\n self.workdir = workdir or DAEMON_WORKDIR\n self.umask = umask\n self.fake = fake\n self.after_chdir = after_chdir\n self.after_forkers = after_forkers\n self.stdfds = (sys.stdin, sys.stdout, sys.stderr)\n\n def redirect_to_null(self, fd):\n if fd is not None:\n dest = os.open(os.devnull, os.O_RDWR)\n os.dup2(dest, fd)\n\n def open(self):\n if not self._is_open:\n if not self.fake:\n self._detach()\n\n os.chdir(self.workdir)\n if self.umask is not None:\n os.umask(self.umask)\n\n if self.after_chdir:\n 
self.after_chdir()\n\n if not self.fake:\n # We need to keep /dev/urandom from closing because\n # shelve needs it, and Beat needs shelve to start.\n keep = list(self.stdfds) + fd_by_path(['/dev/urandom'])\n close_open_fds(keep)\n for fd in self.stdfds:\n self.redirect_to_null(maybe_fileno(fd))\n if self.after_forkers and mputil is not None:\n mputil._run_after_forkers()\n\n self._is_open = True\n __enter__ = open\n\n def close(self, *args):\n if self._is_open:\n self._is_open = False\n __exit__ = close\n\n def _detach(self):\n if os.fork() == 0: # first child\n os.setsid() # create new session\n if os.fork() > 0: # pragma: no cover\n # second child\n os._exit(0)\n else:\n os._exit(0)\n return self\n\n\ndef detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,\n workdir=None, fake=False, **opts):\n \"\"\"Detach the current process in the background (daemonize).\n\n Arguments:\n logfile (str): Optional log file.\n The ability to write to this file\n will be verified before the process is detached.\n pidfile (str): Optional pid file.\n The pidfile won't be created,\n as this is the responsibility of the child. But the process will\n exit if the pid lock exists and the pid written is still running.\n uid (int, str): Optional user id or user name to change\n effective privileges to.\n gid (int, str): Optional group id or group name to change\n effective privileges to.\n umask (str, int): Optional umask that'll be effective in\n the child process.\n workdir (str): Optional new working directory.\n fake (bool): Don't actually detach, intended for debugging purposes.\n **opts (Any): Ignored.\n\n Example:\n >>> from celery.platforms import detached, create_pidlock\n >>> with detached(\n ... logfile='/var/log/app.log',\n ... pidfile='/var/run/app.pid',\n ... uid='nobody'):\n ... # Now in detached child process with effective user set to nobody,\n ... # and we know that our logfile can be written to, and that\n ... # the pidfile isn't locked.\n ... pidlock = create_pidlock('/var/run/app.pid')\n ...\n ... # Run the program\n ... 
program.run(logfile='/var/log/app.log')\n \"\"\"\n if not resource:\n raise RuntimeError('This platform does not support detach.')\n workdir = os.getcwd() if workdir is None else workdir\n\n signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler.\n maybe_drop_privileges(uid=uid, gid=gid)\n\n def after_chdir_do():\n # Since without stderr any errors will be silently suppressed,\n # we need to know that we have access to the logfile.\n logfile and open(logfile, 'a').close()\n # Doesn't actually create the pidfile, but makes sure it's not stale.\n if pidfile:\n _create_pidlock(pidfile).release()\n\n return DaemonContext(\n umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,\n )\n\n\ndef parse_uid(uid):\n \"\"\"Parse user id.\n\n Arguments:\n uid (str, int): Actual uid, or the username of a user.\n Returns:\n int: The actual uid.\n \"\"\"\n try:\n return int(uid)\n except ValueError:\n try:\n return pwd.getpwnam(uid).pw_uid\n except (AttributeError, KeyError):\n raise KeyError('User does not exist: {0}'.format(uid))\n\n\ndef parse_gid(gid):\n \"\"\"Parse group id.\n\n Arguments:\n gid (str, int): Actual gid, or the name of a group.\n Returns:\n int: The actual gid of the group.\n \"\"\"\n try:\n return int(gid)\n except ValueError:\n try:\n return grp.getgrnam(gid).gr_gid\n except (AttributeError, KeyError):\n raise KeyError('Group does not exist: {0}'.format(gid))\n\n\ndef _setgroups_hack(groups):\n # :fun:`setgroups` may have a platform-dependent limit,\n # and it's not always possible to know in advance what this limit\n # is, so we use this ugly hack stolen from glibc.\n groups = groups[:]\n\n while 1:\n try:\n return os.setgroups(groups)\n except ValueError: # error from Python's check.\n if len(groups) <= 1:\n raise\n groups[:] = groups[:-1]\n except OSError as exc: # error from the OS.\n if exc.errno != errno.EINVAL or len(groups) <= 1:\n raise\n groups[:] = groups[:-1]\n\n\ndef setgroups(groups):\n \"\"\"Set active groups from a list of group ids.\"\"\"\n max_groups = None\n try:\n max_groups = os.sysconf('SC_NGROUPS_MAX')\n except Exception: # pylint: disable=broad-except\n pass\n try:\n return _setgroups_hack(groups[:max_groups])\n except OSError as exc:\n if exc.errno != errno.EPERM:\n raise\n if any(group not in groups for group in os.getgroups()):\n # we shouldn't be allowed to change to this group.\n raise\n\n\ndef initgroups(uid, gid):\n \"\"\"Init process group permissions.\n\n Compat version of :func:`os.initgroups` that was first\n added to Python 2.7.\n \"\"\"\n if not pwd: # pragma: no cover\n return\n username = pwd.getpwuid(uid)[0]\n if hasattr(os, 'initgroups'): # Python 2.7+\n return os.initgroups(username, gid)\n groups = [gr.gr_gid for gr in grp.getgrall()\n if username in gr.gr_mem]\n setgroups(groups)\n\n\ndef setgid(gid):\n \"\"\"Version of :func:`os.setgid` supporting group names.\"\"\"\n os.setgid(parse_gid(gid))\n\n\ndef setuid(uid):\n \"\"\"Version of :func:`os.setuid` supporting usernames.\"\"\"\n os.setuid(parse_uid(uid))\n\n\ndef maybe_drop_privileges(uid=None, gid=None):\n \"\"\"Change process privileges to new user/group.\n\n If UID and GID is specified, the real user/group is changed.\n\n If only UID is specified, the real user is changed, and the group is\n changed to the users primary group.\n\n If only GID is specified, only the group is changed.\n \"\"\"\n if sys.platform == 'win32':\n return\n if os.geteuid():\n # no point trying to setuid unless we're root.\n if not os.getuid():\n raise SecurityError('contact 
support')\n uid = uid and parse_uid(uid)\n gid = gid and parse_gid(gid)\n\n if uid:\n _setuid(uid, gid)\n else:\n gid and setgid(gid)\n\n if uid and not os.getuid() and not os.geteuid():\n raise SecurityError('Still root uid after drop privileges!')\n if gid and not os.getgid() and not os.getegid():\n raise SecurityError('Still root gid after drop privileges!')\n\n\ndef _setuid(uid, gid):\n # If GID isn't defined, get the primary GID of the user.\n if not gid and pwd:\n gid = pwd.getpwuid(uid).pw_gid\n # Must set the GID before initgroups(), as setgid()\n # is known to zap the group list on some platforms.\n\n # setgid must happen before setuid (otherwise the setgid operation\n # may fail because of insufficient privileges and possibly stay\n # in a privileged group).\n setgid(gid)\n initgroups(uid, gid)\n\n # at last:\n setuid(uid)\n # ... and make sure privileges cannot be restored:\n try:\n setuid(0)\n except OSError as exc:\n if exc.errno != errno.EPERM:\n raise\n # we should get here: cannot restore privileges,\n # everything was fine.\n else:\n raise SecurityError(\n 'non-root user able to restore privileges after setuid.')\n\n\nclass Signals(object):\n \"\"\"Convenience interface to :mod:`signals`.\n\n If the requested signal isn't supported on the current platform,\n the operation will be ignored.\n\n Example:\n >>> from celery.platforms import signals\n\n >>> from proj.handlers import my_handler\n >>> signals['INT'] = my_handler\n\n >>> signals['INT']\n my_handler\n\n >>> signals.supported('INT')\n True\n\n >>> signals.signum('INT')\n 2\n\n >>> signals.ignore('USR1')\n >>> signals['USR1'] == signals.ignored\n True\n\n >>> signals.reset('USR1')\n >>> signals['USR1'] == signals.default\n True\n\n >>> from proj.handlers import exit_handler, hup_handler\n >>> signals.update(INT=exit_handler,\n ... TERM=exit_handler,\n ... 
HUP=hup_handler)\n \"\"\"\n\n ignored = _signal.SIG_IGN\n default = _signal.SIG_DFL\n\n if hasattr(_signal, 'setitimer'):\n\n def arm_alarm(self, seconds):\n _signal.setitimer(_signal.ITIMER_REAL, seconds)\n else: # pragma: no cover\n try:\n from itimer import alarm as _itimer_alarm # noqa\n except ImportError:\n\n def arm_alarm(self, seconds): # noqa\n _signal.alarm(math.ceil(seconds))\n else: # pragma: no cover\n\n def arm_alarm(self, seconds): # noqa\n return _itimer_alarm(seconds) # noqa\n\n def reset_alarm(self):\n return _signal.alarm(0)\n\n def supported(self, name):\n \"\"\"Return true value if signal by ``name`` exists on this platform.\"\"\"\n try:\n self.signum(name)\n except AttributeError:\n return False\n else:\n return True\n\n def signum(self, name):\n \"\"\"Get signal number by name.\"\"\"\n if isinstance(name, numbers.Integral):\n return name\n if not isinstance(name, string_t) \\\n or not name.isupper():\n raise TypeError('signal name must be uppercase string.')\n if not name.startswith('SIG'):\n name = 'SIG' + name\n return getattr(_signal, name)\n\n def reset(self, *signal_names):\n \"\"\"Reset signals to the default signal handler.\n\n Does nothing if the platform has no support for signals,\n or the specified signal in particular.\n \"\"\"\n self.update((sig, self.default) for sig in signal_names)\n\n def ignore(self, *names):\n \"\"\"Ignore signal using :const:`SIG_IGN`.\n\n Does nothing if the platform has no support for signals,\n or the specified signal in particular.\n \"\"\"\n self.update((sig, self.ignored) for sig in names)\n\n def __getitem__(self, name):\n return _signal.getsignal(self.signum(name))\n\n def __setitem__(self, name, handler):\n \"\"\"Install signal handler.\n\n Does nothing if the current platform has no support for signals,\n or the specified signal in particular.\n \"\"\"\n try:\n _signal.signal(self.signum(name), handler)\n except (AttributeError, ValueError):\n pass\n\n def update(self, _d_=None, **sigmap):\n \"\"\"Set signal handlers from a mapping.\"\"\"\n for name, handler in items(dict(_d_ or {}, **sigmap)):\n self[name] = handler\n\n\nsignals = Signals()\nget_signal = signals.signum # compat\ninstall_signal_handler = signals.__setitem__ # compat\nreset_signal = signals.reset # compat\nignore_signal = signals.ignore # compat\n\n\ndef signal_name(signum):\n \"\"\"Return name of signal from signal number.\"\"\"\n return SIGMAP[signum][3:]\n\n\ndef strargv(argv):\n arg_start = 2 if 'manage' in argv[0] else 1\n if len(argv) > arg_start:\n return ' '.join(argv[arg_start:])\n return ''\n\n\ndef set_process_title(progname, info=None):\n \"\"\"Set the :command:`ps` name for the currently running process.\n\n Only works if :pypi:`setproctitle` is installed.\n \"\"\"\n proctitle = '[{0}]'.format(progname)\n proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle\n if _setproctitle:\n _setproctitle.setproctitle(safe_str(proctitle))\n return proctitle\n\n\nif os.environ.get('NOSETPS'): # pragma: no cover\n\n def set_mp_process_title(*a, **k):\n \"\"\"Disabled feature.\"\"\"\n pass\nelse:\n\n def set_mp_process_title(progname, info=None, hostname=None): # noqa\n \"\"\"Set the :command:`ps` name from the current process name.\n\n Only works if :pypi:`setproctitle` is installed.\n \"\"\"\n if hostname:\n progname = '{0}: {1}'.format(progname, hostname)\n name = current_process().name if current_process else 'MainProcess'\n return set_process_title('{0}:{1}'.format(progname, name), info=info)\n\n\ndef get_errno_name(n):\n \"\"\"Get errno 
for string (e.g., ``ENOENT``).\"\"\"\n if isinstance(n, string_t):\n return getattr(errno, n)\n return n\n\n\n@contextmanager\ndef ignore_errno(*errnos, **kwargs):\n \"\"\"Context manager to ignore specific POSIX error codes.\n\n Takes a list of error codes to ignore: this can be either\n the name of the code, or the code integer itself::\n\n >>> with ignore_errno('ENOENT'):\n ... with open('foo', 'r') as fh:\n ... return fh.read()\n\n >>> with ignore_errno(errno.ENOENT, errno.EPERM):\n ... pass\n\n Arguments:\n types (Tuple[Exception]): A tuple of exceptions to ignore\n (when the errno matches). Defaults to :exc:`Exception`.\n \"\"\"\n types = kwargs.get('types') or (Exception,)\n errnos = [get_errno_name(errno) for errno in errnos]\n try:\n yield\n except types as exc:\n if not hasattr(exc, 'errno'):\n raise\n if exc.errno not in errnos:\n raise\n\n\ndef check_privileges(accept_content):\n uid = os.getuid() if hasattr(os, 'getuid') else 65535\n gid = os.getgid() if hasattr(os, 'getgid') else 65535\n euid = os.geteuid() if hasattr(os, 'geteuid') else 65535\n egid = os.getegid() if hasattr(os, 'getegid') else 65535\n\n if hasattr(os, 'fchown'):\n if not all(hasattr(os, attr)\n for attr in ['getuid', 'getgid', 'geteuid', 'getegid']):\n raise SecurityError('suspicious platform, contact support')\n\n if not uid or not gid or not euid or not egid:\n if ('pickle' in accept_content or\n 'application/x-python-serialize' in accept_content):\n if not C_FORCE_ROOT:\n try:\n print(ROOT_DISALLOWED.format(\n uid=uid, euid=euid, gid=gid, egid=egid,\n ), file=sys.stderr)\n finally:\n os._exit(1)\n warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format(\n uid=uid, euid=euid, gid=gid, egid=egid,\n )))\n", "path": "celery/platforms.py" } ]
diff --git a/celery/platforms.py b/celery/platforms.py index bd7ae58ea9f..7620e1d8210 100644 --- a/celery/platforms.py +++ b/celery/platforms.py @@ -86,7 +86,7 @@ You're running the worker with superuser privileges: this is absolutely not recommended! -Please specify a different user using the -u option. +Please specify a different user using the --uid option. User information: uid={uid} euid={euid} gid={gid} egid={egid} """
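The celery diff above only rewrites the ROOT_DISCOURAGED help text to point at the worker's `--uid` option. For context, a hedged sketch of where that message surfaces, using only `check_privileges` from the `celery/platforms.py` shown in `before_files` (the `accept_content` value here is an assumed example, not taken from the record):

```python
# Hedged sketch (not from the repository): ROOT_DISCOURAGED, the string edited
# in the diff, is the warning celery.platforms.check_privileges() emits when a
# worker runs as root with non-pickle content types; ROOT_DISALLOWED is the
# fatal variant used when pickle is accepted.
from celery.platforms import check_privileges

# As an unprivileged user this is a no-op; run as root it prints/warns with
# the messages defined in platforms.py above.
check_privileges(accept_content={"json"})
```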
facebookresearch__CompilerGym-592
Is running CompilerGym intended to leave cache directories behind? ## ❓ Questions and Help Not sure if this is a bug or not, so submitting as a question. Running a CompilerGym experiment leaves behind many cache directories. When running a large experiment, this can create problems through the sheer number of directories in `COMPILER_GYM_CACHE`. I expected the `COMPILER_GYM_CACHE` to not have anything after the experiment exited cleanly. Is there a way to avoid the experiments leaving the directories behind? ## Steps to reproduce Running the following on my machine leaves behind about 270 cache directories. ```python import compiler_gym import compiler_gym.wrappers from ray import tune from ray.rllib.agents.ppo import PPOTrainer def make_env(env_config): env = compiler_gym.make(env_config['cgym_id']) env = compiler_gym.wrappers.TimeLimit(env, env_config['timelimit']) dataset = env.datasets[env_config['dataset']] env = compiler_gym.wrappers.CycleOverBenchmarks( env, dataset.benchmarks()) return env config = { "env_config": { "cgym_id": "llvm-autophase-ic-v0", "timelimit": 45, "dataset": "benchmark://cbench-v1", }, "env": "CompilerGym", } stop = { "timesteps_total": 10_000, } tune.register_env("CompilerGym", make_env) tune.run( PPOTrainer, config=config, stop=stop, name='cgym_cache_dir_demo', ) ``` ## Environment Please fill in this checklist: - CompilerGym: 0.2.2 - How you installed CompilerGym (conda, pip, source): pip - OS: Ubuntu 20.04.1 LTS (x86_64) - Python version: 3.9.7 - Build command you used (if compiling from source): N/A - GCC/clang version (if compiling from source): N/A - Bazel version (if compiling from source): N/A - Versions of any other relevant libraries: ray: 1.10.0, gym: 0.20.0
[ { "content": "#! /usr/bin/env python3\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"An example CompilerGym service in python.\"\"\"\nimport os\nimport sys\nfrom concurrent import futures\nfrom multiprocessing import cpu_count\nfrom pathlib import Path\nfrom signal import SIGTERM, signal\nfrom tempfile import mkdtemp\nfrom threading import Event, Thread\nfrom typing import Type\n\nimport grpc\nfrom absl import app, flags, logging\n\nfrom compiler_gym.service import connection\nfrom compiler_gym.service.compilation_session import CompilationSession\nfrom compiler_gym.service.proto import compiler_gym_service_pb2_grpc\nfrom compiler_gym.service.runtime.compiler_gym_service import CompilerGymService\nfrom compiler_gym.util import debug_util as dbg\nfrom compiler_gym.util.filesystem import atomic_file_write\nfrom compiler_gym.util.shell_format import plural\n\nflags.DEFINE_string(\"working_dir\", \"\", \"Path to use as service working directory\")\nflags.DEFINE_integer(\"port\", 0, \"The service listening port\")\nflags.DEFINE_integer(\n \"rpc_service_threads\", cpu_count(), \"The number of server worker threads\"\n)\nflags.DEFINE_integer(\"logbuflevel\", 0, \"Flag for compatability with C++ service.\")\nFLAGS = flags.FLAGS\n\nMAX_MESSAGE_SIZE_IN_BYTES = 512 * 1024 * 1024\n\n\nshutdown_signal = Event()\n\n\n# NOTE(cummins): This script is executed in a subprocess, so code coverage\n# tracking does not work. As such we use \"# pragma: no cover\" annotation for all\n# functions.\ndef _shutdown_handler(signal_number, stack_frame): # pragma: no cover\n del stack_frame # Unused\n logging.info(\"Service received signal: %d\", signal_number)\n shutdown_signal.set()\n\n\ndef create_and_run_compiler_gym_service(\n compilation_session_type: Type[CompilationSession],\n): # pragma: no cover\n \"\"\"Create and run an RPC service for the given compilation session.\n\n This should be called on its own in a self contained script to implement a\n compilation service. Example:\n\n .. 
code-block:: python\n\n from compiler_gym.service import runtime\n from my_compiler_service import MyCompilationSession\n\n if __name__ == \"__main__\":\n runtime.create_and_run_compiler_gym_service(MyCompilationSession)\n\n This function never returns.\n\n :param compilation_session_type: A sublass of :class:`CompilationSession\n <compiler_gym.service.CompilationSession>` that provides implementations\n of the abstract methods.\n \"\"\"\n\n def main(argv):\n # Register a signal handler for SIGTERM that will set the shutdownSignal\n # future value.\n signal(SIGTERM, _shutdown_handler)\n\n argv = [x for x in argv if x.strip()]\n if len(argv) > 1:\n print(\n f\"ERROR: Unrecognized command line argument '{argv[1]}'\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n working_dir = Path(FLAGS.working_dir or mkdtemp(prefix=\"compiler_gym-service-\"))\n (working_dir / \"logs\").mkdir(exist_ok=True, parents=True)\n\n FLAGS.log_dir = str(working_dir / \"logs\")\n logging.get_absl_handler().use_absl_log_file()\n logging.set_verbosity(dbg.get_logging_level())\n\n # Create the service.\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=FLAGS.rpc_service_threads),\n options=connection.GRPC_CHANNEL_OPTIONS,\n )\n service = CompilerGymService(\n working_directory=working_dir,\n compilation_session_type=compilation_session_type,\n )\n compiler_gym_service_pb2_grpc.add_CompilerGymServiceServicer_to_server(\n service, server\n )\n\n address = f\"0.0.0.0:{FLAGS.port}\" if FLAGS.port else \"0.0.0.0:0\"\n port = server.add_insecure_port(address)\n\n with atomic_file_write(working_dir / \"port.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(port))\n\n with atomic_file_write(working_dir / \"pid.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(os.getpid()))\n\n logging.info(\n \"Service %s listening on %d, PID = %d\", working_dir, port, os.getpid()\n )\n\n server.start()\n\n # Block on the RPC service in a separate thread. This enables the\n # current thread to handle the shutdown routine.\n server_thread = Thread(target=server.wait_for_termination)\n server_thread.start()\n\n # Block until the shutdown signal is received.\n shutdown_signal.wait()\n logging.info(\"Shutting down the RPC service\")\n server.stop(60).wait()\n server_thread.join()\n\n if len(service.sessions):\n print(\n \"ERROR: Killing a service with\",\n plural(len(service.session), \"active session\", \"active sessions\"),\n file=sys.stderr,\n )\n sys.exit(6)\n\n app.run(main)\n", "path": "compiler_gym/service/runtime/create_and_run_compiler_gym_service.py" } ]
[ { "content": "#! /usr/bin/env python3\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"An example CompilerGym service in python.\"\"\"\nimport os\nimport sys\nfrom concurrent import futures\nfrom multiprocessing import cpu_count\nfrom pathlib import Path\nfrom signal import SIGTERM, signal\nfrom tempfile import mkdtemp\nfrom threading import Event, Thread\nfrom typing import Type\n\nimport grpc\nfrom absl import app, flags, logging\n\nfrom compiler_gym.service import connection\nfrom compiler_gym.service.compilation_session import CompilationSession\nfrom compiler_gym.service.proto import compiler_gym_service_pb2_grpc\nfrom compiler_gym.service.runtime.compiler_gym_service import CompilerGymService\nfrom compiler_gym.util import debug_util as dbg\nfrom compiler_gym.util.filesystem import atomic_file_write\nfrom compiler_gym.util.shell_format import plural\n\nflags.DEFINE_string(\"working_dir\", \"\", \"Path to use as service working directory\")\nflags.DEFINE_integer(\"port\", 0, \"The service listening port\")\nflags.DEFINE_integer(\n \"rpc_service_threads\", cpu_count(), \"The number of server worker threads\"\n)\nflags.DEFINE_integer(\"logbuflevel\", 0, \"Flag for compatability with C++ service.\")\nFLAGS = flags.FLAGS\n\nMAX_MESSAGE_SIZE_IN_BYTES = 512 * 1024 * 1024\n\n\nshutdown_signal = Event()\n\n\n# NOTE(cummins): This script is executed in a subprocess, so code coverage\n# tracking does not work. As such we use \"# pragma: no cover\" annotation for all\n# functions.\ndef _shutdown_handler(signal_number, stack_frame): # pragma: no cover\n del stack_frame # Unused\n logging.info(\"Service received signal: %d\", signal_number)\n shutdown_signal.set()\n\n\ndef create_and_run_compiler_gym_service(\n compilation_session_type: Type[CompilationSession],\n): # pragma: no cover\n \"\"\"Create and run an RPC service for the given compilation session.\n\n This should be called on its own in a self contained script to implement a\n compilation service. Example:\n\n .. 
code-block:: python\n\n from compiler_gym.service import runtime\n from my_compiler_service import MyCompilationSession\n\n if __name__ == \"__main__\":\n runtime.create_and_run_compiler_gym_service(MyCompilationSession)\n\n This function never returns.\n\n :param compilation_session_type: A sublass of :class:`CompilationSession\n <compiler_gym.service.CompilationSession>` that provides implementations\n of the abstract methods.\n \"\"\"\n\n def main(argv):\n # Register a signal handler for SIGTERM that will set the shutdownSignal\n # future value.\n signal(SIGTERM, _shutdown_handler)\n\n argv = [x for x in argv if x.strip()]\n if len(argv) > 1:\n print(\n f\"ERROR: Unrecognized command line argument '{argv[1]}'\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n working_dir = Path(FLAGS.working_dir or mkdtemp(prefix=\"compiler_gym-service-\"))\n (working_dir / \"logs\").mkdir(exist_ok=True, parents=True)\n\n FLAGS.log_dir = str(working_dir / \"logs\")\n logging.get_absl_handler().use_absl_log_file()\n logging.set_verbosity(dbg.get_logging_level())\n\n # Create the service.\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=FLAGS.rpc_service_threads),\n options=connection.GRPC_CHANNEL_OPTIONS,\n )\n service = CompilerGymService(\n working_directory=working_dir,\n compilation_session_type=compilation_session_type,\n )\n compiler_gym_service_pb2_grpc.add_CompilerGymServiceServicer_to_server(\n service, server\n )\n\n address = f\"0.0.0.0:{FLAGS.port}\" if FLAGS.port else \"0.0.0.0:0\"\n port = server.add_insecure_port(address)\n\n with atomic_file_write(working_dir / \"port.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(port))\n\n with atomic_file_write(working_dir / \"pid.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(os.getpid()))\n\n logging.info(\n \"Service %s listening on %d, PID = %d\", working_dir, port, os.getpid()\n )\n\n server.start()\n\n # Block on the RPC service in a separate thread. This enables the\n # current thread to handle the shutdown routine.\n server_thread = Thread(target=server.wait_for_termination)\n server_thread.start()\n\n # Block until the shutdown signal is received.\n shutdown_signal.wait()\n logging.info(\"Shutting down the RPC service\")\n server.stop(60).wait()\n server_thread.join()\n logging.info(\"Service closed\")\n\n if len(service.sessions):\n print(\n \"ERROR: Killing a service with\",\n plural(len(service.session), \"active session\", \"active sessions\"),\n file=sys.stderr,\n )\n sys.exit(6)\n\n app.run(main)\n", "path": "compiler_gym/service/runtime/create_and_run_compiler_gym_service.py" } ]
diff --git a/compiler_gym/envs/llvm/service/BUILD b/compiler_gym/envs/llvm/service/BUILD index 6b1d21c6b..294f037e9 100644 --- a/compiler_gym/envs/llvm/service/BUILD +++ b/compiler_gym/envs/llvm/service/BUILD @@ -63,6 +63,7 @@ cc_binary( name = "compiler_gym-llvm-service-prelinked", srcs = ["RunService.cc"], deps = [ + ":BenchmarkFactory", ":LlvmSession", "//compiler_gym/service/runtime:cc_runtime", ], diff --git a/compiler_gym/envs/llvm/service/Benchmark.cc b/compiler_gym/envs/llvm/service/Benchmark.cc index 0ad8be841..7d18e73a3 100644 --- a/compiler_gym/envs/llvm/service/Benchmark.cc +++ b/compiler_gym/envs/llvm/service/Benchmark.cc @@ -162,9 +162,12 @@ Benchmark::Benchmark(const std::string& name, std::unique_ptr<llvm::LLVMContext> needsRecompile_(true) {} void Benchmark::close() { + VLOG(3) << "Closing benchmark " << name() << " with scratch directory " + << scratchDirectory().string(); sys::error_code ec; fs::remove_all(scratchDirectory(), ec); CHECK(!ec) << "Failed to delete scratch directory: " << scratchDirectory().string(); + VLOG(3) << "Closed benchmark " << name(); } std::unique_ptr<Benchmark> Benchmark::clone(const fs::path& workingDirectory) const { diff --git a/compiler_gym/envs/llvm/service/BenchmarkFactory.cc b/compiler_gym/envs/llvm/service/BenchmarkFactory.cc index 1108f2069..638aa651e 100644 --- a/compiler_gym/envs/llvm/service/BenchmarkFactory.cc +++ b/compiler_gym/envs/llvm/service/BenchmarkFactory.cc @@ -48,6 +48,7 @@ void BenchmarkFactory::close() { for (auto& entry : benchmarks_) { entry.second.close(); } + benchmarks_.clear(); } Status BenchmarkFactory::getBenchmark(const BenchmarkProto& benchmarkMessage, diff --git a/compiler_gym/envs/llvm/service/CMakeLists.txt b/compiler_gym/envs/llvm/service/CMakeLists.txt index c267b40d7..62f23f9f7 100644 --- a/compiler_gym/envs/llvm/service/CMakeLists.txt +++ b/compiler_gym/envs/llvm/service/CMakeLists.txt @@ -21,6 +21,7 @@ cg_cc_binary( "RunService.cc" DEPS ::LlvmSession + ::BenchmarkFactory compiler_gym::service::runtime::cc_runtime ) diff --git a/compiler_gym/envs/llvm/service/RunService.cc b/compiler_gym/envs/llvm/service/RunService.cc index f271fd89a..508ae456e 100644 --- a/compiler_gym/envs/llvm/service/RunService.cc +++ b/compiler_gym/envs/llvm/service/RunService.cc @@ -2,6 +2,7 @@ // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +#include "compiler_gym/envs/llvm/service/BenchmarkFactory.h" #include "compiler_gym/envs/llvm/service/LlvmSession.h" #include "compiler_gym/service/runtime/Runtime.h" #include "llvm/InitializePasses.h" @@ -59,5 +60,17 @@ void initLlvm() { int main(int argc, char** argv) { initLlvm(); - createAndRunCompilerGymService<LlvmSession>(argc, argv, usage); + const auto ret = createAndRunCompilerGymService<LlvmSession>(argc, argv, usage); + + // NOTE(github.com/facebookresearch/CompilerGym/issues/582): We need to make + // sure that BenchmarkFactory::close() is called on the global singleton + // instance, so that the temporary scratch directories are tidied up. + // + // TODO(github.com/facebookresearch/CompilerGym/issues/591): Once the runtime + // has been refactored to support intra-session mutable state, this singleton + // can be replaced by a member variable that is closed on + // CompilerGymServiceContext::shutdown(). 
+ BenchmarkFactory::getSingleton(FLAGS_working_dir).close(); + + return ret; } diff --git a/compiler_gym/service/runtime/CreateAndRunCompilerGymServiceImpl.h b/compiler_gym/service/runtime/CreateAndRunCompilerGymServiceImpl.h index 6a4f1b2c1..e22d6b85e 100644 --- a/compiler_gym/service/runtime/CreateAndRunCompilerGymServiceImpl.h +++ b/compiler_gym/service/runtime/CreateAndRunCompilerGymServiceImpl.h @@ -51,7 +51,7 @@ void setGrpcChannelOptions(grpc::ServerBuilder& builder); // createAndRunCompilerGymServiceImpl(argc, argv, "usage string"); // } template <typename CompilationSessionType> -[[noreturn]] void createAndRunCompilerGymServiceImpl(int argc, char** argv, const char* usage) { +[[nodiscard]] int createAndRunCompilerGymServiceImpl(int argc, char** argv, const char* usage) { // Register a signal handler for SIGTERM that will set the shutdown_signal // future value. std::signal(SIGTERM, shutdown_handler); @@ -62,7 +62,7 @@ template <typename CompilationSessionType> gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true); if (argc > 1) { std::cerr << "ERROR: unknown command line argument '" << argv[1] << '\''; - exit(1); + return 1; } // Set up the working and logging directories. @@ -129,15 +129,16 @@ template <typename CompilationSessionType> VLOG(2) << "Shutting down the RPC service"; server->Shutdown(); serverThread.join(); + VLOG(2) << "Service closed"; if (service.sessionCount()) { LOG(ERROR) << "ERROR: Killing a service with " << service.sessionCount() << (service.sessionCount() > 1 ? " active sessions!" : " active session!") << std::endl; - exit(6); + return 6; } - exit(0); + return 0; } } // namespace compiler_gym::runtime diff --git a/compiler_gym/service/runtime/Runtime.h b/compiler_gym/service/runtime/Runtime.h index ef154bb1c..42d162eb2 100644 --- a/compiler_gym/service/runtime/Runtime.h +++ b/compiler_gym/service/runtime/Runtime.h @@ -20,20 +20,20 @@ namespace compiler_gym::runtime { * #include "my_compiler_service/MyCompilationSession.h" * * int main(int argc, char** argv) { - * createAndRunCompilerGymService<MyCompilationSession>( + * return createAndRunCompilerGymService<MyCompilationSession>( * argc, argc, "My compiler service" * ); * } * \endcode * - * This function never returns. - * * @tparam CompilationSessionType A sublass of CompilationSession that provides * implementations of the abstract methods. + * + * @return An integer return code. 
*/ template <typename CompilationSessionType> -[[noreturn]] void createAndRunCompilerGymService(int argc, char** argv, const char* usage) { - createAndRunCompilerGymServiceImpl<CompilationSessionType>(argc, argv, usage); +[[nodiscard]] int createAndRunCompilerGymService(int argc, char** argv, const char* usage) { + return createAndRunCompilerGymServiceImpl<CompilationSessionType>(argc, argv, usage); } } // namespace compiler_gym::runtime diff --git a/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py b/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py index 94e410465..3b53acde1 100644 --- a/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py +++ b/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py @@ -130,6 +130,7 @@ def main(argv): logging.info("Shutting down the RPC service") server.stop(60).wait() server_thread.join() + logging.info("Service closed") if len(service.sessions): print( diff --git a/examples/example_compiler_gym_service/service_cc/ExampleService.cc b/examples/example_compiler_gym_service/service_cc/ExampleService.cc index 99cacb769..76de9635d 100644 --- a/examples/example_compiler_gym_service/service_cc/ExampleService.cc +++ b/examples/example_compiler_gym_service/service_cc/ExampleService.cc @@ -133,5 +133,5 @@ class ExampleCompilationSession final : public CompilationSession { } // namespace int main(int argc, char** argv) { - runtime::createAndRunCompilerGymService<ExampleCompilationSession>(argc, argv, usage); + return runtime::createAndRunCompilerGymService<ExampleCompilationSession>(argc, argv, usage); }
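On the C++ side, the key design change in this diff is that `createAndRunCompilerGymService` stops being `[[noreturn]]` and instead hands an exit code back to `main`, so `BenchmarkFactory::getSingleton(...).close()` can tidy the scratch directories before the process exits. A rough Python sketch of that pattern, with hypothetical names (this is not CompilerGym code, just an illustration of the return-instead-of-exit structure):

```python
# Illustrative sketch only (hypothetical names, not CompilerGym code): the
# service loop returns an exit code instead of terminating the process, so
# the caller can run cleanup before exiting.
import shutil
import sys
import tempfile


def run_service(scratch_root: str) -> int:
    # ... create per-benchmark scratch files under scratch_root, serve RPCs
    # until a shutdown signal arrives ...
    return 0


def main() -> int:
    scratch_root = tempfile.mkdtemp(prefix="compiler_gym-service-")
    ret = run_service(scratch_root)
    # Analogue of BenchmarkFactory::getSingleton(FLAGS_working_dir).close()
    # in the diff: remove scratch state after the service loop returns.
    shutil.rmtree(scratch_root, ignore_errors=True)
    return ret


if __name__ == "__main__":
    sys.exit(main())
```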
urllib3__urllib3-2843
flaky and pytest-memray incompatible ### Subject ``` ______________________________________________________________________________________________________ TestHTTPProxyManager.test_forwarding_proxy_request_timeout[https-https-True] ______________________________________________________________________________________________________ Traceback (most recent call last): File "/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py", line 122, in wrapper result: object | None = func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ File "/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py", line 121, in wrapper with Tracker(result_file): File "src/memray/_memray.pyx", line 404, in memray._memray.Tracker.__enter__ RuntimeError: No more than one Tracker instance can be active at the same time ``` caused by a flaky test: ``` ===Flaky Test Report=== test_forwarding_proxy_request_timeout[https-https-True] failed (1 runs remaining out of 2). <class 'AssertionError'> assert <class 'urllib3.exceptions.ProxyError'> == ReadTimeoutError + where <class 'urllib3.exceptions.ProxyError'> = type(ProxyError('Unable to connect to proxy', ReadTimeoutError("HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)"))) + where ProxyError('Unable to connect to proxy', ReadTimeoutError("HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)")) = MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\'Unable to connect to proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))').reason + where MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\'Unable to connect to proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))') = <ExceptionInfo MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Ca...proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))') tblen=10>.value [<TracebackEntry /home/graingert/projects/urllib3/test/with_dummyserver/test_proxy_poolmanager.py:484>] test_forwarding_proxy_request_timeout[https-https-True] failed; it passed 0 out of the required 1 times. <class 'RuntimeError'> No more than one Tracker instance can be active at the same time [<TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:122>, <TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:121>, <TracebackEntry src/memray/_memray.pyx:404>] ``` see also https://github.com/bloomberg/pytest-memray/issues/53
[ { "content": "from __future__ import annotations\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n memray_supported = True\n if sys.implementation.name != \"cpython\" or sys.version_info < (3, 8):\n memray_supported = False # pytest-memray requires CPython 3.8+\n elif sys.platform == \"win32\":\n memray_supported = False\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n *(\"--memray\", \"--hide-memray-summary\") if memray_supported else (),\n \"-v\",\n \"-ra\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n \"--durations=10\",\n \"--strict-config\",\n \"--strict-markers\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n \"\"\"We either clone the target repository or if already exist\n simply reset the state and pull.\n \"\"\"\n expected_directory = git_url.split(\"/\")[-1]\n\n if expected_directory.endswith(\".git\"):\n expected_directory = expected_directory[:-4]\n\n if not os.path.isdir(expected_directory):\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n else:\n session.run(\n \"git\", \"-C\", expected_directory, \"reset\", \"--hard\", \"HEAD\", external=True\n )\n session.run(\"git\", \"-C\", expected_directory, \"pull\", external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n for patch in [\n 
\"0001-Mark-100-Continue-tests-as-failing.patch\",\n \"0002-Stop-relying-on-removed-DEFAULT_CIPHERS.patch\",\n ]:\n session.run(\"git\", \"apply\", f\"{root}/ci/{patch}\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0003-requests-removed-warnings.patch\", external=True\n )\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0004-requests-chunked-requests.patch\", external=True\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py" } ]
[ { "content": "from __future__ import annotations\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n memray_supported = True\n if sys.implementation.name != \"cpython\" or sys.version_info < (3, 8):\n memray_supported = False # pytest-memray requires CPython 3.8+\n elif sys.platform == \"win32\":\n memray_supported = False\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n *(\"--memray\", \"--hide-memray-summary\") if memray_supported else (),\n \"-v\",\n \"-ra\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--durations=10\",\n \"--strict-config\",\n \"--strict-markers\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n \"\"\"We either clone the target repository or if already exist\n simply reset the state and pull.\n \"\"\"\n expected_directory = git_url.split(\"/\")[-1]\n\n if expected_directory.endswith(\".git\"):\n expected_directory = expected_directory[:-4]\n\n if not os.path.isdir(expected_directory):\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n else:\n session.run(\n \"git\", \"-C\", expected_directory, \"reset\", \"--hard\", \"HEAD\", external=True\n )\n session.run(\"git\", \"-C\", expected_directory, \"pull\", external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n for patch in [\n \"0001-Mark-100-Continue-tests-as-failing.patch\",\n 
\"0002-Stop-relying-on-removed-DEFAULT_CIPHERS.patch\",\n ]:\n session.run(\"git\", \"apply\", f\"{root}/ci/{patch}\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0003-requests-removed-warnings.patch\", external=True\n )\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0004-requests-chunked-requests.patch\", external=True\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py" } ]
diff --git a/dev-requirements.txt b/dev-requirements.txt index 2d1ef23faf..84840cd0f2 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -4,7 +4,6 @@ PySocks==1.7.1 pytest==7.2.0 pytest-timeout==2.1.0 pytest-freezegun==0.4.2 -flaky==3.7.0 trustme==0.9.0 cryptography==39.0.0 backports.zoneinfo==0.2.1;python_version<"3.9" diff --git a/noxfile.py b/noxfile.py index 81414f42e2..b0bec11470 100644 --- a/noxfile.py +++ b/noxfile.py @@ -56,7 +56,6 @@ def tests_impl( "-ra", f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}", "--tb=native", - "--no-success-flaky-report", "--durations=10", "--strict-config", "--strict-markers", diff --git a/test/with_dummyserver/test_chunked_transfer.py b/test/with_dummyserver/test_chunked_transfer.py index 707b59f306..c2dc12e769 100644 --- a/test/with_dummyserver/test_chunked_transfer.py +++ b/test/with_dummyserver/test_chunked_transfer.py @@ -13,9 +13,6 @@ from urllib3.util import SKIP_HEADER from urllib3.util.retry import Retry -# Retry failed tests -pytestmark = pytest.mark.flaky - class TestChunkedTransfer(SocketDummyServerTestCase): def start_chunked_handler(self) -> None: diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py index aea46c8935..13ad811d06 100644 --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -35,8 +35,6 @@ from .. import INVALID_SOURCE_ADDRESSES, TARPIT_HOST, VALID_SOURCE_ADDRESSES from ..port_helpers import find_unused_port -pytestmark = pytest.mark.flaky - def wait_for_socket(ready_event: Event) -> None: ready_event.wait() diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py index ac0fa9419c..7678bfbed1 100644 --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -47,10 +47,6 @@ from .. import has_alpn -# Retry failed tests -pytestmark = pytest.mark.flaky - - TLSv1_CERTS = DEFAULT_CERTS.copy() TLSv1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1", None) diff --git a/test/with_dummyserver/test_no_ssl.py b/test/with_dummyserver/test_no_ssl.py index 12e07839ee..6529636c3b 100644 --- a/test/with_dummyserver/test_no_ssl.py +++ b/test/with_dummyserver/test_no_ssl.py @@ -5,16 +5,11 @@ """ from __future__ import annotations -import pytest - import urllib3 from dummyserver.testcase import HTTPDummyServerTestCase, HTTPSDummyServerTestCase from ..test_no_ssl import TestWithoutSSL -# Retry failed tests -pytestmark = pytest.mark.flaky - class TestHTTPWithoutSSL(HTTPDummyServerTestCase, TestWithoutSSL): def test_simple(self) -> None: diff --git a/test/with_dummyserver/test_poolmanager.py b/test/with_dummyserver/test_poolmanager.py index 2c0f1002aa..c4f1947037 100644 --- a/test/with_dummyserver/test_poolmanager.py +++ b/test/with_dummyserver/test_poolmanager.py @@ -14,9 +14,6 @@ from urllib3.poolmanager import PoolManager from urllib3.util.retry import Retry -# Retry failed tests -pytestmark = pytest.mark.flaky - class TestPoolManager(HTTPDummyServerTestCase): @classmethod diff --git a/test/with_dummyserver/test_proxy_poolmanager.py b/test/with_dummyserver/test_proxy_poolmanager.py index a0566ecea3..171cb23b67 100644 --- a/test/with_dummyserver/test_proxy_poolmanager.py +++ b/test/with_dummyserver/test_proxy_poolmanager.py @@ -37,9 +37,6 @@ from .. 
import TARPIT_HOST, requires_network -# Retry failed tests -pytestmark = pytest.mark.flaky - class TestHTTPProxyManager(HTTPDummyProxyTestCase): @classmethod diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py index df005e5a40..56cd224ec0 100644 --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -60,9 +60,6 @@ else: StrOrBytesPath = object -# Retry failed tests -pytestmark = pytest.mark.flaky - class TestCookies(SocketDummyServerTestCase): def test_multi_setcookie(self) -> None:
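The fix in this record is simply to drop the `flaky` plugin (its dev requirement, the `--no-success-flaky-report` flag, and the `pytest.mark.flaky` marks). The constraint behind the reported crash is that memray allows only one active `Tracker` per process; a standalone illustration of that constraint (using the memray API directly, not the urllib3 test suite, with throwaway output paths) is:

```python
# Standalone illustration (not urllib3 code): memray permits a single active
# Tracker per process. pytest-memray wraps each test in one, so a flaky
# re-run that re-enters the wrapper hits the same RuntimeError seen in the
# traceback above.
import pathlib
import tempfile

from memray import Tracker

tmp = pathlib.Path(tempfile.mkdtemp())
with Tracker(str(tmp / "first_run.bin")):
    with Tracker(str(tmp / "retry.bin")):  # RuntimeError: No more than one
        pass                               # Tracker instance can be active...
```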
mne-tools__mne-bids-pipeline-289
`ValueError: n_jobs must be an integer` when calling freesurfer Hi, When I run `python run.py freesurfer --config=~/hMT+/config.py`, I get the following error traceback: ``` Traceback (most recent call last): File "/home/merlin/PhD/mne-bids-pipeline/run.py", line 194, in <module> fire.Fire(process) File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/fire/core.py", line 141, in Fire component_trace = _Fire(component, args, parsed_flag_args, context, name) File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/fire/core.py", line 466, in _Fire component, remaining_args = _CallAndUpdateTrace( File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/fire/core.py", line 681, in _CallAndUpdateTrace component = fn(*varargs, **kwargs) File "/home/merlin/PhD/mne-bids-pipeline/run.py", line 189, in process _run_script(script_path, config, root_dir, subject, session, task, run) File "/home/merlin/PhD/mne-bids-pipeline/run.py", line 98, in _run_script runpy.run_path(script_path, run_name='__main__') File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/runpy.py", line 268, in run_path return _run_module_code(code, init_globals, run_name, File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/runpy.py", line 97, in _run_module_code _run_code(code, mod_globals, init_globals, File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/runpy.py", line 87, in _run_code exec(code, run_globals) File "/home/merlin/PhD/mne-bids-pipeline/scripts/freesurfer/recon_all.py", line 112, in <module> fire.Fire(main) File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/fire/core.py", line 141, in Fire component_trace = _Fire(component, args, parsed_flag_args, context, name) File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/fire/core.py", line 466, in _Fire component, remaining_args = _CallAndUpdateTrace( File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/fire/core.py", line 681, in _CallAndUpdateTrace component = fn(*varargs, **kwargs) File "/home/merlin/PhD/mne-bids-pipeline/scripts/freesurfer/recon_all.py", line 94, in main parallel, run_func, _ = parallel_func(run_recon, n_jobs=n_jobs) File "<decorator-gen-42>", line 24, in parallel_func File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/mne/parallel.py", line 112, in parallel_func n_jobs = check_n_jobs(n_jobs) File "/home/merlin/miniconda3/envs/mne-bids/lib/python3.9/site-packages/mne/parallel.py", line 159, in check_n_jobs raise ValueError('n_jobs must be an integer') ValueError: n_jobs must be an integer ``` Checking with pdb, it seems that `recon_all` is called with `n_jobs = "freesurfer"`. I'm not sure why that is. ``` 19:36:39 Using custom configuration: /home/merlin/hMT+/config.py 19:36:39 [Step-01] Running: Initializing output directories. 19:36:39 [Step-01] Initializing output directories. 19:36:39 [Step-01] Completed: Initializing output directories. 2021-04-14 19:36:39 INFO Successfully finished running: init_derivatives_dir 2021-04-14 19:36:39 INFO Now running: on_all > /home/merlin/PhD/mne-bids-pipeline/scripts/freesurfer/recon_all.py(88)main() 87 ---> 88 logger.info('Running FreeSurfer') 89 ipdb> n_jobs 'freesurfer' ipdb> ``` It might be the config needs to be changed, but I can't figure out how.
[ { "content": "#!/usr/bin/env python\n\nimport os\nimport shutil\nimport sys\nfrom pathlib import Path\nimport logging\nfrom typing import Union\n\nimport fire\n\nfrom mne.utils import run_subprocess\nfrom mne.parallel import parallel_func\n\nimport config\n\nPathLike = Union[str, Path]\nlogger = logging.getLogger('mne-bids-pipeline')\nfs_bids_app = Path(__file__).parent / 'contrib' / 'run.py'\n\n\ndef _get_subjects_dir(root_dir) -> Path:\n subjects_dir = \\\n Path(root_dir) / \"derivatives\" / \"freesurfer\" / \"subjects\"\n return subjects_dir\n\n\ndef run_recon(root_dir, subject, fs_bids_app) -> None:\n logger.info(f\"Running recon-all on subject {subject}. This will take \"\n f\"a LONG time – it's a good idea to let it run over night.\")\n\n subjects_dir = _get_subjects_dir(root_dir)\n subj_dir = subjects_dir / f\"sub-{subject}\"\n\n if subj_dir.exists():\n logger.info(f\"Subject {subject} is already present. Please delete the \"\n f\"directory if you want to recompute.\")\n return\n\n env = os.environ\n if 'FREESURFER_HOME' not in env:\n raise RuntimeError(\"FreeSurfer is not available.\")\n\n license_file = Path(f\"{env['FREESURFER_HOME']}/license.txt\")\n if not license_file.exists():\n license_file = Path(f\"{env['FREESURFER_HOME']}/.license\")\n if not license_file.exists():\n raise RuntimeError(\"FreeSurfer license file not found.\")\n\n cmd = [\n f\"{sys.executable}\",\n f\"{fs_bids_app}\",\n f\"{root_dir}\",\n f\"{subjects_dir}\", \"participant\",\n \"--n_cpus=2\", \"--stages=all\", \"--skip_bids_validator\",\n f\"--license_file={license_file}\",\n f\"--participant_label={subject}\"\n ]\n logger.debug(\"Running: \" + \" \".join(cmd))\n run_subprocess(cmd, env=env, verbose=logger.level)\n\n\ndef main(n_jobs: int = 1) -> None:\n \"\"\"Run freesurfer recon-all command on BIDS dataset.\n\n The command allows to run the freesurfer recon-all\n command on all subjects of your BIDS dataset. It can\n run in parallel with the --n_jobs parameter.\n\n It is built on top of the FreeSurfer BIDS app:\n\n https://github.com/BIDS-Apps/freesurfer\n\n You must have freesurfer available on your system.\n\n Examples\n --------\n run_freesurfer.py /path/to/bids/dataset/study-template-config.py /path/to/freesurfer_bids_app/\n\n or to run in parallel (3 subjects at a time):\n\n run_freesurfer.py /path/to/bids/dataset/study-template-config.py /path/to/freesurfer_bids_app/ --n_jobs=3\n\n \"\"\" # noqa\n\n logger.info('Running FreeSurfer')\n\n subjects = config.get_subjects()\n\n root_dir = config.bids_root\n subjects_dir = _get_subjects_dir(root_dir)\n subjects_dir.mkdir(parents=True, exist_ok=True)\n\n parallel, run_func, _ = parallel_func(run_recon, n_jobs=n_jobs)\n parallel(run_func(root_dir, subject, fs_bids_app)\n for subject in subjects)\n\n # Handle fsaverage\n fsaverage_dir = subjects_dir / 'fsaverage'\n if fsaverage_dir.exists():\n if fsaverage_dir.is_symlink():\n fsaverage_dir.unlink()\n else:\n shutil.rmtree(fsaverage_dir)\n\n env = os.environ\n shutil.copytree(f\"{env['FREESURFER_HOME']}/subjects/fsaverage\",\n subjects_dir / 'fsaverage')\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n", "path": "scripts/freesurfer/recon_all.py" } ]
[ { "content": "#!/usr/bin/env python\n\nimport os\nimport shutil\nimport sys\nfrom pathlib import Path\nimport logging\nfrom typing import Union\n\nimport fire\n\nfrom mne.utils import run_subprocess\nfrom mne.parallel import parallel_func\n\nimport config\n\nPathLike = Union[str, Path]\nlogger = logging.getLogger('mne-bids-pipeline')\nfs_bids_app = Path(__file__).parent / 'contrib' / 'run.py'\n\n\ndef _get_subjects_dir(root_dir) -> Path:\n subjects_dir = \\\n Path(root_dir) / \"derivatives\" / \"freesurfer\" / \"subjects\"\n return subjects_dir\n\n\ndef run_recon(root_dir, subject, fs_bids_app) -> None:\n logger.info(f\"Running recon-all on subject {subject}. This will take \"\n f\"a LONG time – it's a good idea to let it run over night.\")\n\n subjects_dir = _get_subjects_dir(root_dir)\n subj_dir = subjects_dir / f\"sub-{subject}\"\n\n if subj_dir.exists():\n logger.info(f\"Subject {subject} is already present. Please delete the \"\n f\"directory if you want to recompute.\")\n return\n\n env = os.environ\n if 'FREESURFER_HOME' not in env:\n raise RuntimeError(\"FreeSurfer is not available.\")\n\n license_file = Path(f\"{env['FREESURFER_HOME']}/license.txt\")\n if not license_file.exists():\n license_file = Path(f\"{env['FREESURFER_HOME']}/.license\")\n if not license_file.exists():\n raise RuntimeError(\"FreeSurfer license file not found.\")\n\n cmd = [\n f\"{sys.executable}\",\n f\"{fs_bids_app}\",\n f\"{root_dir}\",\n f\"{subjects_dir}\", \"participant\",\n \"--n_cpus=2\", \"--stages=all\", \"--skip_bids_validator\",\n f\"--license_file={license_file}\",\n f\"--participant_label={subject}\"\n ]\n logger.debug(\"Running: \" + \" \".join(cmd))\n run_subprocess(cmd, env=env, verbose=logger.level)\n\n\ndef main(*, n_jobs: int = 1) -> None:\n \"\"\"Run freesurfer recon-all command on BIDS dataset.\n\n The command allows to run the freesurfer recon-all\n command on all subjects of your BIDS dataset. It can\n run in parallel with the --n_jobs parameter.\n\n It is built on top of the FreeSurfer BIDS app:\n\n https://github.com/BIDS-Apps/freesurfer\n\n You must have freesurfer available on your system.\n\n Examples\n --------\n run_freesurfer.py /path/to/bids/dataset/study-template-config.py /path/to/freesurfer_bids_app/\n\n or to run in parallel (3 subjects at a time):\n\n run_freesurfer.py /path/to/bids/dataset/study-template-config.py /path/to/freesurfer_bids_app/ --n_jobs=3\n\n \"\"\" # noqa\n\n logger.info('Running FreeSurfer')\n\n subjects = config.get_subjects()\n\n root_dir = config.bids_root\n subjects_dir = _get_subjects_dir(root_dir)\n subjects_dir.mkdir(parents=True, exist_ok=True)\n\n parallel, run_func, _ = parallel_func(run_recon, n_jobs=n_jobs)\n parallel(run_func(root_dir, subject, fs_bids_app)\n for subject in subjects)\n\n # Handle fsaverage\n fsaverage_dir = subjects_dir / 'fsaverage'\n if fsaverage_dir.exists():\n if fsaverage_dir.is_symlink():\n fsaverage_dir.unlink()\n else:\n shutil.rmtree(fsaverage_dir)\n\n env = os.environ\n shutil.copytree(f\"{env['FREESURFER_HOME']}/subjects/fsaverage\",\n subjects_dir / 'fsaverage')\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n", "path": "scripts/freesurfer/recon_all.py" } ]
diff --git a/scripts/freesurfer/recon_all.py b/scripts/freesurfer/recon_all.py
index 311be181c..f9186c9ae 100755
--- a/scripts/freesurfer/recon_all.py
+++ b/scripts/freesurfer/recon_all.py
@@ -60,7 +60,7 @@ def run_recon(root_dir, subject, fs_bids_app) -> None:
     run_subprocess(cmd, env=env, verbose=logger.level)
 
 
-def main(n_jobs: int = 1) -> None:
+def main(*, n_jobs: int = 1) -> None:
     """Run freesurfer recon-all command on BIDS dataset.
 
     The command allows to run the freesurfer recon-all
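The single signature change above is the whole fix. It works because of how python-fire binds command-line tokens: leftover positional tokens are matched against positional parameters, so the stray `freesurfer` token on the command line ends up bound to `n_jobs` as the string seen in the pdb session, which `mne.parallel.check_n_jobs` then rejects with `ValueError: n_jobs must be an integer`. Declaring the parameter keyword-only means Fire can only set it via `--n_jobs=N`. A small standalone sketch (a hypothetical `demo.py`, not the pipeline's own scripts) illustrating both behaviours:

```python
# Standalone sketch (hypothetical demo.py, not the pipeline's run.py) of how
# python-fire binds CLI tokens, and why a keyword-only parameter avoids the
# reported n_jobs == "freesurfer" situation.
import fire


def positional(n_jobs: int = 1) -> None:
    # `python demo.py freesurfer` reaches here with n_jobs == "freesurfer",
    # because Fire fills positional parameters from leftover CLI tokens.
    print(f"n_jobs={n_jobs!r}")


def keyword_only(*, n_jobs: int = 1) -> None:
    # With the bare `*`, Fire refuses a stray positional token and only
    # accepts `--n_jobs=3`, so n_jobs stays an int.
    print(f"n_jobs={n_jobs!r}")


if __name__ == "__main__":
    # Swap in `positional` to reproduce the failure mode from the traceback.
    fire.Fire(keyword_only)
```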
ephios-dev__ephios-384
Cannot delete section. As a planner, I cannot delete an existing section from a shift with the section_based signup method.
[ { "content": "import uuid\nfrom functools import cached_property\nfrom itertools import groupby\nfrom operator import itemgetter\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import redirect\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\nfrom django_select2.forms import Select2MultipleWidget\nfrom dynamic_preferences.registries import global_preferences_registry\n\nfrom ephios.core.models import AbstractParticipation, Qualification\nfrom ephios.core.signup import (\n AbstractParticipant,\n BaseDispositionParticipationForm,\n BaseSignupMethod,\n BaseSignupView,\n ParticipationError,\n)\n\n\ndef sections_participant_qualifies_for(sections, participant: AbstractParticipant):\n available_qualification_ids = set(q.id for q in participant.collect_all_qualifications())\n return [\n section\n for section in sections\n if set(section[\"qualifications\"]) <= available_qualification_ids\n ]\n\n\nclass SectionBasedDispositionParticipationForm(BaseDispositionParticipationForm):\n disposition_participation_template = \"basesignup/section_based/fragment_participant.html\"\n\n section = forms.ChoiceField(\n label=_(\"Section\"),\n required=False, # only required if participation is confirmed\n widget=forms.Select(\n attrs={\"data-show-for-state\": str(AbstractParticipation.States.CONFIRMED)}\n ),\n )\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n sections = self.shift.signup_method.configuration.sections\n qualified_sections = list(\n sections_participant_qualifies_for(\n sections,\n self.instance.participant,\n )\n )\n unqualified_sections = [\n section for section in sections if section not in qualified_sections\n ]\n self.fields[\"section\"].choices = [(\"\", \"---\")]\n if qualified_sections:\n self.fields[\"section\"].choices += [\n (\n _(\"qualified\"),\n [(section[\"uuid\"], section[\"title\"]) for section in qualified_sections],\n )\n ]\n if unqualified_sections:\n self.fields[\"section\"].choices += [\n (\n _(\"unqualified\"),\n [(section[\"uuid\"], section[\"title\"]) for section in unqualified_sections],\n )\n ]\n if preferred_section_uuid := self.instance.data.get(\"preferred_section_uuid\"):\n self.fields[\"section\"].initial = preferred_section_uuid\n self.preferred_section = next(\n filter(lambda section: section[\"uuid\"] == preferred_section_uuid, sections), None\n )\n if initial := self.instance.data.get(\"dispatched_section_uuid\"):\n self.fields[\"section\"].initial = initial\n\n def clean(self):\n super().clean()\n if (\n self.cleaned_data[\"state\"] == AbstractParticipation.States.CONFIRMED\n and not self.cleaned_data[\"section\"]\n ):\n self.add_error(\n \"section\",\n ValidationError(_(\"You must select a section when confirming a participation.\")),\n )\n\n def save(self, commit=True):\n self.instance.data[\"dispatched_section_uuid\"] = self.cleaned_data[\"section\"]\n super().save(commit)\n\n\nclass SectionForm(forms.Form):\n title = forms.CharField(label=_(\"Title\"), required=True)\n qualifications = forms.ModelMultipleChoiceField(\n label=_(\"Required Qualifications\"),\n queryset=Qualification.objects.all(),\n widget=Select2MultipleWidget,\n required=False,\n )\n min_count = forms.IntegerField(label=_(\"min amount\"), min_value=0, required=True)\n uuid = forms.CharField(widget=forms.HiddenInput, required=False)\n\n def 
clean_uuid(self):\n return self.cleaned_data.get(\"uuid\") or uuid.uuid4()\n\n\nSectionsFormset = forms.formset_factory(\n SectionForm, can_delete=True, min_num=1, validate_min=1, extra=0\n)\n\n\nclass SectionBasedConfigurationForm(forms.Form):\n def __init__(self, data=None, **kwargs):\n super().__init__(data, **kwargs)\n self.sections_formset = SectionsFormset(\n data=data,\n initial=self.initial.get(\"sections\", list()),\n prefix=\"sections\",\n )\n\n def clean_sections(self):\n if not self.sections_formset.is_valid():\n raise ValidationError(_(\"The sections aren't configured correctly.\"))\n\n sections = [\n {\n key: form.cleaned_data[key]\n for key in (\"title\", \"qualifications\", \"min_count\", \"uuid\")\n }\n for form in self.sections_formset\n ]\n return sections\n\n\nclass SectionSignupForm(forms.Form):\n section = forms.ChoiceField(\n label=_(\"Preferred Section\"),\n widget=forms.RadioSelect,\n required=False,\n # choices are set as (uuid, title) of section\n )\n\n\nclass SectionBasedSignupView(FormView, BaseSignupView):\n template_name = \"basesignup/section_based/signup.html\"\n\n @cached_property\n def sections_participant_qualifies_for(self):\n return sections_participant_qualifies_for(\n self.method.configuration.sections, self.participant\n )\n\n def get_form(self, form_class=None):\n form = SectionSignupForm(self.request.POST)\n form.fields[\"section\"].choices = [\n (section[\"uuid\"], section[\"title\"])\n for section in self.sections_participant_qualifies_for\n ]\n return form\n\n def get_context_data(self, **kwargs):\n kwargs.setdefault(\"shift\", self.shift)\n kwargs.setdefault(\n \"unqualified_sections\",\n [\n section[\"title\"]\n for section in self.method.configuration.sections\n if section not in self.sections_participant_qualifies_for\n ],\n )\n return super().get_context_data(**kwargs)\n\n def form_valid(self, form):\n return super().signup_pressed(preferred_section_uuid=form.cleaned_data.get(\"section\"))\n\n def signup_pressed(self, **kwargs):\n if not self.method.configuration.choose_preferred_section:\n # do straight signup if choosing is not enabled\n return super().signup_pressed(**kwargs)\n\n if not self.method.can_sign_up(self.participant):\n # redirect a misled request\n messages.warning(self.request, _(\"You can not sign up for this shift.\"))\n return redirect(self.participant.reverse_event_detail(self.shift.event))\n\n # all good, redirect to the form\n return redirect(self.participant.reverse_signup_action(self.shift))\n\n\nclass SectionBasedSignupMethod(BaseSignupMethod):\n slug = \"section_based\"\n verbose_name = _(\"Apply for sections\")\n description = _(\n \"\"\"This method lets you define sections for which people can choose from.\n Sections contain qualifications that helpers need to fulfil.\"\"\"\n )\n registration_button_text = _(\"Request\")\n signup_success_message = _(\"You have successfully requested a participation for {shift}.\")\n signup_error_message = _(\"Requesting a participation failed: {error}\")\n\n configuration_form_class = SectionBasedConfigurationForm\n signup_view_class = SectionBasedSignupView\n\n disposition_participation_form_class = SectionBasedDispositionParticipationForm\n\n def get_configuration_fields(self):\n return {\n **super().get_configuration_fields(),\n \"choose_preferred_section\": {\n \"formfield\": forms.BooleanField(\n label=_(\"Ask participants for a preferred section\"),\n help_text=_(\"This only makes sense if you configure multiple sections.\"),\n widget=forms.CheckboxInput,\n 
required=False,\n ),\n \"default\": False,\n },\n \"sections\": {\n \"formfield\": forms.Field(\n label=_(\"Structure\"),\n widget=forms.HiddenInput,\n required=False,\n ),\n \"default\": [],\n },\n }\n\n def get_participant_count_bounds(self):\n return sum(section.get(\"min_count\") or 0 for section in self.configuration.sections), None\n\n @staticmethod\n def check_qualification(method, participant):\n if not sections_participant_qualifies_for(method.configuration.sections, participant):\n return ParticipationError(_(\"You are not qualified.\"))\n\n @property\n def _signup_checkers(self):\n return super()._signup_checkers + [self.check_qualification]\n\n # pylint: disable=arguments-differ\n def _configure_participation(\n self, participation: AbstractParticipation, preferred_section_uuid=None, **kwargs\n ) -> AbstractParticipation:\n participation.data[\"preferred_section_uuid\"] = preferred_section_uuid\n if preferred_section_uuid:\n # reset dispatch decision, as that would have overwritten the preferred choice\n participation.data[\"dispatched_section_uuid\"] = None\n participation.state = AbstractParticipation.States.REQUESTED\n return participation\n\n def render_configuration_form(self, *args, form=None, **kwargs):\n form = form or self.get_configuration_form(*args, **kwargs)\n template = get_template(\"basesignup/section_based/configuration_form.html\").render(\n {\"form\": form}\n )\n return template\n\n def _get_sections_with_users(self):\n relevant_qualification_categories = global_preferences_registry.manager()[\n \"general__relevant_qualification_categories\"\n ]\n section_by_uuid = {section[\"uuid\"]: section for section in self.configuration.sections}\n # get name and preferred section uuid for confirmed participants\n # if they have a section assigned and we have that section on record\n confirmed_participations = [\n {\n \"name\": str(participation.participant),\n \"relevant_qualifications\": \", \".join(\n participation.participant.qualifications.filter(\n category__in=relevant_qualification_categories\n ).values_list(\"abbreviation\", flat=True)\n ),\n \"uuid\": dispatched_section_uuid,\n }\n for participation in self.shift.participations.filter(\n state=AbstractParticipation.States.CONFIRMED\n )\n if (dispatched_section_uuid := participation.data.get(\"dispatched_section_uuid\"))\n and dispatched_section_uuid in section_by_uuid\n ]\n # group by section and do some stats\n sections_with_users = [\n (\n section_by_uuid.pop(uuid),\n [[user[\"name\"], user[\"relevant_qualifications\"]] for user in group],\n )\n for uuid, group in groupby(\n sorted(confirmed_participations, key=itemgetter(\"uuid\")), itemgetter(\"uuid\")\n )\n ]\n # add sections without participants\n sections_with_users += [(section, None) for section in section_by_uuid.values()]\n return sections_with_users\n\n def render_shift_state(self, request):\n return get_template(\"basesignup/section_based/fragment_state.html\").render(\n {\n \"shift\": self.shift,\n \"requested_participations\": (\n self.shift.participations.filter(state=AbstractParticipation.States.REQUESTED)\n ),\n \"sections_with_users\": self._get_sections_with_users(),\n \"disposition_url\": (\n reverse(\n \"core:shift_disposition\",\n kwargs=dict(pk=self.shift.pk),\n )\n if request.user.has_perm(\"core.change_event\", obj=self.shift.event)\n else None\n ),\n }\n )\n\n def get_participation_display(self):\n confirmed_sections_with_users = self._get_sections_with_users()\n participation_display = []\n for section, users in 
confirmed_sections_with_users:\n if users:\n participation_display += [[user[0], user[1], section[\"title\"]] for user in users]\n if not users or len(users) < section[\"min_count\"]:\n required_qualifications = \", \".join(\n Qualification.objects.filter(pk__in=section[\"qualifications\"]).values_list(\n \"abbreviation\", flat=True\n )\n )\n participation_display += [[\"\", required_qualifications, section[\"title\"]]] * (\n section[\"min_count\"] - (len(users) if users else 0)\n )\n return participation_display\n", "path": "ephios/plugins/basesignup/signup/section_based.py" } ]
[ { "content": "import uuid\nfrom functools import cached_property\nfrom itertools import groupby\nfrom operator import itemgetter\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import redirect\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\nfrom django_select2.forms import Select2MultipleWidget\nfrom dynamic_preferences.registries import global_preferences_registry\n\nfrom ephios.core.models import AbstractParticipation, Qualification\nfrom ephios.core.signup import (\n AbstractParticipant,\n BaseDispositionParticipationForm,\n BaseSignupMethod,\n BaseSignupView,\n ParticipationError,\n)\n\n\ndef sections_participant_qualifies_for(sections, participant: AbstractParticipant):\n available_qualification_ids = set(q.id for q in participant.collect_all_qualifications())\n return [\n section\n for section in sections\n if set(section[\"qualifications\"]) <= available_qualification_ids\n ]\n\n\nclass SectionBasedDispositionParticipationForm(BaseDispositionParticipationForm):\n disposition_participation_template = \"basesignup/section_based/fragment_participant.html\"\n\n section = forms.ChoiceField(\n label=_(\"Section\"),\n required=False, # only required if participation is confirmed\n widget=forms.Select(\n attrs={\"data-show-for-state\": str(AbstractParticipation.States.CONFIRMED)}\n ),\n )\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n sections = self.shift.signup_method.configuration.sections\n qualified_sections = list(\n sections_participant_qualifies_for(\n sections,\n self.instance.participant,\n )\n )\n unqualified_sections = [\n section for section in sections if section not in qualified_sections\n ]\n self.fields[\"section\"].choices = [(\"\", \"---\")]\n if qualified_sections:\n self.fields[\"section\"].choices += [\n (\n _(\"qualified\"),\n [(section[\"uuid\"], section[\"title\"]) for section in qualified_sections],\n )\n ]\n if unqualified_sections:\n self.fields[\"section\"].choices += [\n (\n _(\"unqualified\"),\n [(section[\"uuid\"], section[\"title\"]) for section in unqualified_sections],\n )\n ]\n if preferred_section_uuid := self.instance.data.get(\"preferred_section_uuid\"):\n self.fields[\"section\"].initial = preferred_section_uuid\n self.preferred_section = next(\n filter(lambda section: section[\"uuid\"] == preferred_section_uuid, sections), None\n )\n if initial := self.instance.data.get(\"dispatched_section_uuid\"):\n self.fields[\"section\"].initial = initial\n\n def clean(self):\n super().clean()\n if (\n self.cleaned_data[\"state\"] == AbstractParticipation.States.CONFIRMED\n and not self.cleaned_data[\"section\"]\n ):\n self.add_error(\n \"section\",\n ValidationError(_(\"You must select a section when confirming a participation.\")),\n )\n\n def save(self, commit=True):\n self.instance.data[\"dispatched_section_uuid\"] = self.cleaned_data[\"section\"]\n super().save(commit)\n\n\nclass SectionForm(forms.Form):\n title = forms.CharField(label=_(\"Title\"), required=True)\n qualifications = forms.ModelMultipleChoiceField(\n label=_(\"Required Qualifications\"),\n queryset=Qualification.objects.all(),\n widget=Select2MultipleWidget,\n required=False,\n )\n min_count = forms.IntegerField(label=_(\"min amount\"), min_value=0, required=True)\n uuid = forms.CharField(widget=forms.HiddenInput, required=False)\n\n def 
clean_uuid(self):\n return self.cleaned_data.get(\"uuid\") or uuid.uuid4()\n\n\nSectionsFormset = forms.formset_factory(\n SectionForm, can_delete=True, min_num=1, validate_min=1, extra=0\n)\n\n\nclass SectionBasedConfigurationForm(forms.Form):\n def __init__(self, data=None, **kwargs):\n super().__init__(data, **kwargs)\n self.sections_formset = SectionsFormset(\n data=data,\n initial=self.initial.get(\"sections\", list()),\n prefix=\"sections\",\n )\n\n def clean_sections(self):\n if not self.sections_formset.is_valid():\n raise ValidationError(_(\"The sections aren't configured correctly.\"))\n\n sections = [\n {\n key: form.cleaned_data[key]\n for key in (\"title\", \"qualifications\", \"min_count\", \"uuid\")\n }\n for form in self.sections_formset\n if not form.cleaned_data.get(\"DELETE\")\n ]\n return sections\n\n\nclass SectionSignupForm(forms.Form):\n section = forms.ChoiceField(\n label=_(\"Preferred Section\"),\n widget=forms.RadioSelect,\n required=False,\n # choices are set as (uuid, title) of section\n )\n\n\nclass SectionBasedSignupView(FormView, BaseSignupView):\n template_name = \"basesignup/section_based/signup.html\"\n\n @cached_property\n def sections_participant_qualifies_for(self):\n return sections_participant_qualifies_for(\n self.method.configuration.sections, self.participant\n )\n\n def get_form(self, form_class=None):\n form = SectionSignupForm(self.request.POST)\n form.fields[\"section\"].choices = [\n (section[\"uuid\"], section[\"title\"])\n for section in self.sections_participant_qualifies_for\n ]\n return form\n\n def get_context_data(self, **kwargs):\n kwargs.setdefault(\"shift\", self.shift)\n kwargs.setdefault(\n \"unqualified_sections\",\n [\n section[\"title\"]\n for section in self.method.configuration.sections\n if section not in self.sections_participant_qualifies_for\n ],\n )\n return super().get_context_data(**kwargs)\n\n def form_valid(self, form):\n return super().signup_pressed(preferred_section_uuid=form.cleaned_data.get(\"section\"))\n\n def signup_pressed(self, **kwargs):\n if not self.method.configuration.choose_preferred_section:\n # do straight signup if choosing is not enabled\n return super().signup_pressed(**kwargs)\n\n if not self.method.can_sign_up(self.participant):\n # redirect a misled request\n messages.warning(self.request, _(\"You can not sign up for this shift.\"))\n return redirect(self.participant.reverse_event_detail(self.shift.event))\n\n # all good, redirect to the form\n return redirect(self.participant.reverse_signup_action(self.shift))\n\n\nclass SectionBasedSignupMethod(BaseSignupMethod):\n slug = \"section_based\"\n verbose_name = _(\"Apply for sections\")\n description = _(\n \"\"\"This method lets you define sections for which people can choose from.\n Sections contain qualifications that helpers need to fulfil.\"\"\"\n )\n registration_button_text = _(\"Request\")\n signup_success_message = _(\"You have successfully requested a participation for {shift}.\")\n signup_error_message = _(\"Requesting a participation failed: {error}\")\n\n configuration_form_class = SectionBasedConfigurationForm\n signup_view_class = SectionBasedSignupView\n\n disposition_participation_form_class = SectionBasedDispositionParticipationForm\n\n def get_configuration_fields(self):\n return {\n **super().get_configuration_fields(),\n \"choose_preferred_section\": {\n \"formfield\": forms.BooleanField(\n label=_(\"Ask participants for a preferred section\"),\n help_text=_(\"This only makes sense if you configure multiple sections.\"),\n 
widget=forms.CheckboxInput,\n required=False,\n ),\n \"default\": False,\n },\n \"sections\": {\n \"formfield\": forms.Field(\n label=_(\"Structure\"),\n widget=forms.HiddenInput,\n required=False,\n ),\n \"default\": [],\n },\n }\n\n def get_participant_count_bounds(self):\n return sum(section.get(\"min_count\") or 0 for section in self.configuration.sections), None\n\n @staticmethod\n def check_qualification(method, participant):\n if not sections_participant_qualifies_for(method.configuration.sections, participant):\n return ParticipationError(_(\"You are not qualified.\"))\n\n @property\n def _signup_checkers(self):\n return super()._signup_checkers + [self.check_qualification]\n\n # pylint: disable=arguments-differ\n def _configure_participation(\n self, participation: AbstractParticipation, preferred_section_uuid=None, **kwargs\n ) -> AbstractParticipation:\n participation.data[\"preferred_section_uuid\"] = preferred_section_uuid\n if preferred_section_uuid:\n # reset dispatch decision, as that would have overwritten the preferred choice\n participation.data[\"dispatched_section_uuid\"] = None\n participation.state = AbstractParticipation.States.REQUESTED\n return participation\n\n def render_configuration_form(self, *args, form=None, **kwargs):\n form = form or self.get_configuration_form(*args, **kwargs)\n template = get_template(\"basesignup/section_based/configuration_form.html\").render(\n {\"form\": form}\n )\n return template\n\n def _get_sections_with_users(self):\n relevant_qualification_categories = global_preferences_registry.manager()[\n \"general__relevant_qualification_categories\"\n ]\n section_by_uuid = {section[\"uuid\"]: section for section in self.configuration.sections}\n # get name and preferred section uuid for confirmed participants\n # if they have a section assigned and we have that section on record\n confirmed_participations = [\n {\n \"name\": str(participation.participant),\n \"relevant_qualifications\": \", \".join(\n participation.participant.qualifications.filter(\n category__in=relevant_qualification_categories\n ).values_list(\"abbreviation\", flat=True)\n ),\n \"uuid\": dispatched_section_uuid,\n }\n for participation in self.shift.participations.filter(\n state=AbstractParticipation.States.CONFIRMED\n )\n if (dispatched_section_uuid := participation.data.get(\"dispatched_section_uuid\"))\n and dispatched_section_uuid in section_by_uuid\n ]\n # group by section and do some stats\n sections_with_users = [\n (\n section_by_uuid.pop(uuid),\n [[user[\"name\"], user[\"relevant_qualifications\"]] for user in group],\n )\n for uuid, group in groupby(\n sorted(confirmed_participations, key=itemgetter(\"uuid\")), itemgetter(\"uuid\")\n )\n ]\n # add sections without participants\n sections_with_users += [(section, None) for section in section_by_uuid.values()]\n return sections_with_users\n\n def render_shift_state(self, request):\n return get_template(\"basesignup/section_based/fragment_state.html\").render(\n {\n \"shift\": self.shift,\n \"requested_participations\": (\n self.shift.participations.filter(state=AbstractParticipation.States.REQUESTED)\n ),\n \"sections_with_users\": self._get_sections_with_users(),\n \"disposition_url\": (\n reverse(\n \"core:shift_disposition\",\n kwargs=dict(pk=self.shift.pk),\n )\n if request.user.has_perm(\"core.change_event\", obj=self.shift.event)\n else None\n ),\n }\n )\n\n def get_participation_display(self):\n confirmed_sections_with_users = self._get_sections_with_users()\n participation_display = []\n for section, 
users in confirmed_sections_with_users:\n if users:\n participation_display += [[user[0], user[1], section[\"title\"]] for user in users]\n if not users or len(users) < section[\"min_count\"]:\n required_qualifications = \", \".join(\n Qualification.objects.filter(pk__in=section[\"qualifications\"]).values_list(\n \"abbreviation\", flat=True\n )\n )\n participation_display += [[\"\", required_qualifications, section[\"title\"]]] * (\n section[\"min_count\"] - (len(users) if users else 0)\n )\n return participation_display\n", "path": "ephios/plugins/basesignup/signup/section_based.py" } ]
diff --git a/ephios/plugins/basesignup/signup/section_based.py b/ephios/plugins/basesignup/signup/section_based.py
index 82a006e49..a99000858 100644
--- a/ephios/plugins/basesignup/signup/section_based.py
+++ b/ephios/plugins/basesignup/signup/section_based.py
@@ -134,6 +134,7 @@ def clean_sections(self):
                 for key in ("title", "qualifications", "min_count", "uuid")
             }
             for form in self.sections_formset
+            if not form.cleaned_data.get("DELETE")
         ]
         return sections
 
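The one added line works because `formset_factory(..., can_delete=True)` only adds a `DELETE` boolean to each form's `cleaned_data`; iterating the formset still yields the forms the user flagged for deletion, so `clean_sections` has to skip them itself. A minimal standalone sketch of that behaviour (plain Django forms configured in-process for illustration, not the ephios code):

```python
# Minimal sketch of Django's can_delete formset behaviour: forms flagged for
# deletion still appear when iterating the formset and must be skipped via
# cleaned_data["DELETE"]. Standalone illustration, not the ephios code.
import django
from django.conf import settings

settings.configure()  # just enough configuration to use plain forms in-process
django.setup()

from django import forms  # noqa: E402  (imported after settings are configured)


class SectionForm(forms.Form):
    title = forms.CharField()


SectionFormSet = forms.formset_factory(SectionForm, can_delete=True, extra=0)

data = {
    "form-TOTAL_FORMS": "2",
    "form-INITIAL_FORMS": "2",
    "form-0-title": "Keep me",
    "form-1-title": "Delete me",
    "form-1-DELETE": "on",  # the user ticked the delete checkbox on form 1
}
formset = SectionFormSet(data)
assert formset.is_valid()

# Without the DELETE check, the section marked for deletion survives:
print([f.cleaned_data["title"] for f in formset])
# ['Keep me', 'Delete me']

# With the check (the pattern the fix adds), it is dropped:
print([f.cleaned_data["title"] for f in formset if not f.cleaned_data.get("DELETE")])
# ['Keep me']
```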
nautobot__nautobot-2730
Changelog Filter "Object Type" - The results could not be loaded. ### Environment * Nautobot version (Docker tag too if applicable): d3bb49d5c396 (v1.4.7) * Python version: 3.9 * Database platform, version: postgres * Middleware(s): ### Steps to Reproduce 1. Open the Changelog 2. Click into "Object Type" in the "Search" field 3. ### Expected Behavior A list of object types should be displayed ### Observed Behavior ![image](https://user-images.githubusercontent.com/8406301/197801973-f02e2850-24f2-4198-a615-4f4d44de0258.png)
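For context on the failure: the "Object Type" widget is a dynamic select2 field that looks up content types over the REST API, and "The results could not be loaded." is the message select2 shows when that lookup request fails. One plausible cause, judging from the filterset module reproduced below, is that `ContentTypeFilterSet` defines no `q` filter, so the search term the widget sends with each lookup has nothing to bind to. A hedged sketch of a searchable variant, following the `SearchFilter` pattern the neighbouring filtersets already use (an assumption for illustration, not the project's actual patch, and only meaningful inside a configured Nautobot environment):

```python
# Hedged sketch: make ContentType lookups searchable by app label and model
# name, mirroring the SearchFilter(filter_predicates=...) pattern used by the
# other filtersets in nautobot/extras/filters.py. Assumption, not the real fix.
from django.contrib.contenttypes.models import ContentType

from nautobot.utilities.filters import BaseFilterSet, SearchFilter


class ContentTypeFilterSet(BaseFilterSet):
    # Without "q", a select2 lookup that appends ?q=<search term> cannot be
    # filtered, and the request is rejected instead of returning matches.
    q = SearchFilter(
        filter_predicates={
            "app_label": "icontains",
            "model": "icontains",
        },
    )

    class Meta:
        model = ContentType
        fields = ["id", "app_label", "model"]
```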
[ { "content": "import django_filters\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.forms import IntegerField\n\nfrom nautobot.dcim.models import DeviceRole, DeviceType, Location, Platform, Region, Site\nfrom nautobot.extras.utils import ChangeLoggedModelsQuery, FeatureQuery, TaggableClassesQuery\nfrom nautobot.tenancy.models import Tenant, TenantGroup\nfrom nautobot.utilities.constants import FILTER_CHAR_BASED_LOOKUP_MAP, FILTER_NUMERIC_BASED_LOOKUP_MAP\nfrom nautobot.utilities.filters import (\n BaseFilterSet,\n ContentTypeFilter,\n ContentTypeMultipleChoiceFilter,\n MultiValueCharFilter,\n MultiValueDateFilter,\n MultiValueNumberFilter,\n MultiValueUUIDFilter,\n NaturalKeyOrPKMultipleChoiceFilter,\n SearchFilter,\n TagFilter,\n)\nfrom nautobot.virtualization.models import Cluster, ClusterGroup\nfrom .choices import (\n CustomFieldFilterLogicChoices,\n CustomFieldTypeChoices,\n JobResultStatusChoices,\n RelationshipSideChoices,\n RelationshipTypeChoices,\n SecretsGroupAccessTypeChoices,\n SecretsGroupSecretTypeChoices,\n)\nfrom .models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomFieldChoice,\n CustomLink,\n DynamicGroup,\n DynamicGroupMembership,\n ExportTemplate,\n GitRepository,\n GraphQLQuery,\n ImageAttachment,\n Job,\n JobHook,\n JobLogEntry,\n JobResult,\n Note,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n ScheduledJob,\n Secret,\n SecretsGroup,\n SecretsGroupAssociation,\n Status,\n Tag,\n Webhook,\n)\n\n\n__all__ = (\n \"ComputedFieldFilterSet\",\n \"ConfigContextFilterSet\",\n \"ContentTypeFilterSet\",\n \"CreatedUpdatedFilterSet\",\n \"CustomFieldBooleanFilter\",\n \"CustomFieldCharFilter\",\n \"CustomFieldDateFilter\",\n \"CustomFieldFilterMixin\",\n \"CustomFieldJSONFilter\",\n \"CustomFieldMultiSelectFilter\",\n \"CustomFieldMultiValueCharFilter\",\n \"CustomFieldMultiValueDateFilter\",\n \"CustomFieldMultiValueNumberFilter\",\n \"CustomFieldNumberFilter\",\n \"CustomFieldModelFilterSet\",\n \"CustomLinkFilterSet\",\n \"DynamicGroupFilterSet\",\n \"DynamicGroupMembershipFilterSet\",\n \"ExportTemplateFilterSet\",\n \"GitRepositoryFilterSet\",\n \"GraphQLQueryFilterSet\",\n \"ImageAttachmentFilterSet\",\n \"JobFilterSet\",\n \"JobLogEntryFilterSet\",\n \"JobResultFilterSet\",\n \"LocalContextFilterSet\",\n \"NautobotFilterSet\",\n \"NoteFilterSet\",\n \"ObjectChangeFilterSet\",\n \"RelationshipFilterSet\",\n \"RelationshipAssociationFilterSet\",\n \"ScheduledJobFilterSet\",\n \"SecretFilterSet\",\n \"SecretsGroupFilterSet\",\n \"SecretsGroupAssociationFilterSet\",\n \"StatusFilter\",\n \"StatusFilterSet\",\n \"StatusModelFilterSetMixin\",\n \"TagFilterSet\",\n \"WebhookFilterSet\",\n)\n\n\n#\n# Mixins\n#\n\n\n# TODO: should be CreatedUpdatedFilterSetMixin.\nclass CreatedUpdatedFilterSet(django_filters.FilterSet):\n created = django_filters.DateFilter()\n created__gte = django_filters.DateFilter(field_name=\"created\", lookup_expr=\"gte\")\n created__lte = django_filters.DateFilter(field_name=\"created\", lookup_expr=\"lte\")\n last_updated = django_filters.DateTimeFilter()\n last_updated__gte = django_filters.DateTimeFilter(field_name=\"last_updated\", lookup_expr=\"gte\")\n last_updated__lte = django_filters.DateTimeFilter(field_name=\"last_updated\", lookup_expr=\"lte\")\n\n\nclass RelationshipFilter(django_filters.ModelMultipleChoiceFilter):\n \"\"\"\n Filter objects by the presence of associations on a 
given Relationship.\n \"\"\"\n\n def __init__(self, side, relationship=None, queryset=None, qs=None, *args, **kwargs):\n self.relationship = relationship\n self.qs = qs\n self.side = side\n super().__init__(queryset=queryset, *args, **kwargs)\n\n def filter(self, qs, value):\n value = [entry.id for entry in value]\n # Check if value is empty or a DynamicChoiceField that is empty.\n if not value or \"\" in value:\n # if value is empty we return the entire unmodified queryset\n return qs\n else:\n if self.side == \"source\":\n values = RelationshipAssociation.objects.filter(\n destination_id__in=value,\n source_type=self.relationship.source_type,\n relationship=self.relationship,\n ).values_list(\"source_id\", flat=True)\n elif self.side == \"destination\":\n values = RelationshipAssociation.objects.filter(\n source_id__in=value,\n destination_type=self.relationship.destination_type,\n relationship=self.relationship,\n ).values_list(\"destination_id\", flat=True)\n else:\n destinations = RelationshipAssociation.objects.filter(\n source_id__in=value,\n destination_type=self.relationship.destination_type,\n relationship=self.relationship,\n ).values_list(\"destination_id\", flat=True)\n\n sources = RelationshipAssociation.objects.filter(\n destination_id__in=value,\n source_type=self.relationship.source_type,\n relationship=self.relationship,\n ).values_list(\"source_id\", flat=True)\n\n values = list(destinations) + list(sources)\n qs &= self.get_method(self.qs)(Q(**{\"id__in\": values}))\n return qs\n\n\nclass RelationshipModelFilterSet(django_filters.FilterSet):\n \"\"\"\n Filterset for applicable to the parent model.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.obj_type = ContentType.objects.get_for_model(self._meta.model)\n super().__init__(*args, **kwargs)\n self.relationships = []\n self._append_relationships(model=self._meta.model)\n\n def _append_relationships(self, model):\n \"\"\"\n Append form fields for all Relationships assigned to this model.\n \"\"\"\n source_relationships = Relationship.objects.filter(source_type=self.obj_type, source_hidden=False)\n self._append_relationships_side(source_relationships, RelationshipSideChoices.SIDE_SOURCE, model)\n\n dest_relationships = Relationship.objects.filter(destination_type=self.obj_type, destination_hidden=False)\n self._append_relationships_side(dest_relationships, RelationshipSideChoices.SIDE_DESTINATION, model)\n\n def _append_relationships_side(self, relationships, initial_side, model):\n \"\"\"\n Helper method to _append_relationships, for processing one \"side\" of the relationships for this model.\n \"\"\"\n for relationship in relationships:\n if relationship.symmetric:\n side = RelationshipSideChoices.SIDE_PEER\n else:\n side = initial_side\n peer_side = RelationshipSideChoices.OPPOSITE[side]\n\n # If this model is on the \"source\" side of the relationship, then the field will be named\n # \"cr_<relationship-slug>__destination\" since it's used to pick the destination object(s).\n # If we're on the \"destination\" side, the field will be \"cr_<relationship-slug>__source\".\n # For a symmetric relationship, both sides are \"peer\", so the field will be \"cr_<relationship-slug>__peer\"\n field_name = f\"cr_{relationship.slug}__{peer_side}\"\n\n if field_name in self.relationships:\n # This is a symmetric relationship that we already processed from the opposing \"initial_side\".\n # No need to process it a second time!\n continue\n if peer_side == \"source\":\n choice_model = relationship.source_type.model_class()\n 
elif peer_side == \"destination\":\n choice_model = relationship.destination_type.model_class()\n else:\n choice_model = model\n # Check for invalid_relationship unit test\n if choice_model:\n self.filters[field_name] = RelationshipFilter(\n relationship=relationship,\n side=side,\n field_name=field_name,\n queryset=choice_model.objects.all(),\n qs=model.objects.all(),\n )\n self.relationships.append(field_name)\n\n\n#\n# Computed Fields\n#\n\n\nclass ComputedFieldFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"label\": \"icontains\",\n \"description\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n \"template\": \"icontains\",\n \"fallback_value\": \"icontains\",\n },\n )\n content_type = ContentTypeFilter()\n\n class Meta:\n model = ComputedField\n fields = (\n \"content_type\",\n \"slug\",\n \"template\",\n \"fallback_value\",\n \"weight\",\n )\n\n\n#\n# Config Contexts\n#\n\n\nclass ConfigContextFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"description\": \"icontains\",\n \"data\": \"icontains\",\n },\n )\n owner_content_type = ContentTypeFilter()\n region_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"regions\",\n queryset=Region.objects.all(),\n label=\"Region\",\n )\n region = django_filters.ModelMultipleChoiceFilter(\n field_name=\"regions__slug\",\n queryset=Region.objects.all(),\n to_field_name=\"slug\",\n label=\"Region (slug)\",\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"sites\",\n queryset=Site.objects.all(),\n label=\"Site\",\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name=\"sites__slug\",\n queryset=Site.objects.all(),\n to_field_name=\"slug\",\n label=\"Site (slug)\",\n )\n location_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"locations\",\n queryset=Location.objects.all(),\n label=\"Location (ID)\",\n )\n location = django_filters.ModelMultipleChoiceFilter(\n field_name=\"locations__slug\",\n queryset=Location.objects.all(),\n label=\"Location (slug)\",\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"roles\",\n queryset=DeviceRole.objects.all(),\n label=\"Role\",\n )\n role = django_filters.ModelMultipleChoiceFilter(\n field_name=\"roles__slug\",\n queryset=DeviceRole.objects.all(),\n to_field_name=\"slug\",\n label=\"Role (slug)\",\n )\n device_type_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"device_types\",\n queryset=DeviceType.objects.all(),\n label=\"Device Type\",\n )\n device_type = django_filters.ModelMultipleChoiceFilter(\n field_name=\"device_types__slug\",\n queryset=DeviceType.objects.all(),\n to_field_name=\"slug\",\n label=\"Device Type (slug)\",\n )\n platform_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"platforms\",\n queryset=Platform.objects.all(),\n label=\"Platform\",\n )\n platform = django_filters.ModelMultipleChoiceFilter(\n field_name=\"platforms__slug\",\n queryset=Platform.objects.all(),\n to_field_name=\"slug\",\n label=\"Platform (slug)\",\n )\n cluster_group_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"cluster_groups\",\n queryset=ClusterGroup.objects.all(),\n label=\"Cluster group\",\n )\n cluster_group = django_filters.ModelMultipleChoiceFilter(\n field_name=\"cluster_groups__slug\",\n queryset=ClusterGroup.objects.all(),\n to_field_name=\"slug\",\n label=\"Cluster group (slug)\",\n )\n cluster_id = django_filters.ModelMultipleChoiceFilter(\n 
field_name=\"clusters\",\n queryset=Cluster.objects.all(),\n label=\"Cluster\",\n )\n tenant_group_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenant_groups\",\n queryset=TenantGroup.objects.all(),\n label=\"Tenant group\",\n )\n tenant_group = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenant_groups__slug\",\n queryset=TenantGroup.objects.all(),\n to_field_name=\"slug\",\n label=\"Tenant group (slug)\",\n )\n tenant_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenants\",\n queryset=Tenant.objects.all(),\n label=\"Tenant\",\n )\n tenant = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenants__slug\",\n queryset=Tenant.objects.all(),\n to_field_name=\"slug\",\n label=\"Tenant (slug)\",\n )\n tag = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tags__slug\",\n queryset=Tag.objects.all(),\n to_field_name=\"slug\",\n label=\"Tag (slug)\",\n )\n\n class Meta:\n model = ConfigContext\n fields = [\"id\", \"name\", \"is_active\", \"owner_content_type\", \"owner_object_id\"]\n\n\n#\n# Filter for config context schema\n#\n\n\nclass ConfigContextSchemaFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"description\": \"icontains\",\n \"data_schema\": \"icontains\",\n },\n )\n owner_content_type = ContentTypeFilter()\n\n class Meta:\n model = ConfigContextSchema\n fields = [\n \"id\",\n \"name\",\n \"description\",\n ]\n\n\n#\n# ContentTypes\n#\n\n\nclass ContentTypeFilterSet(BaseFilterSet):\n class Meta:\n model = ContentType\n fields = [\"id\", \"app_label\", \"model\"]\n\n\n#\n# Custom Fields\n#\n\n\nEXACT_FILTER_TYPES = (\n CustomFieldTypeChoices.TYPE_BOOLEAN,\n CustomFieldTypeChoices.TYPE_DATE,\n CustomFieldTypeChoices.TYPE_INTEGER,\n CustomFieldTypeChoices.TYPE_SELECT,\n CustomFieldTypeChoices.TYPE_MULTISELECT,\n)\n\n\nclass CustomFieldFilterMixin:\n \"\"\"\n Filter mixin for CustomField to handle CustomField.filter_logic setting\n and queryset.exclude filtering specific to the JSONField where CustomField data is stored.\n \"\"\"\n\n def __init__(self, custom_field, *args, **kwargs):\n self.custom_field = custom_field\n if custom_field.type not in EXACT_FILTER_TYPES:\n if custom_field.filter_logic == CustomFieldFilterLogicChoices.FILTER_LOOSE:\n kwargs.setdefault(\"lookup_expr\", \"icontains\")\n super().__init__(*args, **kwargs)\n self.field_name = f\"_custom_field_data__{self.field_name}\"\n\n def filter(self, qs, value):\n if value in django_filters.constants.EMPTY_VALUES:\n return qs\n\n if value == \"null\":\n return self.get_method(qs)(\n Q(**{f\"{self.field_name}__exact\": None}) | Q(**{f\"{self.field_name}__isnull\": True})\n )\n\n # Custom fields require special handling for exclude filtering.\n # Return custom fields that don't match the value and null custom fields\n if self.exclude:\n qs_null_custom_fields = qs.filter(**{f\"{self.field_name}__isnull\": True}).distinct()\n return super().filter(qs, value) | qs_null_custom_fields\n\n return super().filter(qs, value)\n\n\nclass CustomFieldBooleanFilter(CustomFieldFilterMixin, django_filters.BooleanFilter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass CustomFieldCharFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass CustomFieldDateFilter(CustomFieldFilterMixin, django_filters.DateFilter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass 
CustomFieldJSONFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass CustomFieldMultiSelectFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"lookup_expr\", \"contains\")\n super().__init__(*args, **kwargs)\n\n\nclass CustomFieldNumberFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n field_class = IntegerField\n\n\nclass CustomFieldMultiValueCharFilter(CustomFieldFilterMixin, MultiValueCharFilter):\n \"\"\"Custom field multi value char filter for extended lookup expressions\"\"\"\n\n\nclass CustomFieldMultiValueDateFilter(CustomFieldFilterMixin, MultiValueDateFilter):\n \"\"\"Custom field multi value date filter for extended lookup expressions\"\"\"\n\n\nclass CustomFieldMultiValueNumberFilter(CustomFieldFilterMixin, MultiValueNumberFilter):\n \"\"\"Custom field multi value number filter for extended lookup expressions\"\"\"\n\n\n# TODO: should be CustomFieldModelFilterSetMixin\nclass CustomFieldModelFilterSet(django_filters.FilterSet):\n \"\"\"\n Dynamically add a Filter for each CustomField applicable to the parent model. Add filters for\n extra lookup expressions on supported CustomField types.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n custom_field_filter_classes = {\n CustomFieldTypeChoices.TYPE_DATE: CustomFieldDateFilter,\n CustomFieldTypeChoices.TYPE_BOOLEAN: CustomFieldBooleanFilter,\n CustomFieldTypeChoices.TYPE_INTEGER: CustomFieldNumberFilter,\n CustomFieldTypeChoices.TYPE_JSON: CustomFieldJSONFilter,\n CustomFieldTypeChoices.TYPE_MULTISELECT: CustomFieldMultiSelectFilter,\n }\n\n custom_fields = CustomField.objects.filter(\n content_types=ContentType.objects.get_for_model(self._meta.model)\n ).exclude(filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED)\n for cf in custom_fields:\n # Determine filter class for this CustomField type, default to CustomFieldBaseFilter\n # 2.0 TODO: #824 use cf.slug instead\n new_filter_name = f\"cf_{cf.name}\"\n filter_class = custom_field_filter_classes.get(cf.type, CustomFieldCharFilter)\n new_filter_field = filter_class(field_name=cf.name, custom_field=cf)\n\n # Create base filter (cf_customfieldname)\n self.filters[new_filter_name] = new_filter_field\n\n # Create extra lookup expression filters (cf_customfieldname__lookup_expr)\n self.filters.update(\n self._generate_custom_field_lookup_expression_filters(filter_name=new_filter_name, custom_field=cf)\n )\n\n @staticmethod\n def _get_custom_field_filter_lookup_dict(filter_type):\n # Choose the lookup expression map based on the filter type\n if issubclass(filter_type, (CustomFieldMultiValueNumberFilter, CustomFieldMultiValueDateFilter)):\n lookup_map = FILTER_NUMERIC_BASED_LOOKUP_MAP\n else:\n lookup_map = FILTER_CHAR_BASED_LOOKUP_MAP\n\n return lookup_map\n\n # TODO 2.0: Transition CustomField filters to nautobot.utilities.filters.MultiValue* filters and\n # leverage BaseFilterSet to add dynamic lookup expression filters. Remove CustomField.filter_logic field\n @classmethod\n def _generate_custom_field_lookup_expression_filters(cls, filter_name, custom_field):\n \"\"\"\n For specific filter types, new filters are created based on defined lookup expressions in\n the form `<field_name>__<lookup_expr>`. 
Copied from nautobot.utilities.filters.BaseFilterSet\n and updated to work with custom fields.\n \"\"\"\n magic_filters = {}\n custom_field_type_to_filter_map = {\n CustomFieldTypeChoices.TYPE_DATE: CustomFieldMultiValueDateFilter,\n CustomFieldTypeChoices.TYPE_INTEGER: CustomFieldMultiValueNumberFilter,\n CustomFieldTypeChoices.TYPE_SELECT: CustomFieldMultiValueCharFilter,\n CustomFieldTypeChoices.TYPE_TEXT: CustomFieldMultiValueCharFilter,\n CustomFieldTypeChoices.TYPE_URL: CustomFieldMultiValueCharFilter,\n }\n\n if custom_field.type in custom_field_type_to_filter_map:\n filter_type = custom_field_type_to_filter_map[custom_field.type]\n else:\n return magic_filters\n\n # Choose the lookup expression map based on the filter type\n lookup_map = cls._get_custom_field_filter_lookup_dict(filter_type)\n\n # Create new filters for each lookup expression in the map\n for lookup_name, lookup_expr in lookup_map.items():\n new_filter_name = f\"{filter_name}__{lookup_name}\"\n new_filter = filter_type(\n field_name=custom_field.name,\n lookup_expr=lookup_expr,\n custom_field=custom_field,\n exclude=lookup_name.startswith(\"n\"),\n )\n\n magic_filters[new_filter_name] = new_filter\n\n return magic_filters\n\n\nclass CustomFieldFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"label\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"custom_fields\").get_choices,\n )\n\n class Meta:\n model = CustomField\n fields = [\"id\", \"content_types\", \"name\", \"required\", \"filter_logic\", \"weight\"]\n\n\nclass CustomFieldChoiceFilterSet(BaseFilterSet):\n q = SearchFilter(filter_predicates={\"value\": \"icontains\"})\n field_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"field\",\n queryset=CustomField.objects.all(),\n label=\"Field\",\n )\n field = django_filters.ModelMultipleChoiceFilter(\n field_name=\"field__name\",\n queryset=CustomField.objects.all(),\n to_field_name=\"name\",\n label=\"Field (name)\",\n )\n\n class Meta:\n model = CustomFieldChoice\n fields = [\"id\", \"value\", \"weight\"]\n\n\n#\n# Nautobot base filterset to use for most custom filterset classes.\n#\n\n\nclass NautobotFilterSet(BaseFilterSet, CreatedUpdatedFilterSet, RelationshipModelFilterSet, CustomFieldModelFilterSet):\n \"\"\"\n This class exists to combine common functionality and is used as a base class throughout the\n codebase where all of BaseFilterSet, CreatedUpdatedFilterSet, RelationshipModelFilterSet and CustomFieldModelFilterSet\n are needed.\n \"\"\"\n\n\n#\n# Custom Links\n#\n\n\nclass CustomLinkFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"target_url\": \"icontains\",\n \"text\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n },\n )\n content_type = ContentTypeFilter()\n\n class Meta:\n model = CustomLink\n fields = (\n \"content_type\",\n \"name\",\n \"text\",\n \"target_url\",\n \"weight\",\n \"group_name\",\n \"button_class\",\n \"new_window\",\n )\n\n\n#\n# Dynamic Groups\n#\n\n\nclass DynamicGroupFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"description\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n },\n )\n content_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery(\"dynamic_groups\").get_choices, 
conjoined=False)\n\n class Meta:\n model = DynamicGroup\n fields = (\"id\", \"name\", \"slug\", \"description\")\n\n\nclass DynamicGroupMembershipFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"operator\": \"icontains\",\n \"group__name\": \"icontains\",\n \"group__slug\": \"icontains\",\n \"parent_group__name\": \"icontains\",\n \"parent_group__slug\": \"icontains\",\n },\n )\n group = NaturalKeyOrPKMultipleChoiceFilter(\n queryset=DynamicGroup.objects.all(),\n label=\"Group (slug or ID)\",\n )\n parent_group = NaturalKeyOrPKMultipleChoiceFilter(\n queryset=DynamicGroup.objects.all(),\n label=\"Parent Group (slug or ID)\",\n )\n\n class Meta:\n model = DynamicGroupMembership\n fields = (\"id\", \"group\", \"parent_group\", \"operator\", \"weight\")\n\n\n#\n# Export Templates\n#\n\n\nclass ExportTemplateFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"owner_content_type__app_label\": \"icontains\",\n \"owner_content_type__model\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n owner_content_type = ContentTypeFilter()\n\n class Meta:\n model = ExportTemplate\n fields = [\"id\", \"content_type\", \"owner_content_type\", \"owner_object_id\", \"name\"]\n\n\n#\n# Datasources (Git)\n#\n\n\nclass GitRepositoryFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"remote_url\": \"icontains\",\n \"branch\": \"icontains\",\n },\n )\n secrets_group_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"secrets_group\",\n queryset=SecretsGroup.objects.all(),\n label=\"Secrets group (ID)\",\n )\n secrets_group = django_filters.ModelMultipleChoiceFilter(\n field_name=\"secrets_group__slug\",\n queryset=SecretsGroup.objects.all(),\n to_field_name=\"slug\",\n label=\"Secrets group (slug)\",\n )\n tag = TagFilter()\n\n class Meta:\n model = GitRepository\n fields = [\"id\", \"name\", \"slug\", \"remote_url\", \"branch\", \"provided_contents\"]\n\n\n#\n# GraphQL Queries\n#\n\n\nclass GraphQLQueryFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"query\": \"icontains\",\n },\n )\n\n class Meta:\n model = GraphQLQuery\n fields = [\"name\", \"slug\"]\n\n\n#\n# Image Attachments\n#\n\n\nclass ImageAttachmentFilterSet(BaseFilterSet):\n content_type = ContentTypeFilter()\n\n class Meta:\n model = ImageAttachment\n fields = [\"id\", \"content_type_id\", \"object_id\", \"name\"]\n\n\n#\n# Jobs\n#\n\n\nclass JobFilterSet(BaseFilterSet, CustomFieldModelFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"grouping\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n tag = TagFilter()\n\n class Meta:\n model = Job\n fields = [\n \"id\",\n \"source\",\n \"module_name\",\n \"job_class_name\",\n \"slug\",\n \"name\",\n \"grouping\",\n \"installed\",\n \"enabled\",\n \"has_sensitive_variables\",\n \"approval_required\",\n \"commit_default\",\n \"hidden\",\n \"read_only\",\n \"is_job_hook_receiver\",\n \"soft_time_limit\",\n \"time_limit\",\n \"grouping_override\",\n \"name_override\",\n \"approval_required_override\",\n \"description_override\",\n \"commit_default_override\",\n \"hidden_override\",\n \"read_only_override\",\n \"soft_time_limit_override\",\n \"time_limit_override\",\n \"has_sensitive_variables_override\",\n ]\n\n\nclass 
JobHookFilterSet(BaseFilterSet):\n q = SearchFilter(filter_predicates={\"name\": \"icontains\", \"slug\": \"icontains\"})\n content_types = ContentTypeMultipleChoiceFilter(\n choices=ChangeLoggedModelsQuery().get_choices,\n )\n job = NaturalKeyOrPKMultipleChoiceFilter(\n queryset=Job.objects.all(),\n label=\"Job (slug or ID)\",\n )\n\n class Meta:\n model = JobHook\n fields = [\n \"name\",\n \"content_types\",\n \"enabled\",\n \"job\",\n \"slug\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n ]\n\n\nclass JobResultFilterSet(BaseFilterSet, CustomFieldModelFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"job_model__name\": \"icontains\",\n \"name\": \"icontains\",\n \"user__username\": \"icontains\",\n },\n )\n job_model = django_filters.ModelMultipleChoiceFilter(\n field_name=\"job_model__slug\",\n queryset=Job.objects.all(),\n to_field_name=\"slug\",\n label=\"Job (slug)\",\n )\n job_model_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Job.objects.all(),\n label=\"Job (ID)\",\n )\n obj_type = ContentTypeFilter()\n created = django_filters.DateTimeFilter()\n completed = django_filters.DateTimeFilter()\n status = django_filters.MultipleChoiceFilter(choices=JobResultStatusChoices, null_value=None)\n\n class Meta:\n model = JobResult\n fields = [\"id\", \"created\", \"completed\", \"status\", \"user\", \"obj_type\", \"name\"]\n\n\nclass JobLogEntryFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"grouping\": \"icontains\",\n \"message\": \"icontains\",\n \"log_level\": \"icontains\",\n },\n )\n\n class Meta:\n model = JobLogEntry\n exclude = []\n\n\nclass ScheduledJobFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"job_class\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n job_model = django_filters.ModelMultipleChoiceFilter(\n field_name=\"job_model__slug\",\n queryset=Job.objects.all(),\n to_field_name=\"slug\",\n label=\"Job (slug)\",\n )\n job_model_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Job.objects.all(),\n label=\"Job (ID)\",\n )\n\n first_run = django_filters.DateTimeFilter()\n last_run = django_filters.DateTimeFilter()\n\n class Meta:\n model = ScheduledJob\n fields = [\"id\", \"name\", \"total_run_count\"]\n\n\n#\n# Filter for Local Config Context Data\n#\n\n\n# TODO: should be LocalContextFilterSetMixin\nclass LocalContextFilterSet(django_filters.FilterSet):\n local_context_data = django_filters.BooleanFilter(\n method=\"_local_context_data\",\n label=\"Has local config context data\",\n )\n local_context_schema_id = django_filters.ModelMultipleChoiceFilter(\n queryset=ConfigContextSchema.objects.all(),\n label=\"Schema (ID)\",\n )\n local_context_schema = django_filters.ModelMultipleChoiceFilter(\n field_name=\"local_context_schema__slug\",\n queryset=ConfigContextSchema.objects.all(),\n to_field_name=\"slug\",\n label=\"Schema (slug)\",\n )\n\n def _local_context_data(self, queryset, name, value):\n return queryset.exclude(local_context_data__isnull=value)\n\n\nclass NoteFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"user_name\": \"icontains\",\n \"note\": \"icontains\",\n \"assigned_object_id\": \"exact\",\n },\n )\n assigned_object_type = ContentTypeFilter()\n user = NaturalKeyOrPKMultipleChoiceFilter(\n to_field_name=\"username\",\n queryset=get_user_model().objects.all(),\n label=\"User (username or ID)\",\n )\n\n class Meta:\n model = Note\n fields = [\n \"id\",\n \"user\",\n \"user_name\",\n 
\"assigned_object_type_id\",\n \"assigned_object_id\",\n \"note\",\n ]\n\n\nclass ObjectChangeFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"user_name\": \"icontains\",\n \"object_repr\": \"icontains\",\n },\n )\n changed_object_type = ContentTypeFilter()\n user_id = django_filters.ModelMultipleChoiceFilter(\n queryset=get_user_model().objects.all(),\n label=\"User (ID)\",\n )\n user = django_filters.ModelMultipleChoiceFilter(\n field_name=\"user__username\",\n queryset=get_user_model().objects.all(),\n to_field_name=\"username\",\n label=\"User name\",\n )\n\n class Meta:\n model = ObjectChange\n fields = [\n \"id\",\n \"user\",\n \"user_name\",\n \"request_id\",\n \"action\",\n \"changed_object_type_id\",\n \"changed_object_id\",\n \"object_repr\",\n \"time\",\n ]\n\n\n#\n# Relationships\n#\n\n\nclass RelationshipFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"description\": \"icontains\",\n }\n )\n\n source_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery(\"relationships\").get_choices, conjoined=False)\n destination_type = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"relationships\").get_choices, conjoined=False\n )\n\n class Meta:\n model = Relationship\n fields = [\"id\", \"name\", \"slug\", \"type\", \"source_type\", \"destination_type\"]\n\n\nclass RelationshipAssociationFilterSet(BaseFilterSet):\n\n relationship = django_filters.ModelMultipleChoiceFilter(\n field_name=\"relationship__slug\",\n queryset=Relationship.objects.all(),\n to_field_name=\"slug\",\n label=\"Relationship (slug)\",\n )\n source_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery(\"relationships\").get_choices, conjoined=False)\n destination_type = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"relationships\").get_choices, conjoined=False\n )\n peer_id = MultiValueUUIDFilter(method=\"peer_id_filter\")\n\n class Meta:\n model = RelationshipAssociation\n fields = [\"id\", \"relationship\", \"source_type\", \"source_id\", \"destination_type\", \"destination_id\", \"peer_id\"]\n\n def peer_id_filter(self, queryset, name, value):\n # Filter down to symmetric relationships only.\n queryset = queryset.filter(\n relationship__type__in=[\n RelationshipTypeChoices.TYPE_ONE_TO_ONE_SYMMETRIC,\n RelationshipTypeChoices.TYPE_MANY_TO_MANY_SYMMETRIC,\n ]\n )\n # Then Filter based on peer_id.\n queryset = queryset.filter(source_id__in=value) | queryset.filter(destination_id__in=value)\n return queryset\n\n\n#\n# Secrets\n#\n\n\nclass SecretFilterSet(\n BaseFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet,\n):\n \"\"\"Filterset for the Secret model.\"\"\"\n\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n },\n )\n # TODO dynamic choices needed\n # provider = django_filters.MultipleChoiceFilter(choices=..., null_value=None)\n\n class Meta:\n model = Secret\n fields = (\"id\", \"name\", \"slug\", \"provider\", \"created\", \"last_updated\")\n\n\nclass SecretsGroupFilterSet(\n BaseFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet,\n):\n \"\"\"Filterset for the SecretsGroup model.\"\"\"\n\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n },\n )\n\n class Meta:\n model = SecretsGroup\n fields = (\"id\", \"name\", \"slug\", \"created\", \"last_updated\")\n\n\nclass SecretsGroupAssociationFilterSet(BaseFilterSet):\n \"\"\"Filterset for the SecretsGroupAssociation through model.\"\"\"\n\n 
group_id = django_filters.ModelMultipleChoiceFilter(\n queryset=SecretsGroup.objects.all(),\n label=\"Group (ID)\",\n )\n group = django_filters.ModelMultipleChoiceFilter(\n queryset=SecretsGroup.objects.all(),\n field_name=\"group__slug\",\n to_field_name=\"slug\",\n label=\"Group (slug)\",\n )\n secret_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Secret.objects.all(),\n label=\"Secret (ID)\",\n )\n secret = django_filters.ModelMultipleChoiceFilter(\n queryset=Secret.objects.all(),\n field_name=\"secret__slug\",\n to_field_name=\"slug\",\n label=\"Secret (slug)\",\n )\n access_type = django_filters.MultipleChoiceFilter(choices=SecretsGroupAccessTypeChoices)\n secret_type = django_filters.MultipleChoiceFilter(choices=SecretsGroupSecretTypeChoices)\n\n class Meta:\n model = SecretsGroupAssociation\n fields = (\"id\",)\n\n\n#\n# Statuses\n#\n\n\nclass StatusFilter(django_filters.ModelMultipleChoiceFilter):\n \"\"\"\n Filter field used for filtering Status fields.\n\n Explicitly sets `to_field_name='value'` and dynamically sets queryset to\n retrieve choices for the corresponding model & field name bound to the\n filterset.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs[\"to_field_name\"] = \"slug\"\n super().__init__(*args, **kwargs)\n\n def get_queryset(self, request):\n self.queryset = Status.objects.all()\n return super().get_queryset(request)\n\n def get_filter_predicate(self, value):\n \"\"\"Always use the field's name and the `to_field_name` attribute as predicate.\"\"\"\n # e.g. `status__slug`\n to_field_name = self.field.to_field_name\n name = f\"{self.field_name}__{to_field_name}\"\n # Sometimes the incoming value is an instance. This block of logic comes from the base\n # `get_filter_predicate()` and was added here to support this.\n try:\n return {name: getattr(value, to_field_name)}\n except (AttributeError, TypeError):\n return {name: value}\n\n\nclass StatusFilterSet(NautobotFilterSet):\n \"\"\"API filter for filtering custom status object fields.\"\"\"\n\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"content_types__model\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"statuses\").get_choices,\n )\n\n class Meta:\n model = Status\n fields = [\n \"id\",\n \"content_types\",\n \"color\",\n \"name\",\n \"slug\",\n \"created\",\n \"last_updated\",\n ]\n\n\nclass StatusModelFilterSetMixin(django_filters.FilterSet):\n \"\"\"\n Mixin to add a `status` filter field to a FilterSet.\n \"\"\"\n\n status = StatusFilter()\n\n\n#\n# Tags\n#\n\n\nclass TagFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"content_types__model\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=TaggableClassesQuery().get_choices,\n )\n\n class Meta:\n model = Tag\n fields = [\"id\", \"name\", \"slug\", \"color\", \"content_types\"]\n\n\n#\n# Webhooks\n#\n\n\nclass WebhookFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"payload_url\": \"icontains\",\n \"additional_headers\": \"icontains\",\n \"body_template\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"webhooks\").get_choices,\n )\n\n class Meta:\n model = Webhook\n fields = [\n \"name\",\n \"payload_url\",\n \"enabled\",\n \"content_types\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n ]\n", 
"path": "nautobot/extras/filters.py" } ]
[ { "content": "import django_filters\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.forms import IntegerField\n\nfrom nautobot.dcim.models import DeviceRole, DeviceType, Location, Platform, Region, Site\nfrom nautobot.extras.utils import ChangeLoggedModelsQuery, FeatureQuery, TaggableClassesQuery\nfrom nautobot.tenancy.models import Tenant, TenantGroup\nfrom nautobot.utilities.constants import FILTER_CHAR_BASED_LOOKUP_MAP, FILTER_NUMERIC_BASED_LOOKUP_MAP\nfrom nautobot.utilities.filters import (\n BaseFilterSet,\n ContentTypeFilter,\n ContentTypeMultipleChoiceFilter,\n MultiValueCharFilter,\n MultiValueDateFilter,\n MultiValueNumberFilter,\n MultiValueUUIDFilter,\n NaturalKeyOrPKMultipleChoiceFilter,\n SearchFilter,\n TagFilter,\n)\nfrom nautobot.virtualization.models import Cluster, ClusterGroup\nfrom .choices import (\n CustomFieldFilterLogicChoices,\n CustomFieldTypeChoices,\n JobResultStatusChoices,\n RelationshipSideChoices,\n RelationshipTypeChoices,\n SecretsGroupAccessTypeChoices,\n SecretsGroupSecretTypeChoices,\n)\nfrom .models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomFieldChoice,\n CustomLink,\n DynamicGroup,\n DynamicGroupMembership,\n ExportTemplate,\n GitRepository,\n GraphQLQuery,\n ImageAttachment,\n Job,\n JobHook,\n JobLogEntry,\n JobResult,\n Note,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n ScheduledJob,\n Secret,\n SecretsGroup,\n SecretsGroupAssociation,\n Status,\n Tag,\n Webhook,\n)\n\n\n__all__ = (\n \"ComputedFieldFilterSet\",\n \"ConfigContextFilterSet\",\n \"ContentTypeFilterSet\",\n \"CreatedUpdatedFilterSet\",\n \"CustomFieldBooleanFilter\",\n \"CustomFieldCharFilter\",\n \"CustomFieldDateFilter\",\n \"CustomFieldFilterMixin\",\n \"CustomFieldJSONFilter\",\n \"CustomFieldMultiSelectFilter\",\n \"CustomFieldMultiValueCharFilter\",\n \"CustomFieldMultiValueDateFilter\",\n \"CustomFieldMultiValueNumberFilter\",\n \"CustomFieldNumberFilter\",\n \"CustomFieldModelFilterSet\",\n \"CustomLinkFilterSet\",\n \"DynamicGroupFilterSet\",\n \"DynamicGroupMembershipFilterSet\",\n \"ExportTemplateFilterSet\",\n \"GitRepositoryFilterSet\",\n \"GraphQLQueryFilterSet\",\n \"ImageAttachmentFilterSet\",\n \"JobFilterSet\",\n \"JobLogEntryFilterSet\",\n \"JobResultFilterSet\",\n \"LocalContextFilterSet\",\n \"NautobotFilterSet\",\n \"NoteFilterSet\",\n \"ObjectChangeFilterSet\",\n \"RelationshipFilterSet\",\n \"RelationshipAssociationFilterSet\",\n \"ScheduledJobFilterSet\",\n \"SecretFilterSet\",\n \"SecretsGroupFilterSet\",\n \"SecretsGroupAssociationFilterSet\",\n \"StatusFilter\",\n \"StatusFilterSet\",\n \"StatusModelFilterSetMixin\",\n \"TagFilterSet\",\n \"WebhookFilterSet\",\n)\n\n\n#\n# Mixins\n#\n\n\n# TODO: should be CreatedUpdatedFilterSetMixin.\nclass CreatedUpdatedFilterSet(django_filters.FilterSet):\n created = django_filters.DateFilter()\n created__gte = django_filters.DateFilter(field_name=\"created\", lookup_expr=\"gte\")\n created__lte = django_filters.DateFilter(field_name=\"created\", lookup_expr=\"lte\")\n last_updated = django_filters.DateTimeFilter()\n last_updated__gte = django_filters.DateTimeFilter(field_name=\"last_updated\", lookup_expr=\"gte\")\n last_updated__lte = django_filters.DateTimeFilter(field_name=\"last_updated\", lookup_expr=\"lte\")\n\n\nclass RelationshipFilter(django_filters.ModelMultipleChoiceFilter):\n \"\"\"\n Filter objects by the presence of associations on a 
given Relationship.\n \"\"\"\n\n def __init__(self, side, relationship=None, queryset=None, qs=None, *args, **kwargs):\n self.relationship = relationship\n self.qs = qs\n self.side = side\n super().__init__(queryset=queryset, *args, **kwargs)\n\n def filter(self, qs, value):\n value = [entry.id for entry in value]\n # Check if value is empty or a DynamicChoiceField that is empty.\n if not value or \"\" in value:\n # if value is empty we return the entire unmodified queryset\n return qs\n else:\n if self.side == \"source\":\n values = RelationshipAssociation.objects.filter(\n destination_id__in=value,\n source_type=self.relationship.source_type,\n relationship=self.relationship,\n ).values_list(\"source_id\", flat=True)\n elif self.side == \"destination\":\n values = RelationshipAssociation.objects.filter(\n source_id__in=value,\n destination_type=self.relationship.destination_type,\n relationship=self.relationship,\n ).values_list(\"destination_id\", flat=True)\n else:\n destinations = RelationshipAssociation.objects.filter(\n source_id__in=value,\n destination_type=self.relationship.destination_type,\n relationship=self.relationship,\n ).values_list(\"destination_id\", flat=True)\n\n sources = RelationshipAssociation.objects.filter(\n destination_id__in=value,\n source_type=self.relationship.source_type,\n relationship=self.relationship,\n ).values_list(\"source_id\", flat=True)\n\n values = list(destinations) + list(sources)\n qs &= self.get_method(self.qs)(Q(**{\"id__in\": values}))\n return qs\n\n\nclass RelationshipModelFilterSet(django_filters.FilterSet):\n \"\"\"\n Filterset for applicable to the parent model.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.obj_type = ContentType.objects.get_for_model(self._meta.model)\n super().__init__(*args, **kwargs)\n self.relationships = []\n self._append_relationships(model=self._meta.model)\n\n def _append_relationships(self, model):\n \"\"\"\n Append form fields for all Relationships assigned to this model.\n \"\"\"\n source_relationships = Relationship.objects.filter(source_type=self.obj_type, source_hidden=False)\n self._append_relationships_side(source_relationships, RelationshipSideChoices.SIDE_SOURCE, model)\n\n dest_relationships = Relationship.objects.filter(destination_type=self.obj_type, destination_hidden=False)\n self._append_relationships_side(dest_relationships, RelationshipSideChoices.SIDE_DESTINATION, model)\n\n def _append_relationships_side(self, relationships, initial_side, model):\n \"\"\"\n Helper method to _append_relationships, for processing one \"side\" of the relationships for this model.\n \"\"\"\n for relationship in relationships:\n if relationship.symmetric:\n side = RelationshipSideChoices.SIDE_PEER\n else:\n side = initial_side\n peer_side = RelationshipSideChoices.OPPOSITE[side]\n\n # If this model is on the \"source\" side of the relationship, then the field will be named\n # \"cr_<relationship-slug>__destination\" since it's used to pick the destination object(s).\n # If we're on the \"destination\" side, the field will be \"cr_<relationship-slug>__source\".\n # For a symmetric relationship, both sides are \"peer\", so the field will be \"cr_<relationship-slug>__peer\"\n field_name = f\"cr_{relationship.slug}__{peer_side}\"\n\n if field_name in self.relationships:\n # This is a symmetric relationship that we already processed from the opposing \"initial_side\".\n # No need to process it a second time!\n continue\n if peer_side == \"source\":\n choice_model = relationship.source_type.model_class()\n 
elif peer_side == \"destination\":\n choice_model = relationship.destination_type.model_class()\n else:\n choice_model = model\n # Check for invalid_relationship unit test\n if choice_model:\n self.filters[field_name] = RelationshipFilter(\n relationship=relationship,\n side=side,\n field_name=field_name,\n queryset=choice_model.objects.all(),\n qs=model.objects.all(),\n )\n self.relationships.append(field_name)\n\n\n#\n# Computed Fields\n#\n\n\nclass ComputedFieldFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"label\": \"icontains\",\n \"description\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n \"template\": \"icontains\",\n \"fallback_value\": \"icontains\",\n },\n )\n content_type = ContentTypeFilter()\n\n class Meta:\n model = ComputedField\n fields = (\n \"content_type\",\n \"slug\",\n \"template\",\n \"fallback_value\",\n \"weight\",\n )\n\n\n#\n# Config Contexts\n#\n\n\nclass ConfigContextFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"description\": \"icontains\",\n \"data\": \"icontains\",\n },\n )\n owner_content_type = ContentTypeFilter()\n region_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"regions\",\n queryset=Region.objects.all(),\n label=\"Region\",\n )\n region = django_filters.ModelMultipleChoiceFilter(\n field_name=\"regions__slug\",\n queryset=Region.objects.all(),\n to_field_name=\"slug\",\n label=\"Region (slug)\",\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"sites\",\n queryset=Site.objects.all(),\n label=\"Site\",\n )\n site = django_filters.ModelMultipleChoiceFilter(\n field_name=\"sites__slug\",\n queryset=Site.objects.all(),\n to_field_name=\"slug\",\n label=\"Site (slug)\",\n )\n location_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"locations\",\n queryset=Location.objects.all(),\n label=\"Location (ID)\",\n )\n location = django_filters.ModelMultipleChoiceFilter(\n field_name=\"locations__slug\",\n queryset=Location.objects.all(),\n label=\"Location (slug)\",\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"roles\",\n queryset=DeviceRole.objects.all(),\n label=\"Role\",\n )\n role = django_filters.ModelMultipleChoiceFilter(\n field_name=\"roles__slug\",\n queryset=DeviceRole.objects.all(),\n to_field_name=\"slug\",\n label=\"Role (slug)\",\n )\n device_type_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"device_types\",\n queryset=DeviceType.objects.all(),\n label=\"Device Type\",\n )\n device_type = django_filters.ModelMultipleChoiceFilter(\n field_name=\"device_types__slug\",\n queryset=DeviceType.objects.all(),\n to_field_name=\"slug\",\n label=\"Device Type (slug)\",\n )\n platform_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"platforms\",\n queryset=Platform.objects.all(),\n label=\"Platform\",\n )\n platform = django_filters.ModelMultipleChoiceFilter(\n field_name=\"platforms__slug\",\n queryset=Platform.objects.all(),\n to_field_name=\"slug\",\n label=\"Platform (slug)\",\n )\n cluster_group_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"cluster_groups\",\n queryset=ClusterGroup.objects.all(),\n label=\"Cluster group\",\n )\n cluster_group = django_filters.ModelMultipleChoiceFilter(\n field_name=\"cluster_groups__slug\",\n queryset=ClusterGroup.objects.all(),\n to_field_name=\"slug\",\n label=\"Cluster group (slug)\",\n )\n cluster_id = django_filters.ModelMultipleChoiceFilter(\n 
field_name=\"clusters\",\n queryset=Cluster.objects.all(),\n label=\"Cluster\",\n )\n tenant_group_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenant_groups\",\n queryset=TenantGroup.objects.all(),\n label=\"Tenant group\",\n )\n tenant_group = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenant_groups__slug\",\n queryset=TenantGroup.objects.all(),\n to_field_name=\"slug\",\n label=\"Tenant group (slug)\",\n )\n tenant_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenants\",\n queryset=Tenant.objects.all(),\n label=\"Tenant\",\n )\n tenant = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tenants__slug\",\n queryset=Tenant.objects.all(),\n to_field_name=\"slug\",\n label=\"Tenant (slug)\",\n )\n tag = django_filters.ModelMultipleChoiceFilter(\n field_name=\"tags__slug\",\n queryset=Tag.objects.all(),\n to_field_name=\"slug\",\n label=\"Tag (slug)\",\n )\n\n class Meta:\n model = ConfigContext\n fields = [\"id\", \"name\", \"is_active\", \"owner_content_type\", \"owner_object_id\"]\n\n\n#\n# Filter for config context schema\n#\n\n\nclass ConfigContextSchemaFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"description\": \"icontains\",\n \"data_schema\": \"icontains\",\n },\n )\n owner_content_type = ContentTypeFilter()\n\n class Meta:\n model = ConfigContextSchema\n fields = [\n \"id\",\n \"name\",\n \"description\",\n ]\n\n\n#\n# ContentTypes\n#\n\n\nclass ContentTypeFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"app_label\": \"icontains\",\n \"model\": \"icontains\",\n },\n )\n\n class Meta:\n model = ContentType\n fields = [\"id\", \"app_label\", \"model\"]\n\n\n#\n# Custom Fields\n#\n\n\nEXACT_FILTER_TYPES = (\n CustomFieldTypeChoices.TYPE_BOOLEAN,\n CustomFieldTypeChoices.TYPE_DATE,\n CustomFieldTypeChoices.TYPE_INTEGER,\n CustomFieldTypeChoices.TYPE_SELECT,\n CustomFieldTypeChoices.TYPE_MULTISELECT,\n)\n\n\nclass CustomFieldFilterMixin:\n \"\"\"\n Filter mixin for CustomField to handle CustomField.filter_logic setting\n and queryset.exclude filtering specific to the JSONField where CustomField data is stored.\n \"\"\"\n\n def __init__(self, custom_field, *args, **kwargs):\n self.custom_field = custom_field\n if custom_field.type not in EXACT_FILTER_TYPES:\n if custom_field.filter_logic == CustomFieldFilterLogicChoices.FILTER_LOOSE:\n kwargs.setdefault(\"lookup_expr\", \"icontains\")\n super().__init__(*args, **kwargs)\n self.field_name = f\"_custom_field_data__{self.field_name}\"\n\n def filter(self, qs, value):\n if value in django_filters.constants.EMPTY_VALUES:\n return qs\n\n if value == \"null\":\n return self.get_method(qs)(\n Q(**{f\"{self.field_name}__exact\": None}) | Q(**{f\"{self.field_name}__isnull\": True})\n )\n\n # Custom fields require special handling for exclude filtering.\n # Return custom fields that don't match the value and null custom fields\n if self.exclude:\n qs_null_custom_fields = qs.filter(**{f\"{self.field_name}__isnull\": True}).distinct()\n return super().filter(qs, value) | qs_null_custom_fields\n\n return super().filter(qs, value)\n\n\nclass CustomFieldBooleanFilter(CustomFieldFilterMixin, django_filters.BooleanFilter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass CustomFieldCharFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass CustomFieldDateFilter(CustomFieldFilterMixin, 
django_filters.DateFilter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass CustomFieldJSONFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n\nclass CustomFieldMultiSelectFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"lookup_expr\", \"contains\")\n super().__init__(*args, **kwargs)\n\n\nclass CustomFieldNumberFilter(CustomFieldFilterMixin, django_filters.Filter):\n \"\"\"Custom field single value filter for backwards compatibility\"\"\"\n\n field_class = IntegerField\n\n\nclass CustomFieldMultiValueCharFilter(CustomFieldFilterMixin, MultiValueCharFilter):\n \"\"\"Custom field multi value char filter for extended lookup expressions\"\"\"\n\n\nclass CustomFieldMultiValueDateFilter(CustomFieldFilterMixin, MultiValueDateFilter):\n \"\"\"Custom field multi value date filter for extended lookup expressions\"\"\"\n\n\nclass CustomFieldMultiValueNumberFilter(CustomFieldFilterMixin, MultiValueNumberFilter):\n \"\"\"Custom field multi value number filter for extended lookup expressions\"\"\"\n\n\n# TODO: should be CustomFieldModelFilterSetMixin\nclass CustomFieldModelFilterSet(django_filters.FilterSet):\n \"\"\"\n Dynamically add a Filter for each CustomField applicable to the parent model. Add filters for\n extra lookup expressions on supported CustomField types.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n custom_field_filter_classes = {\n CustomFieldTypeChoices.TYPE_DATE: CustomFieldDateFilter,\n CustomFieldTypeChoices.TYPE_BOOLEAN: CustomFieldBooleanFilter,\n CustomFieldTypeChoices.TYPE_INTEGER: CustomFieldNumberFilter,\n CustomFieldTypeChoices.TYPE_JSON: CustomFieldJSONFilter,\n CustomFieldTypeChoices.TYPE_MULTISELECT: CustomFieldMultiSelectFilter,\n }\n\n custom_fields = CustomField.objects.filter(\n content_types=ContentType.objects.get_for_model(self._meta.model)\n ).exclude(filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED)\n for cf in custom_fields:\n # Determine filter class for this CustomField type, default to CustomFieldBaseFilter\n # 2.0 TODO: #824 use cf.slug instead\n new_filter_name = f\"cf_{cf.name}\"\n filter_class = custom_field_filter_classes.get(cf.type, CustomFieldCharFilter)\n new_filter_field = filter_class(field_name=cf.name, custom_field=cf)\n\n # Create base filter (cf_customfieldname)\n self.filters[new_filter_name] = new_filter_field\n\n # Create extra lookup expression filters (cf_customfieldname__lookup_expr)\n self.filters.update(\n self._generate_custom_field_lookup_expression_filters(filter_name=new_filter_name, custom_field=cf)\n )\n\n @staticmethod\n def _get_custom_field_filter_lookup_dict(filter_type):\n # Choose the lookup expression map based on the filter type\n if issubclass(filter_type, (CustomFieldMultiValueNumberFilter, CustomFieldMultiValueDateFilter)):\n lookup_map = FILTER_NUMERIC_BASED_LOOKUP_MAP\n else:\n lookup_map = FILTER_CHAR_BASED_LOOKUP_MAP\n\n return lookup_map\n\n # TODO 2.0: Transition CustomField filters to nautobot.utilities.filters.MultiValue* filters and\n # leverage BaseFilterSet to add dynamic lookup expression filters. 
Remove CustomField.filter_logic field\n @classmethod\n def _generate_custom_field_lookup_expression_filters(cls, filter_name, custom_field):\n \"\"\"\n For specific filter types, new filters are created based on defined lookup expressions in\n the form `<field_name>__<lookup_expr>`. Copied from nautobot.utilities.filters.BaseFilterSet\n and updated to work with custom fields.\n \"\"\"\n magic_filters = {}\n custom_field_type_to_filter_map = {\n CustomFieldTypeChoices.TYPE_DATE: CustomFieldMultiValueDateFilter,\n CustomFieldTypeChoices.TYPE_INTEGER: CustomFieldMultiValueNumberFilter,\n CustomFieldTypeChoices.TYPE_SELECT: CustomFieldMultiValueCharFilter,\n CustomFieldTypeChoices.TYPE_TEXT: CustomFieldMultiValueCharFilter,\n CustomFieldTypeChoices.TYPE_URL: CustomFieldMultiValueCharFilter,\n }\n\n if custom_field.type in custom_field_type_to_filter_map:\n filter_type = custom_field_type_to_filter_map[custom_field.type]\n else:\n return magic_filters\n\n # Choose the lookup expression map based on the filter type\n lookup_map = cls._get_custom_field_filter_lookup_dict(filter_type)\n\n # Create new filters for each lookup expression in the map\n for lookup_name, lookup_expr in lookup_map.items():\n new_filter_name = f\"{filter_name}__{lookup_name}\"\n new_filter = filter_type(\n field_name=custom_field.name,\n lookup_expr=lookup_expr,\n custom_field=custom_field,\n exclude=lookup_name.startswith(\"n\"),\n )\n\n magic_filters[new_filter_name] = new_filter\n\n return magic_filters\n\n\nclass CustomFieldFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"label\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"custom_fields\").get_choices,\n )\n\n class Meta:\n model = CustomField\n fields = [\"id\", \"content_types\", \"name\", \"required\", \"filter_logic\", \"weight\"]\n\n\nclass CustomFieldChoiceFilterSet(BaseFilterSet):\n q = SearchFilter(filter_predicates={\"value\": \"icontains\"})\n field_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"field\",\n queryset=CustomField.objects.all(),\n label=\"Field\",\n )\n field = django_filters.ModelMultipleChoiceFilter(\n field_name=\"field__name\",\n queryset=CustomField.objects.all(),\n to_field_name=\"name\",\n label=\"Field (name)\",\n )\n\n class Meta:\n model = CustomFieldChoice\n fields = [\"id\", \"value\", \"weight\"]\n\n\n#\n# Nautobot base filterset to use for most custom filterset classes.\n#\n\n\nclass NautobotFilterSet(BaseFilterSet, CreatedUpdatedFilterSet, RelationshipModelFilterSet, CustomFieldModelFilterSet):\n \"\"\"\n This class exists to combine common functionality and is used as a base class throughout the\n codebase where all of BaseFilterSet, CreatedUpdatedFilterSet, RelationshipModelFilterSet and CustomFieldModelFilterSet\n are needed.\n \"\"\"\n\n\n#\n# Custom Links\n#\n\n\nclass CustomLinkFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"target_url\": \"icontains\",\n \"text\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n },\n )\n content_type = ContentTypeFilter()\n\n class Meta:\n model = CustomLink\n fields = (\n \"content_type\",\n \"name\",\n \"text\",\n \"target_url\",\n \"weight\",\n \"group_name\",\n \"button_class\",\n \"new_window\",\n )\n\n\n#\n# Dynamic Groups\n#\n\n\nclass DynamicGroupFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n 
\"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"description\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n },\n )\n content_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery(\"dynamic_groups\").get_choices, conjoined=False)\n\n class Meta:\n model = DynamicGroup\n fields = (\"id\", \"name\", \"slug\", \"description\")\n\n\nclass DynamicGroupMembershipFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"operator\": \"icontains\",\n \"group__name\": \"icontains\",\n \"group__slug\": \"icontains\",\n \"parent_group__name\": \"icontains\",\n \"parent_group__slug\": \"icontains\",\n },\n )\n group = NaturalKeyOrPKMultipleChoiceFilter(\n queryset=DynamicGroup.objects.all(),\n label=\"Group (slug or ID)\",\n )\n parent_group = NaturalKeyOrPKMultipleChoiceFilter(\n queryset=DynamicGroup.objects.all(),\n label=\"Parent Group (slug or ID)\",\n )\n\n class Meta:\n model = DynamicGroupMembership\n fields = (\"id\", \"group\", \"parent_group\", \"operator\", \"weight\")\n\n\n#\n# Export Templates\n#\n\n\nclass ExportTemplateFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"owner_content_type__app_label\": \"icontains\",\n \"owner_content_type__model\": \"icontains\",\n \"content_type__app_label\": \"icontains\",\n \"content_type__model\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n owner_content_type = ContentTypeFilter()\n\n class Meta:\n model = ExportTemplate\n fields = [\"id\", \"content_type\", \"owner_content_type\", \"owner_object_id\", \"name\"]\n\n\n#\n# Datasources (Git)\n#\n\n\nclass GitRepositoryFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"remote_url\": \"icontains\",\n \"branch\": \"icontains\",\n },\n )\n secrets_group_id = django_filters.ModelMultipleChoiceFilter(\n field_name=\"secrets_group\",\n queryset=SecretsGroup.objects.all(),\n label=\"Secrets group (ID)\",\n )\n secrets_group = django_filters.ModelMultipleChoiceFilter(\n field_name=\"secrets_group__slug\",\n queryset=SecretsGroup.objects.all(),\n to_field_name=\"slug\",\n label=\"Secrets group (slug)\",\n )\n tag = TagFilter()\n\n class Meta:\n model = GitRepository\n fields = [\"id\", \"name\", \"slug\", \"remote_url\", \"branch\", \"provided_contents\"]\n\n\n#\n# GraphQL Queries\n#\n\n\nclass GraphQLQueryFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"query\": \"icontains\",\n },\n )\n\n class Meta:\n model = GraphQLQuery\n fields = [\"name\", \"slug\"]\n\n\n#\n# Image Attachments\n#\n\n\nclass ImageAttachmentFilterSet(BaseFilterSet):\n content_type = ContentTypeFilter()\n\n class Meta:\n model = ImageAttachment\n fields = [\"id\", \"content_type_id\", \"object_id\", \"name\"]\n\n\n#\n# Jobs\n#\n\n\nclass JobFilterSet(BaseFilterSet, CustomFieldModelFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"grouping\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n tag = TagFilter()\n\n class Meta:\n model = Job\n fields = [\n \"id\",\n \"source\",\n \"module_name\",\n \"job_class_name\",\n \"slug\",\n \"name\",\n \"grouping\",\n \"installed\",\n \"enabled\",\n \"has_sensitive_variables\",\n \"approval_required\",\n \"commit_default\",\n \"hidden\",\n \"read_only\",\n \"is_job_hook_receiver\",\n \"soft_time_limit\",\n \"time_limit\",\n \"grouping_override\",\n 
\"name_override\",\n \"approval_required_override\",\n \"description_override\",\n \"commit_default_override\",\n \"hidden_override\",\n \"read_only_override\",\n \"soft_time_limit_override\",\n \"time_limit_override\",\n \"has_sensitive_variables_override\",\n ]\n\n\nclass JobHookFilterSet(BaseFilterSet):\n q = SearchFilter(filter_predicates={\"name\": \"icontains\", \"slug\": \"icontains\"})\n content_types = ContentTypeMultipleChoiceFilter(\n choices=ChangeLoggedModelsQuery().get_choices,\n )\n job = NaturalKeyOrPKMultipleChoiceFilter(\n queryset=Job.objects.all(),\n label=\"Job (slug or ID)\",\n )\n\n class Meta:\n model = JobHook\n fields = [\n \"name\",\n \"content_types\",\n \"enabled\",\n \"job\",\n \"slug\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n ]\n\n\nclass JobResultFilterSet(BaseFilterSet, CustomFieldModelFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"job_model__name\": \"icontains\",\n \"name\": \"icontains\",\n \"user__username\": \"icontains\",\n },\n )\n job_model = django_filters.ModelMultipleChoiceFilter(\n field_name=\"job_model__slug\",\n queryset=Job.objects.all(),\n to_field_name=\"slug\",\n label=\"Job (slug)\",\n )\n job_model_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Job.objects.all(),\n label=\"Job (ID)\",\n )\n obj_type = ContentTypeFilter()\n created = django_filters.DateTimeFilter()\n completed = django_filters.DateTimeFilter()\n status = django_filters.MultipleChoiceFilter(choices=JobResultStatusChoices, null_value=None)\n\n class Meta:\n model = JobResult\n fields = [\"id\", \"created\", \"completed\", \"status\", \"user\", \"obj_type\", \"name\"]\n\n\nclass JobLogEntryFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"grouping\": \"icontains\",\n \"message\": \"icontains\",\n \"log_level\": \"icontains\",\n },\n )\n\n class Meta:\n model = JobLogEntry\n exclude = []\n\n\nclass ScheduledJobFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"job_class\": \"icontains\",\n \"description\": \"icontains\",\n },\n )\n job_model = django_filters.ModelMultipleChoiceFilter(\n field_name=\"job_model__slug\",\n queryset=Job.objects.all(),\n to_field_name=\"slug\",\n label=\"Job (slug)\",\n )\n job_model_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Job.objects.all(),\n label=\"Job (ID)\",\n )\n\n first_run = django_filters.DateTimeFilter()\n last_run = django_filters.DateTimeFilter()\n\n class Meta:\n model = ScheduledJob\n fields = [\"id\", \"name\", \"total_run_count\"]\n\n\n#\n# Filter for Local Config Context Data\n#\n\n\n# TODO: should be LocalContextFilterSetMixin\nclass LocalContextFilterSet(django_filters.FilterSet):\n local_context_data = django_filters.BooleanFilter(\n method=\"_local_context_data\",\n label=\"Has local config context data\",\n )\n local_context_schema_id = django_filters.ModelMultipleChoiceFilter(\n queryset=ConfigContextSchema.objects.all(),\n label=\"Schema (ID)\",\n )\n local_context_schema = django_filters.ModelMultipleChoiceFilter(\n field_name=\"local_context_schema__slug\",\n queryset=ConfigContextSchema.objects.all(),\n to_field_name=\"slug\",\n label=\"Schema (slug)\",\n )\n\n def _local_context_data(self, queryset, name, value):\n return queryset.exclude(local_context_data__isnull=value)\n\n\nclass NoteFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"user_name\": \"icontains\",\n \"note\": \"icontains\",\n \"assigned_object_id\": \"exact\",\n },\n )\n assigned_object_type = 
ContentTypeFilter()\n user = NaturalKeyOrPKMultipleChoiceFilter(\n to_field_name=\"username\",\n queryset=get_user_model().objects.all(),\n label=\"User (username or ID)\",\n )\n\n class Meta:\n model = Note\n fields = [\n \"id\",\n \"user\",\n \"user_name\",\n \"assigned_object_type_id\",\n \"assigned_object_id\",\n \"note\",\n ]\n\n\nclass ObjectChangeFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"user_name\": \"icontains\",\n \"object_repr\": \"icontains\",\n },\n )\n changed_object_type = ContentTypeFilter()\n user_id = django_filters.ModelMultipleChoiceFilter(\n queryset=get_user_model().objects.all(),\n label=\"User (ID)\",\n )\n user = django_filters.ModelMultipleChoiceFilter(\n field_name=\"user__username\",\n queryset=get_user_model().objects.all(),\n to_field_name=\"username\",\n label=\"User name\",\n )\n\n class Meta:\n model = ObjectChange\n fields = [\n \"id\",\n \"user\",\n \"user_name\",\n \"request_id\",\n \"action\",\n \"changed_object_type_id\",\n \"changed_object_id\",\n \"object_repr\",\n \"time\",\n ]\n\n\n#\n# Relationships\n#\n\n\nclass RelationshipFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"description\": \"icontains\",\n }\n )\n\n source_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery(\"relationships\").get_choices, conjoined=False)\n destination_type = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"relationships\").get_choices, conjoined=False\n )\n\n class Meta:\n model = Relationship\n fields = [\"id\", \"name\", \"slug\", \"type\", \"source_type\", \"destination_type\"]\n\n\nclass RelationshipAssociationFilterSet(BaseFilterSet):\n\n relationship = django_filters.ModelMultipleChoiceFilter(\n field_name=\"relationship__slug\",\n queryset=Relationship.objects.all(),\n to_field_name=\"slug\",\n label=\"Relationship (slug)\",\n )\n source_type = ContentTypeMultipleChoiceFilter(choices=FeatureQuery(\"relationships\").get_choices, conjoined=False)\n destination_type = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"relationships\").get_choices, conjoined=False\n )\n peer_id = MultiValueUUIDFilter(method=\"peer_id_filter\")\n\n class Meta:\n model = RelationshipAssociation\n fields = [\"id\", \"relationship\", \"source_type\", \"source_id\", \"destination_type\", \"destination_id\", \"peer_id\"]\n\n def peer_id_filter(self, queryset, name, value):\n # Filter down to symmetric relationships only.\n queryset = queryset.filter(\n relationship__type__in=[\n RelationshipTypeChoices.TYPE_ONE_TO_ONE_SYMMETRIC,\n RelationshipTypeChoices.TYPE_MANY_TO_MANY_SYMMETRIC,\n ]\n )\n # Then Filter based on peer_id.\n queryset = queryset.filter(source_id__in=value) | queryset.filter(destination_id__in=value)\n return queryset\n\n\n#\n# Secrets\n#\n\n\nclass SecretFilterSet(\n BaseFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet,\n):\n \"\"\"Filterset for the Secret model.\"\"\"\n\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n },\n )\n # TODO dynamic choices needed\n # provider = django_filters.MultipleChoiceFilter(choices=..., null_value=None)\n\n class Meta:\n model = Secret\n fields = (\"id\", \"name\", \"slug\", \"provider\", \"created\", \"last_updated\")\n\n\nclass SecretsGroupFilterSet(\n BaseFilterSet,\n CustomFieldModelFilterSet,\n CreatedUpdatedFilterSet,\n):\n \"\"\"Filterset for the SecretsGroup model.\"\"\"\n\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": 
\"icontains\",\n },\n )\n\n class Meta:\n model = SecretsGroup\n fields = (\"id\", \"name\", \"slug\", \"created\", \"last_updated\")\n\n\nclass SecretsGroupAssociationFilterSet(BaseFilterSet):\n \"\"\"Filterset for the SecretsGroupAssociation through model.\"\"\"\n\n group_id = django_filters.ModelMultipleChoiceFilter(\n queryset=SecretsGroup.objects.all(),\n label=\"Group (ID)\",\n )\n group = django_filters.ModelMultipleChoiceFilter(\n queryset=SecretsGroup.objects.all(),\n field_name=\"group__slug\",\n to_field_name=\"slug\",\n label=\"Group (slug)\",\n )\n secret_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Secret.objects.all(),\n label=\"Secret (ID)\",\n )\n secret = django_filters.ModelMultipleChoiceFilter(\n queryset=Secret.objects.all(),\n field_name=\"secret__slug\",\n to_field_name=\"slug\",\n label=\"Secret (slug)\",\n )\n access_type = django_filters.MultipleChoiceFilter(choices=SecretsGroupAccessTypeChoices)\n secret_type = django_filters.MultipleChoiceFilter(choices=SecretsGroupSecretTypeChoices)\n\n class Meta:\n model = SecretsGroupAssociation\n fields = (\"id\",)\n\n\n#\n# Statuses\n#\n\n\nclass StatusFilter(django_filters.ModelMultipleChoiceFilter):\n \"\"\"\n Filter field used for filtering Status fields.\n\n Explicitly sets `to_field_name='value'` and dynamically sets queryset to\n retrieve choices for the corresponding model & field name bound to the\n filterset.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs[\"to_field_name\"] = \"slug\"\n super().__init__(*args, **kwargs)\n\n def get_queryset(self, request):\n self.queryset = Status.objects.all()\n return super().get_queryset(request)\n\n def get_filter_predicate(self, value):\n \"\"\"Always use the field's name and the `to_field_name` attribute as predicate.\"\"\"\n # e.g. `status__slug`\n to_field_name = self.field.to_field_name\n name = f\"{self.field_name}__{to_field_name}\"\n # Sometimes the incoming value is an instance. 
This block of logic comes from the base\n # `get_filter_predicate()` and was added here to support this.\n try:\n return {name: getattr(value, to_field_name)}\n except (AttributeError, TypeError):\n return {name: value}\n\n\nclass StatusFilterSet(NautobotFilterSet):\n \"\"\"API filter for filtering custom status object fields.\"\"\"\n\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"content_types__model\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"statuses\").get_choices,\n )\n\n class Meta:\n model = Status\n fields = [\n \"id\",\n \"content_types\",\n \"color\",\n \"name\",\n \"slug\",\n \"created\",\n \"last_updated\",\n ]\n\n\nclass StatusModelFilterSetMixin(django_filters.FilterSet):\n \"\"\"\n Mixin to add a `status` filter field to a FilterSet.\n \"\"\"\n\n status = StatusFilter()\n\n\n#\n# Tags\n#\n\n\nclass TagFilterSet(NautobotFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"slug\": \"icontains\",\n \"content_types__model\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=TaggableClassesQuery().get_choices,\n )\n\n class Meta:\n model = Tag\n fields = [\"id\", \"name\", \"slug\", \"color\", \"content_types\"]\n\n\n#\n# Webhooks\n#\n\n\nclass WebhookFilterSet(BaseFilterSet):\n q = SearchFilter(\n filter_predicates={\n \"name\": \"icontains\",\n \"payload_url\": \"icontains\",\n \"additional_headers\": \"icontains\",\n \"body_template\": \"icontains\",\n },\n )\n content_types = ContentTypeMultipleChoiceFilter(\n choices=FeatureQuery(\"webhooks\").get_choices,\n )\n\n class Meta:\n model = Webhook\n fields = [\n \"name\",\n \"payload_url\",\n \"enabled\",\n \"content_types\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n ]\n", "path": "nautobot/extras/filters.py" } ]
diff --git a/changes/2684.fixed b/changes/2684.fixed new file mode 100644 index 00000000000..d4a31df7285 --- /dev/null +++ b/changes/2684.fixed @@ -0,0 +1 @@ +Fixed "The results could not be loaded" when filtering `ContentTypes` in the UI. diff --git a/nautobot/extras/filters.py b/nautobot/extras/filters.py index e500274f5c0..d0121f3b5ec 100644 --- a/nautobot/extras/filters.py +++ b/nautobot/extras/filters.py @@ -416,6 +416,13 @@ class Meta: class ContentTypeFilterSet(BaseFilterSet): + q = SearchFilter( + filter_predicates={ + "app_label": "icontains", + "model": "icontains", + }, + ) + class Meta: model = ContentType fields = ["id", "app_label", "model"] diff --git a/nautobot/extras/tests/test_filters.py b/nautobot/extras/tests/test_filters.py index d07fdfe3ba8..974ff9b7ebd 100644 --- a/nautobot/extras/tests/test_filters.py +++ b/nautobot/extras/tests/test_filters.py @@ -2,6 +2,7 @@ from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType +from django.db.models import Q from nautobot.dcim.filters import DeviceFilterSet from nautobot.dcim.models import ( @@ -24,6 +25,7 @@ from nautobot.extras.filters import ( ComputedFieldFilterSet, ConfigContextFilterSet, + ContentTypeFilterSet, CustomLinkFilterSet, ExportTemplateFilterSet, GitRepositoryFilterSet, @@ -312,6 +314,28 @@ def test_search(self): self.assertEqual(self.filterset(params, self.queryset).qs.values_list("pk", flat=True)[0], value) +class ContentTypeFilterSetTestCase(FilterTestCases.FilterTestCase): + queryset = ContentType.objects.order_by("app_label", "model") + filterset = ContentTypeFilterSet + + def test_app_label(self): + params = {"app_label": ["dcim"]} + self.assertQuerysetEqual(self.filterset(params, self.queryset).qs, self.queryset.filter(app_label="dcim")) + + def test_model(self): + params = {"model": ["device", "virtualmachine"]} + self.assertQuerysetEqual( + self.filterset(params, self.queryset).qs, self.queryset.filter(model__in=["device", "virtualmachine"]) + ) + + def test_search(self): + params = {"q": "circ"} + self.assertQuerysetEqual( + self.filterset(params, self.queryset).qs, + self.queryset.filter(Q(app_label__icontains="circ") | Q(model__icontains="circ")), + ) + + class CustomLinkTestCase(FilterTestCases.FilterTestCase): queryset = CustomLink.objects.all() filterset = CustomLinkFilterSet
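As context for the diff above: the added `SearchFilter` gives `ContentTypeFilterSet` a `q` parameter that matches case-insensitively against either `app_label` or `model`, which is what the new `test_search` asserts with `Q(app_label__icontains=...) | Q(model__icontains=...)`. The sketch below is a minimal, pure-Python illustration of that OR-of-`icontains` matching semantics only; it is not Nautobot code, does not use Django or the real `SearchFilter`, and the sample rows are made up for the example.

```python
# Illustrative sketch only (not Nautobot code): emulates the OR-of-icontains
# behaviour that the new `q` filter_predicates give ContentTypeFilterSet.
# The real filter is evaluated in the database via Django Q objects.

def search_content_types(rows, q):
    """Return rows whose app_label or model contains ``q``, case-insensitively."""
    needle = q.lower()
    return [
        row
        for row in rows
        if needle in row["app_label"].lower() or needle in row["model"].lower()
    ]


# Made-up sample data standing in for ContentType records.
rows = [
    {"app_label": "dcim", "model": "device"},
    {"app_label": "circuits", "model": "circuit"},
    {"app_label": "extras", "model": "tag"},
]

# Mirrors the diff's test case, params = {"q": "circ"}.
print(search_content_types(rows, "circ"))
# [{'app_label': 'circuits', 'model': 'circuit'}]
```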
wagtail__wagtail-11430
Issue: Usage of an instead of a. Usage of an instead of a in client/src/includes/tabs.js: * Set url to have tab an tab hash at the end. and in many more location ![Screenshot 2024-01-08 203606](https://github.com/wagtail/wagtail/assets/108203322/118165fa-960f-43c6-947c-e8ffa32e7357) I have already changed the simple an to a in most of them, I hope it helps.
[ { "content": "import uuid\nfrom typing import Dict\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core import checks\nfrom django.db import migrations, models, transaction\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.utils import translation\nfrom django.utils.encoding import force_str\nfrom modelcluster.fields import ParentalKey\n\nfrom wagtail.actions.copy_for_translation import CopyForTranslationAction\nfrom wagtail.coreutils import (\n get_content_languages,\n get_supported_content_language_variant,\n)\nfrom wagtail.signals import pre_validate_delete\n\n\ndef pk(obj):\n if isinstance(obj, models.Model):\n return obj.pk\n else:\n return obj\n\n\nclass LocaleManager(models.Manager):\n def get_for_language(self, language_code):\n \"\"\"\n Gets a Locale from a language code.\n \"\"\"\n return self.get(\n language_code=get_supported_content_language_variant(language_code)\n )\n\n\nclass Locale(models.Model):\n #: The language code that represents this locale\n #:\n #: The language code can either be a language code on its own (such as ``en``, ``fr``),\n #: or it can include a region code (such as ``en-gb``, ``fr-fr``).\n language_code = models.CharField(max_length=100, unique=True)\n\n # Objects excludes any Locales that have been removed from LANGUAGES, This effectively disables them\n # The Locale management UI needs to be able to see these so we provide a separate manager `all_objects`\n objects = LocaleManager()\n all_objects = models.Manager()\n\n class Meta:\n ordering = [\n \"language_code\",\n ]\n\n @classmethod\n def get_default(cls):\n \"\"\"\n Returns the default Locale based on the site's LANGUAGE_CODE setting\n \"\"\"\n return cls.objects.get_for_language(settings.LANGUAGE_CODE)\n\n @classmethod\n def get_active(cls):\n \"\"\"\n Returns the Locale that corresponds to the currently activated language in Django.\n \"\"\"\n try:\n return cls.objects.get_for_language(translation.get_language())\n except (cls.DoesNotExist, LookupError):\n return cls.get_default()\n\n @transaction.atomic\n def delete(self, *args, **kwargs):\n # Provide a signal like pre_delete, but sent before on_delete validation.\n # This allows us to use the signal to fix up references to the locale to be deleted\n # that would otherwise fail validation.\n # Workaround for https://code.djangoproject.com/ticket/6870\n pre_validate_delete.send(sender=Locale, instance=self)\n return super().delete(*args, **kwargs)\n\n def language_code_is_valid(self):\n return self.language_code in get_content_languages()\n\n def get_display_name(self) -> str:\n try:\n return get_content_languages()[self.language_code]\n except KeyError:\n pass\n try:\n return self.language_name\n except KeyError:\n pass\n\n return self.language_code\n\n def __str__(self):\n return force_str(self.get_display_name())\n\n def _get_language_info(self) -> Dict[str, str]:\n return translation.get_language_info(self.language_code)\n\n @property\n def language_info(self):\n return translation.get_language_info(self.language_code)\n\n @property\n def language_name(self):\n \"\"\"\n Uses data from ``django.conf.locale`` to return the language name in\n English. 
For example, if the object's ``language_code`` were ``\"fr\"``,\n the return value would be ``\"French\"``.\n\n Raises ``KeyError`` if ``django.conf.locale`` has no information\n for the object's ``language_code`` value.\n \"\"\"\n return self.language_info[\"name\"]\n\n @property\n def language_name_local(self):\n \"\"\"\n Uses data from ``django.conf.locale`` to return the language name in\n the language itself. For example, if the ``language_code`` were\n ``\"fr\"`` (French), the return value would be ``\"français\"``.\n\n Raises ``KeyError`` if ``django.conf.locale`` has no information\n for the object's ``language_code`` value.\n \"\"\"\n return self.language_info[\"name_local\"]\n\n @property\n def language_name_localized(self):\n \"\"\"\n Uses data from ``django.conf.locale`` to return the language name in\n the currently active language. For example, if ``language_code`` were\n ``\"fr\"`` (French), and the active language were ``\"da\"`` (Danish), the\n return value would be ``\"Fransk\"``.\n\n Raises ``KeyError`` if ``django.conf.locale`` has no information\n for the object's ``language_code`` value.\n\n \"\"\"\n return translation.gettext(self.language_name)\n\n @property\n def is_bidi(self) -> bool:\n \"\"\"\n Returns a boolean indicating whether the language is bi-directional.\n \"\"\"\n return self.language_code in settings.LANGUAGES_BIDI\n\n @property\n def is_default(self) -> bool:\n \"\"\"\n Returns a boolean indicating whether this object is the default locale.\n \"\"\"\n try:\n return self.language_code == get_supported_content_language_variant(\n settings.LANGUAGE_CODE\n )\n except LookupError:\n return False\n\n @property\n def is_active(self) -> bool:\n \"\"\"\n Returns a boolean indicating whether this object is the currently active locale.\n \"\"\"\n try:\n return self.language_code == get_supported_content_language_variant(\n translation.get_language()\n )\n except LookupError:\n return self.is_default\n\n\nclass TranslatableMixin(models.Model):\n translation_key = models.UUIDField(default=uuid.uuid4, editable=False)\n locale = models.ForeignKey(\n Locale, on_delete=models.PROTECT, related_name=\"+\", editable=False\n )\n locale.wagtail_reference_index_ignore = True\n\n class Meta:\n abstract = True\n unique_together = [(\"translation_key\", \"locale\")]\n\n @classmethod\n def check(cls, **kwargs):\n errors = super().check(**kwargs)\n # No need to check on multi-table-inheritance children as it only needs to be applied to\n # the table that has the translation_key/locale fields\n is_translation_model = cls.get_translation_model() is cls\n if not is_translation_model:\n return errors\n\n unique_constraint_fields = (\"translation_key\", \"locale\")\n\n has_unique_constraint = any(\n isinstance(constraint, models.UniqueConstraint)\n and set(constraint.fields) == set(unique_constraint_fields)\n for constraint in cls._meta.constraints\n )\n\n has_unique_together = unique_constraint_fields in cls._meta.unique_together\n\n # Raise error if subclass has removed constraints\n if not (has_unique_constraint or has_unique_together):\n errors.append(\n checks.Error(\n \"%s is missing a UniqueConstraint for the fields: %s.\"\n % (cls._meta.label, unique_constraint_fields),\n hint=(\n \"Add models.UniqueConstraint(fields=%s, \"\n \"name='unique_translation_key_locale_%s_%s') to %s.Meta.constraints.\"\n % (\n unique_constraint_fields,\n cls._meta.app_label,\n cls._meta.model_name,\n cls.__name__,\n )\n ),\n obj=cls,\n id=\"wagtailcore.E003\",\n )\n )\n\n # Raise error if subclass 
has both UniqueConstraint and unique_together\n if has_unique_constraint and has_unique_together:\n errors.append(\n checks.Error(\n \"%s should not have both UniqueConstraint and unique_together for: %s.\"\n % (cls._meta.label, unique_constraint_fields),\n hint=\"Remove unique_together in favor of UniqueConstraint.\",\n obj=cls,\n id=\"wagtailcore.E003\",\n )\n )\n\n return errors\n\n @property\n def localized(self):\n \"\"\"\n Finds the translation in the current active language.\n\n If there is no translation in the active language, self is returned.\n\n Note: This will not return the translation if it is in draft.\n If you want to include drafts, use the ``.localized_draft`` attribute instead.\n \"\"\"\n from wagtail.models import DraftStateMixin\n\n localized = self.localized_draft\n if isinstance(self, DraftStateMixin) and not localized.live:\n return self\n\n return localized\n\n @property\n def localized_draft(self):\n \"\"\"\n Finds the translation in the current active language.\n\n If there is no translation in the active language, self is returned.\n\n Note: This will return translations that are in draft. If you want to exclude\n these, use the ``.localized`` attribute.\n \"\"\"\n if not getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n return self\n\n try:\n locale = Locale.get_active()\n except (LookupError, Locale.DoesNotExist):\n return self\n\n if locale.id == self.locale_id:\n return self\n\n return self.get_translation_or_none(locale) or self\n\n def get_translations(self, inclusive=False):\n \"\"\"\n Returns a queryset containing the translations of this instance.\n \"\"\"\n translations = self.__class__.objects.filter(\n translation_key=self.translation_key\n )\n\n if inclusive is False:\n translations = translations.exclude(id=self.id)\n\n return translations\n\n def get_translation(self, locale):\n \"\"\"\n Finds the translation in the specified locale.\n\n If there is no translation in that locale, this raises a ``model.DoesNotExist`` exception.\n \"\"\"\n return self.get_translations(inclusive=True).get(locale_id=pk(locale))\n\n def get_translation_or_none(self, locale):\n \"\"\"\n Finds the translation in the specified locale.\n\n If there is no translation in that locale, this returns None.\n \"\"\"\n try:\n return self.get_translation(locale)\n except self.__class__.DoesNotExist:\n return None\n\n def has_translation(self, locale):\n \"\"\"\n Returns True if a translation exists in the specified locale.\n \"\"\"\n return (\n self.get_translations(inclusive=True).filter(locale_id=pk(locale)).exists()\n )\n\n def copy_for_translation(self, locale, exclude_fields=None):\n \"\"\"\n Creates a copy of this instance with the specified locale.\n\n Note that the copy is initially unsaved.\n \"\"\"\n return CopyForTranslationAction(\n self,\n locale,\n exclude_fields=exclude_fields,\n ).execute()\n\n def get_default_locale(self):\n \"\"\"\n Finds the default locale to use for this object.\n\n This will be called just before the initial save.\n \"\"\"\n # Check if the object has any parental keys to another translatable model\n # If so, take the locale from the object referenced in that parental key\n parental_keys = [\n field\n for field in self._meta.get_fields()\n if isinstance(field, ParentalKey)\n and issubclass(field.related_model, TranslatableMixin)\n ]\n\n if parental_keys:\n parent_id = parental_keys[0].value_from_object(self)\n return (\n parental_keys[0]\n .related_model.objects.defer()\n .select_related(\"locale\")\n .get(id=parent_id)\n .locale\n )\n\n 
return Locale.get_default()\n\n @classmethod\n def get_translation_model(cls):\n \"\"\"\n Returns this model's \"Translation model\".\n\n The \"Translation model\" is the model that has the ``locale`` and\n ``translation_key`` fields.\n Typically this would be the current model, but it may be a\n super-class if multi-table inheritance is in use (as is the case\n for ``wagtailcore.Page``).\n \"\"\"\n return cls._meta.get_field(\"locale\").model\n\n\ndef bootstrap_translatable_model(model, locale):\n \"\"\"\n This function populates the \"translation_key\", and \"locale\" fields on model instances that were created\n before wagtail-localize was added to the site.\n\n This can be called from a data migration, or instead you could use the \"boostrap_translatable_models\"\n management command.\n \"\"\"\n for instance in (\n model.objects.filter(translation_key__isnull=True).defer().iterator()\n ):\n instance.translation_key = uuid.uuid4()\n instance.locale = locale\n instance.save(update_fields=[\"translation_key\", \"locale\"])\n\n\nclass BootstrapTranslatableModel(migrations.RunPython):\n def __init__(self, model_string, language_code=None):\n if language_code is None:\n language_code = get_supported_content_language_variant(\n settings.LANGUAGE_CODE\n )\n\n def forwards(apps, schema_editor):\n model = apps.get_model(model_string)\n Locale = apps.get_model(\"wagtailcore.Locale\")\n\n locale = Locale.objects.get(language_code=language_code)\n bootstrap_translatable_model(model, locale)\n\n def backwards(apps, schema_editor):\n pass\n\n super().__init__(forwards, backwards)\n\n\nclass BootstrapTranslatableMixin(TranslatableMixin):\n \"\"\"\n A version of TranslatableMixin without uniqueness constraints.\n\n This is to make it easy to transition existing models to being translatable.\n\n The process is as follows:\n - Add BootstrapTranslatableMixin to the model\n - Run makemigrations\n - Create a data migration for each app, then use the BootstrapTranslatableModel operation in\n wagtail.models on each model in that app\n - Change BootstrapTranslatableMixin to TranslatableMixin\n - Run makemigrations again\n - Migrate!\n \"\"\"\n\n translation_key = models.UUIDField(null=True, editable=False)\n locale = models.ForeignKey(\n Locale, on_delete=models.PROTECT, null=True, related_name=\"+\", editable=False\n )\n\n @classmethod\n def check(cls, **kwargs):\n # skip the check in TranslatableMixin that enforces the unique-together constraint\n return super(TranslatableMixin, cls).check(**kwargs)\n\n class Meta:\n abstract = True\n\n\ndef get_translatable_models(include_subclasses=False):\n \"\"\"\n Returns a list of all concrete models that inherit from TranslatableMixin.\n By default, this only includes models that are direct children of TranslatableMixin,\n to get all models, set the include_subclasses attribute to True.\n \"\"\"\n translatable_models = [\n model\n for model in apps.get_models()\n if issubclass(model, TranslatableMixin) and not model._meta.abstract\n ]\n\n if include_subclasses is False:\n # Exclude models that inherit from another translatable model\n root_translatable_models = set()\n\n for model in translatable_models:\n root_translatable_models.add(model.get_translation_model())\n\n translatable_models = [\n model for model in translatable_models if model in root_translatable_models\n ]\n\n return translatable_models\n\n\n@receiver(pre_save)\ndef set_locale_on_new_instance(sender, instance, **kwargs):\n if not isinstance(instance, TranslatableMixin):\n return\n\n if 
instance.locale_id is not None:\n return\n\n # If this is a fixture load, use the global default Locale\n # as the page tree is probably in an flux\n if kwargs[\"raw\"]:\n instance.locale = Locale.get_default()\n return\n\n instance.locale = instance.get_default_locale()\n", "path": "wagtail/models/i18n.py" } ]
[ { "content": "import uuid\nfrom typing import Dict\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core import checks\nfrom django.db import migrations, models, transaction\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.utils import translation\nfrom django.utils.encoding import force_str\nfrom modelcluster.fields import ParentalKey\n\nfrom wagtail.actions.copy_for_translation import CopyForTranslationAction\nfrom wagtail.coreutils import (\n get_content_languages,\n get_supported_content_language_variant,\n)\nfrom wagtail.signals import pre_validate_delete\n\n\ndef pk(obj):\n if isinstance(obj, models.Model):\n return obj.pk\n else:\n return obj\n\n\nclass LocaleManager(models.Manager):\n def get_for_language(self, language_code):\n \"\"\"\n Gets a Locale from a language code.\n \"\"\"\n return self.get(\n language_code=get_supported_content_language_variant(language_code)\n )\n\n\nclass Locale(models.Model):\n #: The language code that represents this locale\n #:\n #: The language code can either be a language code on its own (such as ``en``, ``fr``),\n #: or it can include a region code (such as ``en-gb``, ``fr-fr``).\n language_code = models.CharField(max_length=100, unique=True)\n\n # Objects excludes any Locales that have been removed from LANGUAGES, This effectively disables them\n # The Locale management UI needs to be able to see these so we provide a separate manager `all_objects`\n objects = LocaleManager()\n all_objects = models.Manager()\n\n class Meta:\n ordering = [\n \"language_code\",\n ]\n\n @classmethod\n def get_default(cls):\n \"\"\"\n Returns the default Locale based on the site's LANGUAGE_CODE setting\n \"\"\"\n return cls.objects.get_for_language(settings.LANGUAGE_CODE)\n\n @classmethod\n def get_active(cls):\n \"\"\"\n Returns the Locale that corresponds to the currently activated language in Django.\n \"\"\"\n try:\n return cls.objects.get_for_language(translation.get_language())\n except (cls.DoesNotExist, LookupError):\n return cls.get_default()\n\n @transaction.atomic\n def delete(self, *args, **kwargs):\n # Provide a signal like pre_delete, but sent before on_delete validation.\n # This allows us to use the signal to fix up references to the locale to be deleted\n # that would otherwise fail validation.\n # Workaround for https://code.djangoproject.com/ticket/6870\n pre_validate_delete.send(sender=Locale, instance=self)\n return super().delete(*args, **kwargs)\n\n def language_code_is_valid(self):\n return self.language_code in get_content_languages()\n\n def get_display_name(self) -> str:\n try:\n return get_content_languages()[self.language_code]\n except KeyError:\n pass\n try:\n return self.language_name\n except KeyError:\n pass\n\n return self.language_code\n\n def __str__(self):\n return force_str(self.get_display_name())\n\n def _get_language_info(self) -> Dict[str, str]:\n return translation.get_language_info(self.language_code)\n\n @property\n def language_info(self):\n return translation.get_language_info(self.language_code)\n\n @property\n def language_name(self):\n \"\"\"\n Uses data from ``django.conf.locale`` to return the language name in\n English. 
For example, if the object's ``language_code`` were ``\"fr\"``,\n the return value would be ``\"French\"``.\n\n Raises ``KeyError`` if ``django.conf.locale`` has no information\n for the object's ``language_code`` value.\n \"\"\"\n return self.language_info[\"name\"]\n\n @property\n def language_name_local(self):\n \"\"\"\n Uses data from ``django.conf.locale`` to return the language name in\n the language itself. For example, if the ``language_code`` were\n ``\"fr\"`` (French), the return value would be ``\"français\"``.\n\n Raises ``KeyError`` if ``django.conf.locale`` has no information\n for the object's ``language_code`` value.\n \"\"\"\n return self.language_info[\"name_local\"]\n\n @property\n def language_name_localized(self):\n \"\"\"\n Uses data from ``django.conf.locale`` to return the language name in\n the currently active language. For example, if ``language_code`` were\n ``\"fr\"`` (French), and the active language were ``\"da\"`` (Danish), the\n return value would be ``\"Fransk\"``.\n\n Raises ``KeyError`` if ``django.conf.locale`` has no information\n for the object's ``language_code`` value.\n\n \"\"\"\n return translation.gettext(self.language_name)\n\n @property\n def is_bidi(self) -> bool:\n \"\"\"\n Returns a boolean indicating whether the language is bi-directional.\n \"\"\"\n return self.language_code in settings.LANGUAGES_BIDI\n\n @property\n def is_default(self) -> bool:\n \"\"\"\n Returns a boolean indicating whether this object is the default locale.\n \"\"\"\n try:\n return self.language_code == get_supported_content_language_variant(\n settings.LANGUAGE_CODE\n )\n except LookupError:\n return False\n\n @property\n def is_active(self) -> bool:\n \"\"\"\n Returns a boolean indicating whether this object is the currently active locale.\n \"\"\"\n try:\n return self.language_code == get_supported_content_language_variant(\n translation.get_language()\n )\n except LookupError:\n return self.is_default\n\n\nclass TranslatableMixin(models.Model):\n translation_key = models.UUIDField(default=uuid.uuid4, editable=False)\n locale = models.ForeignKey(\n Locale, on_delete=models.PROTECT, related_name=\"+\", editable=False\n )\n locale.wagtail_reference_index_ignore = True\n\n class Meta:\n abstract = True\n unique_together = [(\"translation_key\", \"locale\")]\n\n @classmethod\n def check(cls, **kwargs):\n errors = super().check(**kwargs)\n # No need to check on multi-table-inheritance children as it only needs to be applied to\n # the table that has the translation_key/locale fields\n is_translation_model = cls.get_translation_model() is cls\n if not is_translation_model:\n return errors\n\n unique_constraint_fields = (\"translation_key\", \"locale\")\n\n has_unique_constraint = any(\n isinstance(constraint, models.UniqueConstraint)\n and set(constraint.fields) == set(unique_constraint_fields)\n for constraint in cls._meta.constraints\n )\n\n has_unique_together = unique_constraint_fields in cls._meta.unique_together\n\n # Raise error if subclass has removed constraints\n if not (has_unique_constraint or has_unique_together):\n errors.append(\n checks.Error(\n \"%s is missing a UniqueConstraint for the fields: %s.\"\n % (cls._meta.label, unique_constraint_fields),\n hint=(\n \"Add models.UniqueConstraint(fields=%s, \"\n \"name='unique_translation_key_locale_%s_%s') to %s.Meta.constraints.\"\n % (\n unique_constraint_fields,\n cls._meta.app_label,\n cls._meta.model_name,\n cls.__name__,\n )\n ),\n obj=cls,\n id=\"wagtailcore.E003\",\n )\n )\n\n # Raise error if subclass 
has both UniqueConstraint and unique_together\n if has_unique_constraint and has_unique_together:\n errors.append(\n checks.Error(\n \"%s should not have both UniqueConstraint and unique_together for: %s.\"\n % (cls._meta.label, unique_constraint_fields),\n hint=\"Remove unique_together in favor of UniqueConstraint.\",\n obj=cls,\n id=\"wagtailcore.E003\",\n )\n )\n\n return errors\n\n @property\n def localized(self):\n \"\"\"\n Finds the translation in the current active language.\n\n If there is no translation in the active language, self is returned.\n\n Note: This will not return the translation if it is in draft.\n If you want to include drafts, use the ``.localized_draft`` attribute instead.\n \"\"\"\n from wagtail.models import DraftStateMixin\n\n localized = self.localized_draft\n if isinstance(self, DraftStateMixin) and not localized.live:\n return self\n\n return localized\n\n @property\n def localized_draft(self):\n \"\"\"\n Finds the translation in the current active language.\n\n If there is no translation in the active language, self is returned.\n\n Note: This will return translations that are in draft. If you want to exclude\n these, use the ``.localized`` attribute.\n \"\"\"\n if not getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n return self\n\n try:\n locale = Locale.get_active()\n except (LookupError, Locale.DoesNotExist):\n return self\n\n if locale.id == self.locale_id:\n return self\n\n return self.get_translation_or_none(locale) or self\n\n def get_translations(self, inclusive=False):\n \"\"\"\n Returns a queryset containing the translations of this instance.\n \"\"\"\n translations = self.__class__.objects.filter(\n translation_key=self.translation_key\n )\n\n if inclusive is False:\n translations = translations.exclude(id=self.id)\n\n return translations\n\n def get_translation(self, locale):\n \"\"\"\n Finds the translation in the specified locale.\n\n If there is no translation in that locale, this raises a ``model.DoesNotExist`` exception.\n \"\"\"\n return self.get_translations(inclusive=True).get(locale_id=pk(locale))\n\n def get_translation_or_none(self, locale):\n \"\"\"\n Finds the translation in the specified locale.\n\n If there is no translation in that locale, this returns None.\n \"\"\"\n try:\n return self.get_translation(locale)\n except self.__class__.DoesNotExist:\n return None\n\n def has_translation(self, locale):\n \"\"\"\n Returns True if a translation exists in the specified locale.\n \"\"\"\n return (\n self.get_translations(inclusive=True).filter(locale_id=pk(locale)).exists()\n )\n\n def copy_for_translation(self, locale, exclude_fields=None):\n \"\"\"\n Creates a copy of this instance with the specified locale.\n\n Note that the copy is initially unsaved.\n \"\"\"\n return CopyForTranslationAction(\n self,\n locale,\n exclude_fields=exclude_fields,\n ).execute()\n\n def get_default_locale(self):\n \"\"\"\n Finds the default locale to use for this object.\n\n This will be called just before the initial save.\n \"\"\"\n # Check if the object has any parental keys to another translatable model\n # If so, take the locale from the object referenced in that parental key\n parental_keys = [\n field\n for field in self._meta.get_fields()\n if isinstance(field, ParentalKey)\n and issubclass(field.related_model, TranslatableMixin)\n ]\n\n if parental_keys:\n parent_id = parental_keys[0].value_from_object(self)\n return (\n parental_keys[0]\n .related_model.objects.defer()\n .select_related(\"locale\")\n .get(id=parent_id)\n .locale\n )\n\n 
return Locale.get_default()\n\n @classmethod\n def get_translation_model(cls):\n \"\"\"\n Returns this model's \"Translation model\".\n\n The \"Translation model\" is the model that has the ``locale`` and\n ``translation_key`` fields.\n Typically this would be the current model, but it may be a\n super-class if multi-table inheritance is in use (as is the case\n for ``wagtailcore.Page``).\n \"\"\"\n return cls._meta.get_field(\"locale\").model\n\n\ndef bootstrap_translatable_model(model, locale):\n \"\"\"\n This function populates the \"translation_key\", and \"locale\" fields on model instances that were created\n before wagtail-localize was added to the site.\n\n This can be called from a data migration, or instead you could use the \"boostrap_translatable_models\"\n management command.\n \"\"\"\n for instance in (\n model.objects.filter(translation_key__isnull=True).defer().iterator()\n ):\n instance.translation_key = uuid.uuid4()\n instance.locale = locale\n instance.save(update_fields=[\"translation_key\", \"locale\"])\n\n\nclass BootstrapTranslatableModel(migrations.RunPython):\n def __init__(self, model_string, language_code=None):\n if language_code is None:\n language_code = get_supported_content_language_variant(\n settings.LANGUAGE_CODE\n )\n\n def forwards(apps, schema_editor):\n model = apps.get_model(model_string)\n Locale = apps.get_model(\"wagtailcore.Locale\")\n\n locale = Locale.objects.get(language_code=language_code)\n bootstrap_translatable_model(model, locale)\n\n def backwards(apps, schema_editor):\n pass\n\n super().__init__(forwards, backwards)\n\n\nclass BootstrapTranslatableMixin(TranslatableMixin):\n \"\"\"\n A version of TranslatableMixin without uniqueness constraints.\n\n This is to make it easy to transition existing models to being translatable.\n\n The process is as follows:\n - Add BootstrapTranslatableMixin to the model\n - Run makemigrations\n - Create a data migration for each app, then use the BootstrapTranslatableModel operation in\n wagtail.models on each model in that app\n - Change BootstrapTranslatableMixin to TranslatableMixin\n - Run makemigrations again\n - Migrate!\n \"\"\"\n\n translation_key = models.UUIDField(null=True, editable=False)\n locale = models.ForeignKey(\n Locale, on_delete=models.PROTECT, null=True, related_name=\"+\", editable=False\n )\n\n @classmethod\n def check(cls, **kwargs):\n # skip the check in TranslatableMixin that enforces the unique-together constraint\n return super(TranslatableMixin, cls).check(**kwargs)\n\n class Meta:\n abstract = True\n\n\ndef get_translatable_models(include_subclasses=False):\n \"\"\"\n Returns a list of all concrete models that inherit from TranslatableMixin.\n By default, this only includes models that are direct children of TranslatableMixin,\n to get all models, set the include_subclasses attribute to True.\n \"\"\"\n translatable_models = [\n model\n for model in apps.get_models()\n if issubclass(model, TranslatableMixin) and not model._meta.abstract\n ]\n\n if include_subclasses is False:\n # Exclude models that inherit from another translatable model\n root_translatable_models = set()\n\n for model in translatable_models:\n root_translatable_models.add(model.get_translation_model())\n\n translatable_models = [\n model for model in translatable_models if model in root_translatable_models\n ]\n\n return translatable_models\n\n\n@receiver(pre_save)\ndef set_locale_on_new_instance(sender, instance, **kwargs):\n if not isinstance(instance, TranslatableMixin):\n return\n\n if 
instance.locale_id is not None:\n return\n\n # If this is a fixture load, use the global default Locale\n # as the page tree is probably in flux\n if kwargs[\"raw\"]:\n instance.locale = Locale.get_default()\n return\n\n instance.locale = instance.get_default_locale()\n", "path": "wagtail/models/i18n.py" } ]
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 811caf992cf0..ccb59504ba97 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -780,6 +780,7 @@ * Nikhil S Kalburgi * Salvo Polizzi * Badr Fourane +* Vaishnav Dasari ## Translators diff --git a/client/src/includes/tabs.js b/client/src/includes/tabs.js index 3281acf32b62..f8db67d28083 100644 --- a/client/src/includes/tabs.js +++ b/client/src/includes/tabs.js @@ -294,7 +294,7 @@ class Tabs { } /** - * Set url to have tab an tab hash at the end + * Set url to have a tab hash at the end */ setURLHash(tabId) { if ( diff --git a/wagtail/admin/tests/pages/test_bulk_actions/test_bulk_publish.py b/wagtail/admin/tests/pages/test_bulk_actions/test_bulk_publish.py index 86d2d019dcee..21a0d235d845 100644 --- a/wagtail/admin/tests/pages/test_bulk_actions/test_bulk_publish.py +++ b/wagtail/admin/tests/pages/test_bulk_actions/test_bulk_publish.py @@ -71,12 +71,12 @@ def setUp(self): def test_publish_view(self): """ - This tests that the publish view responds with an publish confirm page + This tests that the publish view responds with a publish confirm page """ # Request confirm publish page response = self.client.get(self.url) - # # Check that the user received an publish confirm page + # # Check that the user received a publish confirm page self.assertEqual(response.status_code, 200) self.assertTemplateUsed( response, "wagtailadmin/pages/bulk_actions/confirm_bulk_publish.html" @@ -206,12 +206,12 @@ def hook_func(request, action_type, pages, action_class_instance): def test_publish_descendants_view(self): """ - This tests that the publish view responds with an publish confirm page that does not contain the form field 'include_descendants' + This tests that the publish view responds with a publish confirm page that does not contain the form field 'include_descendants' """ # Get publish page for page with no descendants response = self.client.get(self.url) - # Check that the user received an publish confirm page + # Check that the user received a publish confirm page self.assertEqual(response.status_code, 200) self.assertTemplateUsed( response, "wagtailadmin/pages/bulk_actions/confirm_bulk_publish.html" @@ -315,12 +315,12 @@ def setUp(self): def test_publish_descendants_view(self): """ - This tests that the publish view responds with an publish confirm page that contains the form field 'include_descendants' + This tests that the publish view responds with a publish confirm page that contains the form field 'include_descendants' """ # Get publish page response = self.client.get(self.url) - # Check that the user received an publish confirm page + # Check that the user received a publish confirm page self.assertEqual(response.status_code, 200) self.assertTemplateUsed( response, "wagtailadmin/pages/bulk_actions/confirm_bulk_publish.html" diff --git a/wagtail/admin/tests/test_collections_views.py b/wagtail/admin/tests/test_collections_views.py index b0ef5ed7bc86..223e0d1f1e1f 100644 --- a/wagtail/admin/tests/test_collections_views.py +++ b/wagtail/admin/tests/test_collections_views.py @@ -535,7 +535,7 @@ def test_page_shows_delete_link_only_if_delete_permitted(self): # Retrieve edit form and check fields response = self.get(collection_id=self.marketing_sub_collection.id) self.assertNotContains(response, "Delete collection") - # Add delete permission to parent collection an try again + # Add delete permission to parent collection and try again GroupCollectionPermission.objects.create( group=self.marketing_group, collection=self.marketing_collection, diff 
--git a/wagtail/contrib/search_promotions/tests.py b/wagtail/contrib/search_promotions/tests.py index 0af4ff2cbc9d..f0c4ad601d49 100644 --- a/wagtail/contrib/search_promotions/tests.py +++ b/wagtail/contrib/search_promotions/tests.py @@ -483,7 +483,7 @@ class TestSearchPromotionsEditView(WagtailTestUtils, TestCase): def setUp(self): self.user = self.login() - # Create an search pick to edit + # Create a search pick to edit self.query = Query.get("Hello") self.search_pick = self.query.editors_picks.create( page_id=1, sort_order=0, description="Root page" @@ -645,7 +645,7 @@ class TestSearchPromotionsDeleteView(WagtailTestUtils, TestCase): def setUp(self): self.login() - # Create an search pick to delete + # Create a search pick to delete self.query = Query.get("Hello") self.search_pick = self.query.editors_picks.create( page_id=1, description="Root page" diff --git a/wagtail/models/i18n.py b/wagtail/models/i18n.py index 474e9732f02c..68dcd596bcf0 100644 --- a/wagtail/models/i18n.py +++ b/wagtail/models/i18n.py @@ -474,7 +474,7 @@ def set_locale_on_new_instance(sender, instance, **kwargs): return # If this is a fixture load, use the global default Locale - # as the page tree is probably in an flux + # as the page tree is probably in flux if kwargs["raw"]: instance.locale = Locale.get_default() return
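The BootstrapTranslatableMixin docstring in the wagtail/models/i18n.py content above ends with a per-app data-migration step that applies the BootstrapTranslatableModel operation from wagtail.models. A minimal sketch of what such a migration could look like is given below; the "blog" app label, the "blog.BlogPage" model string, and the dependency name are illustrative placeholders only, not taken from this record.

# Hedged sketch of the data-migration step described in the
# BootstrapTranslatableMixin docstring above. "blog", "blog.BlogPage" and
# the dependency name are placeholders for illustration only.
from django.db import migrations

from wagtail.models import BootstrapTranslatableModel


class Migration(migrations.Migration):
    dependencies = [
        # the migration produced by the earlier makemigrations step
        ("blog", "0002_add_bootstrap_translatable_fields"),
    ]

    operations = [
        # backfills translation_key and locale on existing BlogPage rows,
        # as documented in the mixin's docstring
        BootstrapTranslatableModel("blog.BlogPage"),
    ]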
ivy-llc__ivy-13655
Add torch.Tensor.mul
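The before_files for this record (next entry) already route Tensor.__mul__ through torch_frontend.mul and define element-wise methods such as add/add_ guarded by with_unsupported_dtypes. A minimal sketch of how the requested mul method, plus an in-place mul_ variant, might be added to the Tensor class following that pattern is shown below; the dtype restriction and the inclusion of mul_ are assumptions, since the record's after_files content is cut off before the relevant part of the class.

    # Hedged sketch: candidate methods for the Tensor class in
    # ivy/functional/frontends/torch/tensor.py, mirroring the existing
    # add/add_ pattern. The dtype list and the mul_ variant are assumptions.
    @with_unsupported_dtypes({"1.11.0 and below": ("bfloat16",)}, "torch")
    def mul(self, other):
        # delegate element-wise multiplication to the frontend function,
        # exactly as __mul__ already does
        return torch_frontend.mul(self._ivy_array, other)

    @with_unsupported_dtypes({"1.11.0 and below": ("bfloat16",)}, "torch")
    def mul_(self, other):
        # in-place variant, following the add_ / pow_ style of the class
        self._ivy_array = self.mul(other).ivy_array
        return self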
[ { "content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.torch as torch_frontend\nimport ivy.functional.frontends.torch.nn.functional as torch_frontend_nn\nfrom ivy.functional.frontends.numpy.creation_routines.from_existing_data import (\n array as np_frontend_array,\n)\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.func_wrapper import with_supported_dtypes\n\n\nclass Tensor:\n def __init__(self, array, device=None, _init_overload=False):\n\n if _init_overload:\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n else:\n self._ivy_array = ivy.array(\n array, dtype=torch_frontend.float32, device=device\n )\n\n def __repr__(self):\n return str(self._ivy_array.__repr__()).replace(\n \"ivy.array\", \"ivy.frontends.torch.Tensor\"\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def device(self):\n return ivy.dev(self._ivy_array)\n\n @property\n def dtype(self):\n return self._ivy_array.dtype\n\n @property\n def shape(self):\n return self._ivy_array.shape\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Instance Methods #\n # ---------------- #\n def reshape(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n return torch_frontend.reshape(self._ivy_array, shape)\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n return torch_frontend.reshape(self._ivy_array, shape)\n else:\n return torch_frontend.reshape(self._ivy_array, args)\n return torch_frontend.reshape(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def reshape_as(self, other):\n return torch_frontend.reshape(self, other.shape)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add(self, other, *, alpha=1):\n return torch_frontend.add(self._ivy_array, other, alpha=alpha)\n\n def chunk(self, chunks, dim=0):\n return torch_frontend.chunk(self._ivy_array, chunks, dim=dim)\n\n def any(self, dim=None, keepdim=False, *, out=None):\n return torch_frontend.any(self._ivy_array, dim=dim, keepdim=keepdim, out=out)\n\n def all(self, dim=None, keepdim=False):\n return torch_frontend.all(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add_(self, other, *, alpha=1):\n self._ivy_array = self.add(other, alpha=alpha).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin(self):\n return torch_frontend.asin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin_(self):\n self._ivy_array = self.asin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def sum(self):\n return torch_frontend.sum(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin(self):\n return torch_frontend.sin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin_(self):\n self._ivy_array = self.sin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh(self):\n return 
torch_frontend.sinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh_(self):\n self._ivy_array = self.sinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos(self):\n return torch_frontend.cos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos_(self):\n self._ivy_array = self.cos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh(self):\n return torch_frontend.cosh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh_(self):\n self._ivy_array = self.cosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin(self):\n return torch_frontend.arcsin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin_(self):\n self._ivy_array = self.arcsin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan_(self):\n self._ivy_array = self.atan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2(self, other):\n return torch_frontend.atan2(self._ivy_array, other)\n\n def view(self, *args, shape=None):\n \"\"\"\n Reshape Tensor.\n\n possible arguments are either:\n - size\n - tuple of ints\n - list of ints\n - torch.Size object\n - ints\n Parameters\n ----------\n args:int arguments\n shape: optional shape\n\n Returns reshaped tensor\n -------\n \"\"\"\n if shape and not args:\n shape_tup = shape\n elif args and not shape:\n if (\n isinstance(args[0], tuple)\n or isinstance(args[0], list)\n or type(args[0]).__name__ == \"Size\"\n ) and len(args) == 1:\n shape_tup = args[0]\n else:\n shape_tup = args\n else:\n raise ValueError(\n \"View only accepts as argument ints, tuple or list of ints or \"\n \"the keyword argument size.\"\n )\n return torch_frontend.reshape(self._ivy_array, shape_tup)\n\n def float(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.float32, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh(self):\n return torch_frontend.asinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh_(self):\n self._ivy_array = self.asinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan(self):\n return torch_frontend.tan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan_(self):\n self._ivy_array = self.tan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh(self):\n return torch_frontend.tanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh_(self):\n self._ivy_array = self.tanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atanh(self):\n return torch_frontend.atanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, 
\"torch\")\n def atanh_(self):\n self._ivy_array = self.atanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh(self):\n return torch_frontend.arctanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh_(self):\n self._ivy_array = self.arctanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log(self):\n return torch_frontend.log(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log_(self):\n self._ivy_array = self.log().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log2(self):\n return torch_frontend.log2(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def relu(self):\n return torch_frontend_nn.relu(self._ivy_array)\n\n def amax(self, dim=None, keepdim=False):\n return torch_frontend.amax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def amin(self, dim=None, keepdim=False):\n return torch_frontend.amin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def aminmax(self, dim=None, keepdim=False):\n return torch_frontend.aminmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def abs(self):\n return torch_frontend.abs(self._ivy_array)\n\n def abs_(self):\n self._ivy_array = self.abs().ivy_array\n return self\n\n def logical_and(self, other):\n return torch_frontend.logical_and(self._ivy_array, other)\n\n def bitwise_not(self, *, out=None):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def bitwise_and(self, other):\n return torch_frontend.bitwise_and(self._ivy_array, other)\n\n def bitwise_or(self, other, *, out=None):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n def bitwise_left_shift(self, other, *, out=None):\n return torch_frontend.bitwise_left_shift(self._ivy_array, other)\n\n @with_supported_dtypes({\"1.11.0 and below\": (\"integer\",)}, \"torch\")\n def bitwise_or_(self, other, *, out=None):\n self._ivy_array = self.bitwise_or(other, out=out).ivy_array\n return self\n\n def contiguous(self, memory_format=None):\n return torch_frontend.tensor(self.ivy_array)\n\n def new_ones(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.ones(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def new_zeros(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.zeros(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def to(self, *args, **kwargs):\n if len(args) > 0:\n if isinstance(args[0], (ivy.Dtype, ivy.NativeDtype)):\n if self.dtype == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, dtype=args[0])\n return cast_tensor\n if isinstance(args[0], (ivy.Device, ivy.NativeDevice, str)):\n if isinstance(args[0], str):\n ivy.utils.assertions.check_elem_in_list(\n args[0],\n [\n \"cpu\",\n \"cuda\",\n \"xpu\",\n \"mkldnn\",\n \"opengl\",\n \"opencl\",\n \"ideep\",\n \"hip\",\n \"ve\",\n \"ort\",\n \"mlc\",\n \"xla\",\n \"lazy\",\n \"vulkan\",\n \"meta\",\n \"hpu\",\n ],\n )\n if self.device == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, device=args[0])\n return cast_tensor\n else:\n if 
self.dtype == args[0].dtype and self.device == ivy.dev(args[0]):\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n dtype=args[0].dtype,\n device=args[0].device,\n )\n return cast_tensor\n else:\n if (\n \"dtype\" in kwargs\n and \"device\" in kwargs\n and self.dtype == kwargs[\"dtype\"]\n and self.device == kwargs[\"device\"]\n ):\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n device=kwargs[\"device\"] if \"device\" in kwargs else self.device,\n dtype=kwargs[\"dtype\"] if \"dtype\" in kwargs else self.dtype,\n )\n return cast_tensor\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan_(self):\n self._ivy_array = self.arctan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2(self, other):\n return torch_frontend.arctan2(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2_(self, other):\n self._ivy_array = self.arctan2(other).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos(self):\n return torch_frontend.acos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos_(self):\n self._ivy_array = self.acos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos(self):\n return torch_frontend.arccos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos_(self):\n self._ivy_array = self.arccos().ivy_array\n return self\n\n def new_tensor(\n self,\n data,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.asarray(data, copy=True, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def view_as(self, other):\n return self.view(other.shape)\n\n def expand(self, *args, size=None):\n if args and size:\n raise TypeError(\"expand() got multiple values for argument 'size'\")\n if args:\n if isinstance(args[0], (tuple, list)):\n size = args[0]\n else:\n size = args\n\n return torch_frontend.tensor(ivy.expand(self._ivy_array, tuple(size)))\n\n def expand_as(self, other):\n return self.expand(\n ivy.shape(other.ivy_array if isinstance(other, Tensor) else other)\n )\n\n def detach(self):\n return torch_frontend.tensor(\n ivy.stop_gradient(self._ivy_array, preserve_type=False)\n )\n\n def unsqueeze(self, dim):\n return torch_frontend.unsqueeze(self, dim)\n\n def unsqueeze_(self, dim):\n self._ivy_array = self.unsqueeze(dim).ivy_array\n return self\n\n def split(self, split_size, dim=0):\n return torch_frontend.split(self, split_size, dim)\n\n def tensor_split(self, indices_or_sections, dim=0):\n return torch_frontend.tensor_split(self.ivy_array, indices_or_sections, dim)\n\n def vsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.vsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def hsplit(self, indices_or_sections=None, /, *, 
indices=None, sections=None):\n return torch_frontend.hsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def dsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.dsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def dim(self):\n return self._ivy_array.ndim\n\n def new_full(\n self,\n size,\n fill_value,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.full(size, fill_value, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def new_empty(\n self,\n size,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.empty(size, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def unfold(self, dimension, size, step):\n slices = []\n for i in range(0, self._ivy_array.shape[dimension] - size + 1, step):\n slices.append(self._ivy_array[i : i + size])\n return torch_frontend.stack(slices)\n\n def long(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int64, copy=False)\n return self\n\n def max(self, dim=None, keepdim=False):\n return torch_frontend.max(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def is_cuda(self):\n return \"gpu\" in ivy.dev(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow(self, exponent):\n return torch_frontend.pow(self._ivy_array, exponent)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow_(self, exponent):\n self._ivy_array = self.pow(exponent).ivy_array\n return self\n\n def size(self, dim=None):\n shape = ivy.shape(self._ivy_array)\n if dim is None:\n return shape\n else:\n try:\n return shape[dim]\n except IndexError:\n raise IndexError(\n \"Dimension out of range (expected to be in range of [{}, {}], \"\n \"but got {}\".format(len(shape), len(shape) - 1, dim)\n )\n\n def matmul(self, other):\n return torch_frontend.matmul(self._ivy_array, other)\n\n def argwhere(self):\n return torch_frontend.argwhere(self._ivy_array)\n\n def argmax(self, dim=None, keepdim=False):\n return torch_frontend.argmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argmin(self, dim=None, keepdim=False):\n return torch_frontend.argmin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argsort(self, dim=-1, descending=False):\n return torch_frontend.argsort(self._ivy_array, dim=dim, descending=descending)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def ceil(self):\n return torch_frontend.ceil(self._ivy_array)\n\n def min(self, dim=None, keepdim=False):\n return torch_frontend.min(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def permute(self, *args, dims=None):\n if args and dims:\n raise TypeError(\"permute() got multiple values for argument 'dims'\")\n if dims is not None:\n return torch_frontend.permute(self._ivy_array, dims)\n if args:\n if isinstance(args[0], (tuple, list)):\n dims = args[0]\n return torch_frontend.permute(self._ivy_array, dims)\n else:\n return torch_frontend.permute(self._ivy_array, args)\n return torch_frontend.permute(self._ivy_array)\n\n def mean(self, dim=None, 
keepdim=False):\n return torch_frontend.mean(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def transpose(self, dim0, dim1):\n return torch_frontend.transpose(self._ivy_array, dim0=dim0, dim1=dim1)\n\n def transpose_(self, dim0, dim1):\n self._ivy_array = self.transpose(dim0, dim1).ivy_array\n return self\n\n def flatten(self, start_dim=0, end_dim=-1):\n return torch_frontend.flatten(self._ivy_array, start_dim, end_dim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum(self, dim, dtype):\n return torch_frontend.cumsum(self._ivy_array, dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum_(self, dim, *, dtype=None):\n self._ivy_array = self.cumsum(dim, dtype).ivy_array\n return self\n\n def inverse(self):\n return torch_frontend.inverse(self._ivy_array)\n\n def neg(self):\n return torch_frontend.negative(self._ivy_array)\n\n def int(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int32, copy=False)\n return self\n\n def bool(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.bool, copy=False)\n return self\n\n def type(self, dtype=None, non_blocking=False, **kwargs):\n if ivy.exists(dtype):\n self._ivy_array = ivy.astype(self._ivy_array, dtype)\n return self\n else:\n return str(self._ivy_array.dtype)\n\n def type_as(self, other):\n if self.dtype != other.dtype:\n self._ivy_array = ivy.astype(self._ivy_array, other.dtype)\n return self\n else:\n pass\n\n def byte(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.uint8, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def ne(self, other):\n return torch_frontend.ne(self._ivy_array, other)\n\n def squeeze(self, dim):\n return torch_frontend.squeeze(self._ivy_array, dim)\n\n def flip(self, dims):\n return torch_frontend.flip(self._ivy_array, dims)\n\n def fliplr(self):\n return torch_frontend.fliplr(self._ivy_array)\n\n def sort(self, dim=-1, descending=False):\n return torch_frontend.sort(self._ivy_array, dim=dim, descending=descending)\n\n def tril(self, diagonal=0):\n return torch_frontend.tril(self._ivy_array, diagonal=diagonal)\n\n def index_select(self, dim, index):\n return torch_frontend.index_select(self._ivy_array, dim, index)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp(self, min=None, max=None, *, out=None):\n if min is not None and max is not None and ivy.all(min > max):\n return torch_frontend.tensor(ivy.array(self._ivy_array).full_like(max))\n return torch_frontend.clamp(self._ivy_array, min=min, max=max, out=out)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp_(self, min=None, max=None, *, out=None):\n self._ivy_array = self.clamp(min=min, max=max, out=out).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt(self):\n return torch_frontend.sqrt(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt_(self):\n self._ivy_array = self.sqrt().ivy_array\n return self\n\n def where(self, condition, other):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(condition, self._ivy_array, other))\n\n def clone(self, memory_format=None):\n return torch_frontend.tensor(ivy.array(self._ivy_array, 
copy=True))\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh(self):\n return torch_frontend.acosh(self._ivy_array)\n\n def real(self):\n return torch_frontend.real(self._ivy_array)\n\n def masked_fill(self, mask, value):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(mask, value, self._ivy_array))\n\n def masked_fill_(self, mask, value):\n self._ivy_array = self.masked_fill(mask, value).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh_(self):\n self._ivy_array = self.acosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def numpy(self):\n return np_frontend_array(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sigmoid(self):\n return torch_frontend.sigmoid(self.ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def softmax(self, dim=None, dtype=None):\n return torch_frontend.nn.functional.softmax(\n self._ivy_array, dim=dim, dtype=dtype\n )\n\n def repeat(self, *args, repeats=None):\n if args and repeats:\n raise ivy.utils.exceptions.IvyException(\n \"repeat() got multiple values for argument 'repeats'\"\n )\n if args:\n if isinstance(args[0], (tuple, list)):\n repeats = args[0]\n else:\n repeats = args\n elif not isinstance(repeats, (tuple, list)):\n raise ivy.utils.exceptions.IvyException(\n \"repeat(): argument 'repeats' must be tuple of ints\"\n )\n\n return torch_frontend.tile(self._ivy_array, repeats)\n\n def unbind(self, dim=0):\n return torch_frontend.unbind(self._ivy_array, dim=dim)\n\n def bitwise_and_(self, other):\n self.ivy_array = self.bitwise_and(other).ivy_array\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2_(self, other):\n self._ivy_array = self.atan2(other).ivy_array\n return self\n\n def fmin(self, other, out=None):\n return torch_frontend.fmin(self._ivy_array, other, out=out)\n\n # Special Methods #\n # -------------------#\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __add__(self, other):\n return self.add(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mod__(self, other):\n return torch_frontend.remainder(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __pow__(self, exponent):\n return self.pow(exponent)\n\n def __long__(self, memory_format=None):\n return self.long()\n\n def __getitem__(self, query, /):\n ret = ivy.get_item(self._ivy_array, query)\n return torch_frontend.Tensor(ret, _init_overload=True)\n\n def __setitem__(self, key, value):\n if hasattr(value, \"ivy_array\"):\n value = (\n ivy.to_scalar(value.ivy_array)\n if value.shape == ()\n else ivy.to_list(value)\n )\n self._ivy_array[key] = value\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __radd__(self, other):\n return torch_frontend.add(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mul__(self, other):\n return torch_frontend.mul(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rmul__(self, other):\n return torch_frontend.mul(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": 
(\"bfloat16\",)}, \"torch\")\n def __sub__(self, other):\n return torch_frontend.subtract(self._ivy_array, other)\n\n def __truediv__(self, other):\n return torch_frontend.div(self._ivy_array, other)\n\n def __iadd__(self, other):\n ret = torch_frontend.add(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imod__(self, other):\n ret = torch_frontend.remainder(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imul__(self, other):\n ret = torch_frontend.mul(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __isub__(self, other):\n ret = torch_frontend.subtract(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __itruediv__(self, other):\n ret = torch_frontend.div(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __eq__(self, other):\n return torch_frontend.equal(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __gt__(self, other):\n return torch_frontend.greater(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __ne__(self, other):\n return self.ne(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rsub__(self, other):\n return torch_frontend.subtract(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __lt__(self, other):\n return torch_frontend.less(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __or__(self, other):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n def __invert__(self):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def __and__(self, other):\n return torch_frontend.bitwise_and(self, other)\n\n # Method aliases\n absolute, absolute_ = abs, abs_\n ndimension = dim\n\n def bitwise_xor(self, other, *, out=None):\n return torch_frontend.bitwise_xor(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumprod(self, dim, dtype):\n return torch_frontend.cumprod(self._ivy_array, dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def exp(self, *, out=None):\n return torch_frontend.exp(self._ivy_array)\n", "path": "ivy/functional/frontends/torch/tensor.py" } ]
[ { "content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.torch as torch_frontend\nimport ivy.functional.frontends.torch.nn.functional as torch_frontend_nn\nfrom ivy.functional.frontends.numpy.creation_routines.from_existing_data import (\n array as np_frontend_array,\n)\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.func_wrapper import with_supported_dtypes\n\n\nclass Tensor:\n def __init__(self, array, device=None, _init_overload=False):\n\n if _init_overload:\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n else:\n self._ivy_array = ivy.array(\n array, dtype=torch_frontend.float32, device=device\n )\n\n def __repr__(self):\n return str(self._ivy_array.__repr__()).replace(\n \"ivy.array\", \"ivy.frontends.torch.Tensor\"\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def device(self):\n return ivy.dev(self._ivy_array)\n\n @property\n def dtype(self):\n return self._ivy_array.dtype\n\n @property\n def shape(self):\n return self._ivy_array.shape\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Instance Methods #\n # ---------------- #\n def reshape(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n return torch_frontend.reshape(self._ivy_array, shape)\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n return torch_frontend.reshape(self._ivy_array, shape)\n else:\n return torch_frontend.reshape(self._ivy_array, args)\n return torch_frontend.reshape(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def reshape_as(self, other):\n return torch_frontend.reshape(self, other.shape)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add(self, other, *, alpha=1):\n return torch_frontend.add(self._ivy_array, other, alpha=alpha)\n\n def chunk(self, chunks, dim=0):\n return torch_frontend.chunk(self._ivy_array, chunks, dim=dim)\n\n def any(self, dim=None, keepdim=False, *, out=None):\n return torch_frontend.any(self._ivy_array, dim=dim, keepdim=keepdim, out=out)\n\n def all(self, dim=None, keepdim=False):\n return torch_frontend.all(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add_(self, other, *, alpha=1):\n self._ivy_array = self.add(other, alpha=alpha).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin(self):\n return torch_frontend.asin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin_(self):\n self._ivy_array = self.asin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def sum(self):\n return torch_frontend.sum(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin(self):\n return torch_frontend.sin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin_(self):\n self._ivy_array = self.sin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh(self):\n return 
torch_frontend.sinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh_(self):\n self._ivy_array = self.sinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos(self):\n return torch_frontend.cos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos_(self):\n self._ivy_array = self.cos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh(self):\n return torch_frontend.cosh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh_(self):\n self._ivy_array = self.cosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin(self):\n return torch_frontend.arcsin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin_(self):\n self._ivy_array = self.arcsin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan_(self):\n self._ivy_array = self.atan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2(self, other):\n return torch_frontend.atan2(self._ivy_array, other)\n\n def view(self, *args, shape=None):\n \"\"\"\n Reshape Tensor.\n\n possible arguments are either:\n - size\n - tuple of ints\n - list of ints\n - torch.Size object\n - ints\n Parameters\n ----------\n args:int arguments\n shape: optional shape\n\n Returns reshaped tensor\n -------\n \"\"\"\n if shape and not args:\n shape_tup = shape\n elif args and not shape:\n if (\n isinstance(args[0], tuple)\n or isinstance(args[0], list)\n or type(args[0]).__name__ == \"Size\"\n ) and len(args) == 1:\n shape_tup = args[0]\n else:\n shape_tup = args\n else:\n raise ValueError(\n \"View only accepts as argument ints, tuple or list of ints or \"\n \"the keyword argument size.\"\n )\n return torch_frontend.reshape(self._ivy_array, shape_tup)\n\n def float(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.float32, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh(self):\n return torch_frontend.asinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh_(self):\n self._ivy_array = self.asinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan(self):\n return torch_frontend.tan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan_(self):\n self._ivy_array = self.tan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh(self):\n return torch_frontend.tanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh_(self):\n self._ivy_array = self.tanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atanh(self):\n return torch_frontend.atanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, 
\"torch\")\n def atanh_(self):\n self._ivy_array = self.atanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh(self):\n return torch_frontend.arctanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh_(self):\n self._ivy_array = self.arctanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log(self):\n return torch_frontend.log(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log_(self):\n self._ivy_array = self.log().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log2(self):\n return torch_frontend.log2(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def relu(self):\n return torch_frontend_nn.relu(self._ivy_array)\n\n def amax(self, dim=None, keepdim=False):\n return torch_frontend.amax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def amin(self, dim=None, keepdim=False):\n return torch_frontend.amin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def aminmax(self, dim=None, keepdim=False):\n return torch_frontend.aminmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def abs(self):\n return torch_frontend.abs(self._ivy_array)\n\n def abs_(self):\n self._ivy_array = self.abs().ivy_array\n return self\n\n def logical_and(self, other):\n return torch_frontend.logical_and(self._ivy_array, other)\n\n def bitwise_not(self, *, out=None):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def bitwise_and(self, other):\n return torch_frontend.bitwise_and(self._ivy_array, other)\n\n def bitwise_or(self, other, *, out=None):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n def bitwise_left_shift(self, other, *, out=None):\n return torch_frontend.bitwise_left_shift(self._ivy_array, other)\n\n @with_supported_dtypes({\"1.11.0 and below\": (\"integer\",)}, \"torch\")\n def bitwise_or_(self, other, *, out=None):\n self._ivy_array = self.bitwise_or(other, out=out).ivy_array\n return self\n\n def contiguous(self, memory_format=None):\n return torch_frontend.tensor(self.ivy_array)\n\n def new_ones(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.ones(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def new_zeros(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.zeros(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def to(self, *args, **kwargs):\n if len(args) > 0:\n if isinstance(args[0], (ivy.Dtype, ivy.NativeDtype)):\n if self.dtype == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, dtype=args[0])\n return cast_tensor\n if isinstance(args[0], (ivy.Device, ivy.NativeDevice, str)):\n if isinstance(args[0], str):\n ivy.utils.assertions.check_elem_in_list(\n args[0],\n [\n \"cpu\",\n \"cuda\",\n \"xpu\",\n \"mkldnn\",\n \"opengl\",\n \"opencl\",\n \"ideep\",\n \"hip\",\n \"ve\",\n \"ort\",\n \"mlc\",\n \"xla\",\n \"lazy\",\n \"vulkan\",\n \"meta\",\n \"hpu\",\n ],\n )\n if self.device == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, device=args[0])\n return cast_tensor\n else:\n if 
self.dtype == args[0].dtype and self.device == ivy.dev(args[0]):\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n dtype=args[0].dtype,\n device=args[0].device,\n )\n return cast_tensor\n else:\n if (\n \"dtype\" in kwargs\n and \"device\" in kwargs\n and self.dtype == kwargs[\"dtype\"]\n and self.device == kwargs[\"device\"]\n ):\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n device=kwargs[\"device\"] if \"device\" in kwargs else self.device,\n dtype=kwargs[\"dtype\"] if \"dtype\" in kwargs else self.dtype,\n )\n return cast_tensor\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan_(self):\n self._ivy_array = self.arctan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2(self, other):\n return torch_frontend.arctan2(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2_(self, other):\n self._ivy_array = self.arctan2(other).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos(self):\n return torch_frontend.acos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos_(self):\n self._ivy_array = self.acos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos(self):\n return torch_frontend.arccos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos_(self):\n self._ivy_array = self.arccos().ivy_array\n return self\n\n def new_tensor(\n self,\n data,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.asarray(data, copy=True, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def view_as(self, other):\n return self.view(other.shape)\n\n def expand(self, *args, size=None):\n if args and size:\n raise TypeError(\"expand() got multiple values for argument 'size'\")\n if args:\n if isinstance(args[0], (tuple, list)):\n size = args[0]\n else:\n size = args\n\n return torch_frontend.tensor(ivy.expand(self._ivy_array, tuple(size)))\n\n def expand_as(self, other):\n return self.expand(\n ivy.shape(other.ivy_array if isinstance(other, Tensor) else other)\n )\n\n def detach(self):\n return torch_frontend.tensor(\n ivy.stop_gradient(self._ivy_array, preserve_type=False)\n )\n\n def unsqueeze(self, dim):\n return torch_frontend.unsqueeze(self, dim)\n\n def unsqueeze_(self, dim):\n self._ivy_array = self.unsqueeze(dim).ivy_array\n return self\n\n def split(self, split_size, dim=0):\n return torch_frontend.split(self, split_size, dim)\n\n def tensor_split(self, indices_or_sections, dim=0):\n return torch_frontend.tensor_split(self.ivy_array, indices_or_sections, dim)\n\n def vsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.vsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def hsplit(self, indices_or_sections=None, /, *, 
indices=None, sections=None):\n return torch_frontend.hsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def dsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.dsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def dim(self):\n return self._ivy_array.ndim\n\n def new_full(\n self,\n size,\n fill_value,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.full(size, fill_value, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def new_empty(\n self,\n size,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.empty(size, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def unfold(self, dimension, size, step):\n slices = []\n for i in range(0, self._ivy_array.shape[dimension] - size + 1, step):\n slices.append(self._ivy_array[i : i + size])\n return torch_frontend.stack(slices)\n\n def long(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int64, copy=False)\n return self\n\n def max(self, dim=None, keepdim=False):\n return torch_frontend.max(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def is_cuda(self):\n return \"gpu\" in ivy.dev(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow(self, exponent):\n return torch_frontend.pow(self._ivy_array, exponent)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow_(self, exponent):\n self._ivy_array = self.pow(exponent).ivy_array\n return self\n\n def size(self, dim=None):\n shape = ivy.shape(self._ivy_array)\n if dim is None:\n return shape\n else:\n try:\n return shape[dim]\n except IndexError:\n raise IndexError(\n \"Dimension out of range (expected to be in range of [{}, {}], \"\n \"but got {}\".format(len(shape), len(shape) - 1, dim)\n )\n\n def matmul(self, other):\n return torch_frontend.matmul(self._ivy_array, other)\n\n def argwhere(self):\n return torch_frontend.argwhere(self._ivy_array)\n\n def argmax(self, dim=None, keepdim=False):\n return torch_frontend.argmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argmin(self, dim=None, keepdim=False):\n return torch_frontend.argmin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argsort(self, dim=-1, descending=False):\n return torch_frontend.argsort(self._ivy_array, dim=dim, descending=descending)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def ceil(self):\n return torch_frontend.ceil(self._ivy_array)\n\n def min(self, dim=None, keepdim=False):\n return torch_frontend.min(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def permute(self, *args, dims=None):\n if args and dims:\n raise TypeError(\"permute() got multiple values for argument 'dims'\")\n if dims is not None:\n return torch_frontend.permute(self._ivy_array, dims)\n if args:\n if isinstance(args[0], (tuple, list)):\n dims = args[0]\n return torch_frontend.permute(self._ivy_array, dims)\n else:\n return torch_frontend.permute(self._ivy_array, args)\n return torch_frontend.permute(self._ivy_array)\n\n def mean(self, dim=None, 
keepdim=False):\n return torch_frontend.mean(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def transpose(self, dim0, dim1):\n return torch_frontend.transpose(self._ivy_array, dim0=dim0, dim1=dim1)\n\n def transpose_(self, dim0, dim1):\n self._ivy_array = self.transpose(dim0, dim1).ivy_array\n return self\n\n def flatten(self, start_dim=0, end_dim=-1):\n return torch_frontend.flatten(self._ivy_array, start_dim, end_dim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum(self, dim, dtype):\n return torch_frontend.cumsum(self._ivy_array, dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum_(self, dim, *, dtype=None):\n self._ivy_array = self.cumsum(dim, dtype).ivy_array\n return self\n\n def inverse(self):\n return torch_frontend.inverse(self._ivy_array)\n\n def neg(self):\n return torch_frontend.negative(self._ivy_array)\n\n def int(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int32, copy=False)\n return self\n\n def bool(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.bool, copy=False)\n return self\n\n def type(self, dtype=None, non_blocking=False, **kwargs):\n if ivy.exists(dtype):\n self._ivy_array = ivy.astype(self._ivy_array, dtype)\n return self\n else:\n return str(self._ivy_array.dtype)\n\n def type_as(self, other):\n if self.dtype != other.dtype:\n self._ivy_array = ivy.astype(self._ivy_array, other.dtype)\n return self\n else:\n pass\n\n def byte(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.uint8, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def ne(self, other):\n return torch_frontend.ne(self._ivy_array, other)\n\n def squeeze(self, dim):\n return torch_frontend.squeeze(self._ivy_array, dim)\n\n def flip(self, dims):\n return torch_frontend.flip(self._ivy_array, dims)\n\n def fliplr(self):\n return torch_frontend.fliplr(self._ivy_array)\n\n def sort(self, dim=-1, descending=False):\n return torch_frontend.sort(self._ivy_array, dim=dim, descending=descending)\n\n def tril(self, diagonal=0):\n return torch_frontend.tril(self._ivy_array, diagonal=diagonal)\n\n def index_select(self, dim, index):\n return torch_frontend.index_select(self._ivy_array, dim, index)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp(self, min=None, max=None, *, out=None):\n if min is not None and max is not None and ivy.all(min > max):\n return torch_frontend.tensor(ivy.array(self._ivy_array).full_like(max))\n return torch_frontend.clamp(self._ivy_array, min=min, max=max, out=out)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp_(self, min=None, max=None, *, out=None):\n self._ivy_array = self.clamp(min=min, max=max, out=out).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt(self):\n return torch_frontend.sqrt(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt_(self):\n self._ivy_array = self.sqrt().ivy_array\n return self\n\n def where(self, condition, other):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(condition, self._ivy_array, other))\n\n def clone(self, memory_format=None):\n return torch_frontend.tensor(ivy.array(self._ivy_array, 
copy=True))\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh(self):\n return torch_frontend.acosh(self._ivy_array)\n\n def real(self):\n return torch_frontend.real(self._ivy_array)\n\n def masked_fill(self, mask, value):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(mask, value, self._ivy_array))\n\n def masked_fill_(self, mask, value):\n self._ivy_array = self.masked_fill(mask, value).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh_(self):\n self._ivy_array = self.acosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def numpy(self):\n return np_frontend_array(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sigmoid(self):\n return torch_frontend.sigmoid(self.ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def softmax(self, dim=None, dtype=None):\n return torch_frontend.nn.functional.softmax(\n self._ivy_array, dim=dim, dtype=dtype\n )\n\n def repeat(self, *args, repeats=None):\n if args and repeats:\n raise ivy.utils.exceptions.IvyException(\n \"repeat() got multiple values for argument 'repeats'\"\n )\n if args:\n if isinstance(args[0], (tuple, list)):\n repeats = args[0]\n else:\n repeats = args\n elif not isinstance(repeats, (tuple, list)):\n raise ivy.utils.exceptions.IvyException(\n \"repeat(): argument 'repeats' must be tuple of ints\"\n )\n\n return torch_frontend.tile(self._ivy_array, repeats)\n\n def unbind(self, dim=0):\n return torch_frontend.unbind(self._ivy_array, dim=dim)\n\n def bitwise_and_(self, other):\n self.ivy_array = self.bitwise_and(other).ivy_array\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2_(self, other):\n self._ivy_array = self.atan2(other).ivy_array\n return self\n\n def fmin(self, other, out=None):\n return torch_frontend.fmin(self._ivy_array, other, out=out)\n\n # Special Methods #\n # -------------------#\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __add__(self, other):\n return self.add(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mod__(self, other):\n return torch_frontend.remainder(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __pow__(self, exponent):\n return self.pow(exponent)\n\n def __long__(self, memory_format=None):\n return self.long()\n\n def __getitem__(self, query, /):\n ret = ivy.get_item(self._ivy_array, query)\n return torch_frontend.Tensor(ret, _init_overload=True)\n\n def __setitem__(self, key, value):\n if hasattr(value, \"ivy_array\"):\n value = (\n ivy.to_scalar(value.ivy_array)\n if value.shape == ()\n else ivy.to_list(value)\n )\n self._ivy_array[key] = value\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __radd__(self, other):\n return torch_frontend.add(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mul__(self, other):\n return torch_frontend.mul(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rmul__(self, other):\n return torch_frontend.mul(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": 
(\"bfloat16\",)}, \"torch\")\n def __sub__(self, other):\n return torch_frontend.subtract(self._ivy_array, other)\n\n def __truediv__(self, other):\n return torch_frontend.div(self._ivy_array, other)\n\n def __iadd__(self, other):\n ret = torch_frontend.add(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imod__(self, other):\n ret = torch_frontend.remainder(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imul__(self, other):\n ret = torch_frontend.mul(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __isub__(self, other):\n ret = torch_frontend.subtract(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __itruediv__(self, other):\n ret = torch_frontend.div(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __eq__(self, other):\n return torch_frontend.equal(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __gt__(self, other):\n return torch_frontend.greater(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __ne__(self, other):\n return self.ne(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rsub__(self, other):\n return torch_frontend.subtract(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __lt__(self, other):\n return torch_frontend.less(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __or__(self, other):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n def __invert__(self):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def __and__(self, other):\n return torch_frontend.bitwise_and(self, other)\n\n # Method aliases\n absolute, absolute_ = abs, abs_\n ndimension = dim\n\n def bitwise_xor(self, other, *, out=None):\n return torch_frontend.bitwise_xor(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumprod(self, dim, dtype):\n return torch_frontend.cumprod(self._ivy_array, dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def exp(self, *, out=None):\n return torch_frontend.exp(self._ivy_array)\n \n def mul(self, other, *, out=None):\n return torch_frontend.mul(self._ivy_array, other)\n", "path": "ivy/functional/frontends/torch/tensor.py" } ]
diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py index e652125384f9b..ce9f3630d2753 100644 --- a/ivy/functional/frontends/torch/tensor.py +++ b/ivy/functional/frontends/torch/tensor.py @@ -890,3 +890,6 @@ def cumprod(self, dim, dtype): @with_unsupported_dtypes({"1.11.0 and below": ("bfloat16",)}, "torch") def exp(self, *, out=None): return torch_frontend.exp(self._ivy_array) + + def mul(self, other, *, out=None): + return torch_frontend.mul(self._ivy_array, other) diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py index 5fa8523d009fb..d8f30a5c4058a 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py @@ -6026,3 +6026,39 @@ def test_torch_instance_exp( frontend=frontend, on_device=on_device, ) + + +# mul +@handle_frontend_method( + class_tree=CLASS_TREE, + init_tree="torch.tensor", + method_name="mul", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("numeric"), + num_arrays=2, + ), +) +def test_torch_instance_mul( + dtype_and_x, + frontend_method_data, + init_flags, + method_flags, + frontend, + on_device, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_method( + init_input_dtypes=input_dtype, + init_all_as_kwargs_np={ + "data": x[0], + }, + method_input_dtypes=input_dtype, + method_all_as_kwargs_np={ + "other": x[1], + }, + frontend_method_data=frontend_method_data, + init_flags=init_flags, + method_flags=method_flags, + frontend=frontend, + on_device=on_device, + )
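For orientation on the row above: the diff adds an instance-level `mul` method to the torch frontend `Tensor`, delegating to `torch_frontend.mul`, plus a matching frontend-method test. A minimal usage sketch — assuming an ivy installation with a NumPy backend; the values are illustrative and not taken from the dataset:

```python
import ivy
import ivy.functional.frontends.torch as torch_frontend

ivy.set_backend("numpy")  # assumption: a NumPy backend is available

a = torch_frontend.tensor([1.0, 2.0, 3.0])
b = torch_frontend.tensor([4.0, 5.0, 6.0])

# The new instance method mirrors torch.Tensor.mul by delegating to torch_frontend.mul
print(a.mul(b))  # expected elementwise product: [4., 10., 18.]
```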
napari__napari-680
Napari viewer closes unexpectetly when pressing the 3D button ## 🐛 Bug Hi guys, I use the following code to automatically display a 3D stack with 2 Channels and 10 Timepoints in Napari. This works fine and the viewer opens up just fine. When I then press the 3D button the viewer is gone and I get a long error stack: See below: Any help is welcome. Sebi ![image](https://user-images.githubusercontent.com/3833249/68606163-216f8a00-04ae-11ea-906c-726f3195e4fe.png) ``` def show_napari(array, metadata, verbose=True): import napari with napari.gui_qt(): # create scalefcator with all ones scalefactors = [1] * len(array.shape) # initialize the napari viewer viewer = napari.Viewer() if metadata['ImageType'] == 'czi': # find position of dimensions posZ = metadata['Axes'].find('Z') posC = metadata['Axes'].find('C') posT = metadata['Axes'].find('T') # get the scalefactors from the metadata scalef = get_scalefactor(metadata) scalefactors[posZ] = scalef['zx'] if verbose: print('Dim PosT : ', posT) print('Dim PosZ : ', posZ) print('Dim PosC : ', posC) print('Scale Factors XYZ: ', scalefactors) # add all channels as layers for ch in range(metadata['SizeC']): chname = metadata['Channels'][ch] # cut out channel channel = array.take(ch, axis=posC) print(channel.shape) # actually show the image array print('Adding Channel: ', chname) viewer.add_image(channel, name=chname, scale=scalefactors) ``` Error message: ``` (1, 10, 2, 15, 256, 256, 1) BTCZYX0 (1, 10, 2, 15, 256, 256) Dim PosT : 1 Dim PosZ : 3 Dim PosC : 2 Scale Factors XYZ: [1, 1, 1, 3.516, 1, 1] (1, 10, 15, 256, 256) Adding Channel: AF555 (1, 10, 15, 256, 256) Adding Channel: AF488 WARNING: Traceback (most recent call last): File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_qt\qt_viewer_buttons.py", line 173, in <lambda> lambda state=self: self.change_ndisplay(state) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_qt\qt_viewer_buttons.py", line 178, in change_ndisplay self.viewer.dims.ndisplay = 3 File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\dims.py", line 205, in ndisplay self.events.ndisplay() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 508, in __call__ self._invoke_callback(cb, event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 529, in _invoke_callback cb_event=(cb, event), File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 523, in _invoke_callback cb(event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\viewer_model.py", line 89, in <lambda> self.dims.events.ndisplay.connect(lambda e: self._update_layers()) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\viewer_model.py", line 1018, in _update_layers layer.dims.ndisplay = self.dims.ndisplay File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\dims.py", line 205, in ndisplay self.events.ndisplay() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 508, in __call__ self._invoke_callback(cb, event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 529, in _invoke_callback cb_event=(cb, event), File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 523, in _invoke_callback cb(event) File 
"C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\layers\base\base.py", line 174, in <lambda> self.dims.events.ndisplay.connect(lambda e: self._update_dims()) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\layers\base\base.py", line 372, in _update_dims self._set_view_slice() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\layers\image\image.py", line 502, in _set_view_slice self.events.set_data() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 508, in __call__ self._invoke_callback(cb, event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 529, in _invoke_callback cb_event=(cb, event), File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 523, in _invoke_callback cb(event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_base_layer.py", line 46, in <lambda> self.layer.events.set_data.connect(lambda e: self._on_data_change()) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 59, in _on_data_change self._on_display_change() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 48, in _on_display_change self.reset() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 216, in reset self._reset_base() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_base_layer.py", line 165, in _reset_base self._on_scale_change() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 123, in _on_scale_change self.layer.position = self._transform_position(self._position) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_base_layer.py", line 153, in _transform_position transform.map(list(position))[: len(self.layer.dims.displayed)] File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\vispy\visuals\transforms\chain.py", line 148, in map coords = tr.map(coords) File "<decorator-gen-4>", line 2, in imap File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\vispy\visuals\transforms\_util.py", line 111, in arg_to_vec4 arg = as_vec4(arg) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\vispy\visuals\transforms\_util.py", line 81, in as_vec4 % obj.shape) TypeError: not all arguments converted during string formatting WARNING:vispy:Traceback (most recent call last): File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_qt\qt_viewer_buttons.py", line 173, in <lambda> lambda state=self: self.change_ndisplay(state) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_qt\qt_viewer_buttons.py", line 178, in change_ndisplay self.viewer.dims.ndisplay = 3 File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\dims.py", line 205, in ndisplay self.events.ndisplay() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 508, in __call__ self._invoke_callback(cb, event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 529, in _invoke_callback cb_event=(cb, event), File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 523, in 
_invoke_callback cb(event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\viewer_model.py", line 89, in <lambda> self.dims.events.ndisplay.connect(lambda e: self._update_layers()) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\viewer_model.py", line 1018, in _update_layers layer.dims.ndisplay = self.dims.ndisplay File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\components\dims.py", line 205, in ndisplay self.events.ndisplay() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 508, in __call__ self._invoke_callback(cb, event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 529, in _invoke_callback cb_event=(cb, event), File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 523, in _invoke_callback cb(event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\layers\base\base.py", line 174, in <lambda> self.dims.events.ndisplay.connect(lambda e: self._update_dims()) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\layers\base\base.py", line 372, in _update_dims self._set_view_slice() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\layers\image\image.py", line 502, in _set_view_slice self.events.set_data() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 508, in __call__ self._invoke_callback(cb, event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 529, in _invoke_callback cb_event=(cb, event), File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\util\event.py", line 523, in _invoke_callback cb(event) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_base_layer.py", line 46, in <lambda> self.layer.events.set_data.connect(lambda e: self._on_data_change()) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 59, in _on_data_change self._on_display_change() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 48, in _on_display_change self.reset() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 216, in reset self._reset_base() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_base_layer.py", line 165, in _reset_base self._on_scale_change() File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_image_layer.py", line 123, in _on_scale_change self.layer.position = self._transform_position(self._position) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\napari\_vispy\vispy_base_layer.py", line 153, in _transform_position transform.map(list(position))[: len(self.layer.dims.displayed)] File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\vispy\visuals\transforms\chain.py", line 148, in map coords = tr.map(coords) File "<decorator-gen-4>", line 2, in imap File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\vispy\visuals\transforms\_util.py", line 111, in arg_to_vec4 arg = as_vec4(arg) File "C:\ProgramData\Anaconda3\envs\imageanalysis\lib\site-packages\vispy\visuals\transforms\_util.py", line 81, in as_vec4 % obj.shape) TypeError: not all arguments 
converted during string formatting ```
[ { "content": "from vispy.visuals.transforms import STTransform\nfrom abc import ABC, abstractmethod\n\n\nclass VispyBaseLayer(ABC):\n \"\"\"Base object for individual layer views\n\n Meant to be subclassed.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer model.\n node : vispy.scene.VisualNode\n Central node with which to interact with the visual.\n\n Attributes\n ----------\n layer : napari.layers.Layer\n Layer model.\n node : vispy.scene.VisualNode\n Central node with which to interact with the visual.\n scale : sequence of float\n Scale factors for the layer visual in the scenecanvas.\n translate : sequence of float\n Translation values for the layer visual in the scenecanvas.\n scale_factor : float\n Conversion factor from canvas coordinates to image coordinates, which\n depends on the current zoom level.\n\n Extended Summary\n ----------\n _master_transform : vispy.visuals.transforms.STTransform\n Transform positioning the layer visual inside the scenecanvas.\n \"\"\"\n\n def __init__(self, layer, node):\n super().__init__()\n\n self.layer = layer\n self.node = node\n self._position = (0,) * self.layer.ndim\n self.camera = None\n\n self.layer.events.refresh.connect(lambda e: self.node.update())\n self.layer.events.set_data.connect(lambda e: self._on_data_change())\n\n self.layer.events.visible.connect(lambda e: self._on_visible_change())\n self.layer.events.opacity.connect(lambda e: self._on_opacity_change())\n self.layer.events.blending.connect(\n lambda e: self._on_blending_change()\n )\n self.layer.events.scale.connect(lambda e: self._on_scale_change())\n self.layer.events.translate.connect(\n lambda e: self._on_translate_change()\n )\n\n @property\n def _master_transform(self):\n \"\"\"vispy.visuals.transforms.STTransform:\n Central node's firstmost transform.\n \"\"\"\n # whenever a new parent is set, the transform is reset\n # to a NullTransform so we reset it here\n if not isinstance(self.node.transform, STTransform):\n self.node.transform = STTransform()\n\n return self.node.transform\n\n @property\n def order(self):\n \"\"\"int: Order in which the visual is drawn in the scenegraph.\n\n Lower values are closer to the viewer.\n \"\"\"\n return self.node.order\n\n @order.setter\n def order(self, order):\n self.node.order = order\n\n @property\n def scale(self):\n \"\"\"sequence of float: Scale factors.\"\"\"\n return self._master_transform.scale\n\n @scale.setter\n def scale(self, scale):\n self._master_transform.scale = scale\n\n @property\n def translate(self):\n \"\"\"sequence of float: Translation values.\"\"\"\n return self._master_transform.translate\n\n @translate.setter\n def translate(self, translate):\n self._master_transform.translate = translate\n\n @property\n def scale_factor(self):\n \"\"\"float: Conversion factor from canvas coordinates to image\n coordinates, which depends on the current zoom level.\n \"\"\"\n transform = self.node.canvas.scene.node_transform(self.node)\n scale_factor = transform.map([1, 1])[0] - transform.map([0, 0])[0]\n return scale_factor\n\n @abstractmethod\n def _on_data_change(self):\n raise NotImplementedError()\n\n def _on_visible_change(self):\n self.node.visible = self.layer.visible\n\n def _on_opacity_change(self):\n self.node.opacity = self.layer.opacity\n\n def _on_blending_change(self):\n self.node.set_gl_state(self.layer.blending)\n self.node.update()\n\n def _on_scale_change(self):\n self.scale = [\n self.layer.scale[d] for d in self.layer.dims.displayed[::-1]\n ]\n self.layer.position = 
self._transform_position(self._position)\n\n def _on_translate_change(self):\n self.translate = [\n self.layer.translate[d] + self.layer.translate_grid[d]\n for d in self.layer.dims.displayed[::-1]\n ]\n self.layer.position = self._transform_position(self._position)\n\n def _transform_position(self, position):\n \"\"\"Transform cursor position from canvas space (x, y) into image space.\n\n Parameters\n -------\n position : 2-tuple\n Cursor position in canvase (x, y).\n\n Returns\n -------\n coords : tuple\n Coordinates of cursor in image space for displayed dimensions only\n \"\"\"\n if self.node.canvas is not None:\n transform = self.node.canvas.scene.node_transform(self.node)\n # Map and offset position so that pixel center is at 0\n mapped_position = (\n transform.map(list(position))[: len(self.layer.dims.displayed)]\n - 0.5\n )\n coords = tuple(mapped_position[::-1])\n else:\n coords = (0,) * len(self.layer.dims.displayed)\n return coords\n\n def _reset_base(self):\n self._on_visible_change()\n self._on_opacity_change()\n self._on_blending_change()\n self._on_scale_change()\n self._on_translate_change()\n\n def on_mouse_move(self, event):\n \"\"\"Called whenever mouse moves over canvas.\"\"\"\n if event.pos is None:\n return\n self._position = list(event.pos)\n self.layer.position = self._transform_position(self._position)\n self.layer.on_mouse_move(event)\n\n def on_mouse_press(self, event):\n \"\"\"Called whenever mouse pressed in canvas.\n \"\"\"\n if event.pos is None:\n return\n self._position = list(event.pos)\n self.layer.position = self._transform_position(self._position)\n self.layer.on_mouse_press(event)\n\n def on_mouse_release(self, event):\n \"\"\"Called whenever mouse released in canvas.\n \"\"\"\n if event.pos is None:\n return\n self._position = list(event.pos)\n self.layer.position = self._transform_position(self._position)\n self.layer.on_mouse_release(event)\n\n def on_draw(self, event):\n \"\"\"Called whenever the canvas is drawn.\n \"\"\"\n self.layer.scale_factor = self.scale_factor\n", "path": "napari/_vispy/vispy_base_layer.py" } ]
[ { "content": "from vispy.visuals.transforms import STTransform\nfrom abc import ABC, abstractmethod\n\n\nclass VispyBaseLayer(ABC):\n \"\"\"Base object for individual layer views\n\n Meant to be subclassed.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer model.\n node : vispy.scene.VisualNode\n Central node with which to interact with the visual.\n\n Attributes\n ----------\n layer : napari.layers.Layer\n Layer model.\n node : vispy.scene.VisualNode\n Central node with which to interact with the visual.\n scale : sequence of float\n Scale factors for the layer visual in the scenecanvas.\n translate : sequence of float\n Translation values for the layer visual in the scenecanvas.\n scale_factor : float\n Conversion factor from canvas coordinates to image coordinates, which\n depends on the current zoom level.\n\n Extended Summary\n ----------\n _master_transform : vispy.visuals.transforms.STTransform\n Transform positioning the layer visual inside the scenecanvas.\n \"\"\"\n\n def __init__(self, layer, node):\n super().__init__()\n\n self.layer = layer\n self.node = node\n self._position = (0,) * self.layer.dims.ndisplay\n self.camera = None\n\n self.layer.events.refresh.connect(lambda e: self.node.update())\n self.layer.events.set_data.connect(lambda e: self._on_data_change())\n\n self.layer.events.visible.connect(lambda e: self._on_visible_change())\n self.layer.events.opacity.connect(lambda e: self._on_opacity_change())\n self.layer.events.blending.connect(\n lambda e: self._on_blending_change()\n )\n self.layer.events.scale.connect(lambda e: self._on_scale_change())\n self.layer.events.translate.connect(\n lambda e: self._on_translate_change()\n )\n\n @property\n def _master_transform(self):\n \"\"\"vispy.visuals.transforms.STTransform:\n Central node's firstmost transform.\n \"\"\"\n # whenever a new parent is set, the transform is reset\n # to a NullTransform so we reset it here\n if not isinstance(self.node.transform, STTransform):\n self.node.transform = STTransform()\n\n return self.node.transform\n\n @property\n def order(self):\n \"\"\"int: Order in which the visual is drawn in the scenegraph.\n\n Lower values are closer to the viewer.\n \"\"\"\n return self.node.order\n\n @order.setter\n def order(self, order):\n self.node.order = order\n\n @property\n def scale(self):\n \"\"\"sequence of float: Scale factors.\"\"\"\n return self._master_transform.scale\n\n @scale.setter\n def scale(self, scale):\n self._master_transform.scale = scale\n\n @property\n def translate(self):\n \"\"\"sequence of float: Translation values.\"\"\"\n return self._master_transform.translate\n\n @translate.setter\n def translate(self, translate):\n self._master_transform.translate = translate\n\n @property\n def scale_factor(self):\n \"\"\"float: Conversion factor from canvas coordinates to image\n coordinates, which depends on the current zoom level.\n \"\"\"\n transform = self.node.canvas.scene.node_transform(self.node)\n scale_factor = transform.map([1, 1])[0] - transform.map([0, 0])[0]\n return scale_factor\n\n @abstractmethod\n def _on_data_change(self):\n raise NotImplementedError()\n\n def _on_visible_change(self):\n self.node.visible = self.layer.visible\n\n def _on_opacity_change(self):\n self.node.opacity = self.layer.opacity\n\n def _on_blending_change(self):\n self.node.set_gl_state(self.layer.blending)\n self.node.update()\n\n def _on_scale_change(self):\n self.scale = [\n self.layer.scale[d] for d in self.layer.dims.displayed[::-1]\n ]\n self.layer.position = 
self._transform_position(self._position)\n\n def _on_translate_change(self):\n self.translate = [\n self.layer.translate[d] + self.layer.translate_grid[d]\n for d in self.layer.dims.displayed[::-1]\n ]\n self.layer.position = self._transform_position(self._position)\n\n def _transform_position(self, position):\n \"\"\"Transform cursor position from canvas space (x, y) into image space.\n\n Parameters\n -------\n position : 2-tuple\n Cursor position in canvase (x, y).\n\n Returns\n -------\n coords : tuple\n Coordinates of cursor in image space for displayed dimensions only\n \"\"\"\n if self.node.canvas is not None:\n transform = self.node.canvas.scene.node_transform(self.node)\n # Map and offset position so that pixel center is at 0\n mapped_position = (\n transform.map(list(position))[: len(self.layer.dims.displayed)]\n - 0.5\n )\n coords = tuple(mapped_position[::-1])\n else:\n coords = (0,) * len(self.layer.dims.displayed)\n return coords\n\n def _reset_base(self):\n self._on_visible_change()\n self._on_opacity_change()\n self._on_blending_change()\n self._on_scale_change()\n self._on_translate_change()\n\n def on_mouse_move(self, event):\n \"\"\"Called whenever mouse moves over canvas.\"\"\"\n if event.pos is None:\n return\n self._position = list(event.pos)\n self.layer.position = self._transform_position(self._position)\n self.layer.on_mouse_move(event)\n\n def on_mouse_press(self, event):\n \"\"\"Called whenever mouse pressed in canvas.\n \"\"\"\n if event.pos is None:\n return\n self._position = list(event.pos)\n self.layer.position = self._transform_position(self._position)\n self.layer.on_mouse_press(event)\n\n def on_mouse_release(self, event):\n \"\"\"Called whenever mouse released in canvas.\n \"\"\"\n if event.pos is None:\n return\n self._position = list(event.pos)\n self.layer.position = self._transform_position(self._position)\n self.layer.on_mouse_release(event)\n\n def on_draw(self, event):\n \"\"\"Called whenever the canvas is drawn.\n \"\"\"\n self.layer.scale_factor = self.scale_factor\n", "path": "napari/_vispy/vispy_base_layer.py" } ]
diff --git a/napari/_vispy/vispy_base_layer.py b/napari/_vispy/vispy_base_layer.py index b315736b206..1ab86e1b452 100644 --- a/napari/_vispy/vispy_base_layer.py +++ b/napari/_vispy/vispy_base_layer.py @@ -39,7 +39,7 @@ def __init__(self, layer, node): self.layer = layer self.node = node - self._position = (0,) * self.layer.ndim + self._position = (0,) * self.layer.dims.ndisplay self.camera = None self.layer.events.refresh.connect(lambda e: self.node.update()) diff --git a/napari/tests/test_advanced.py b/napari/tests/test_advanced.py index 81de93d0c47..22e7a0ef63a 100644 --- a/napari/tests/test_advanced.py +++ b/napari/tests/test_advanced.py @@ -36,6 +36,34 @@ def test_4D_5D_images(qtbot): viewer.window.close() +def test_5D_image_3D_rendering(qtbot): + """Test 3D rendering of a 5D image.""" + np.random.seed(0) + viewer = Viewer() + view = viewer.window.qt_viewer + qtbot.addWidget(view) + + # add 4D image data + data = np.random.random((2, 10, 12, 13, 14)) + viewer.add_image(data) + assert np.all(viewer.layers[0].data == data) + assert len(viewer.layers) == 1 + assert viewer.dims.ndim == 5 + assert viewer.dims.ndisplay == 2 + assert viewer.layers[0]._data_view.ndim == 2 + assert view.dims.nsliders == viewer.dims.ndim + assert np.sum(view.dims._displayed_sliders) == 3 + + # switch to 3D rendering + viewer.dims.ndisplay = 3 + assert viewer.dims.ndisplay == 3 + assert viewer.layers[0]._data_view.ndim == 3 + assert np.sum(view.dims._displayed_sliders) == 2 + + # Close the viewer + viewer.window.close() + + def test_change_image_dims(qtbot): """Test changing the dims and shape of an image layer in place and checking the numbers of sliders and their ranges changes appropriately.
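A note on why the one-line change above resolves the crash in this row's traceback: the cursor-position placeholder was sized by the layer's full data dimensionality (`layer.ndim`), but that position is mapped through vispy scene transforms, which coerce coordinates to 4-vectors; with a 5D layer the 5-element placeholder cannot be converted (the `as_vec4` failure shown in the issue). Sizing it by the number of displayed dimensions (`dims.ndisplay`, 2 or 3) keeps the mapping valid. A small illustration with hypothetical values, not code from either repository:

```python
# Hypothetical values illustrating the mismatch fixed above
ndim = 5        # full dimensionality of e.g. a TCZYX image layer
ndisplay = 3    # dimensions actually rendered after switching to 3D

position_before = (0,) * ndim      # 5 elements: vispy cannot coerce this to a vec4
position_after = (0,) * ndisplay   # 2 or 3 elements: maps cleanly through the scene transform

print(len(position_before), len(position_after))  # 5 3
```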
sktime__sktime-4010
[BUG] Tensorflow failing on macOS
When following the development environment setup guide in the sktime documentation, the conda installation was not able to run properly on macOS due to the missing tensorflow-macos dependency.

To reproduce the issue, run on macOS:

```shell
$ conda create -n sktime-dev python=3.8
$ conda activate sktime-dev
$ pip install -e ."[all_extras,dev]"
$ make test
zsh: illegal hardware instruction
```

A factor that can complicate things is that tensorflow-macos cannot be installed using Python 3.8. Possible solutions include adding a warning to the sktime documentation or adjusting pyproject.toml to tackle this issue.

The expected behavior would be to run all tests successfully after initial installation.
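The report above mentions adjusting `pyproject.toml` as one possible direction. In `pyproject.toml` extras, a dependency string can carry a PEP 508 environment marker, e.g. `"tensorflow-macos; platform_system == 'Darwin' and platform_machine == 'arm64' and python_version >= '3.9'"`. A short sketch of evaluating such a marker with the `packaging` library — illustrative only, not the change the project actually made:

```python
from packaging.markers import Marker

# Hypothetical guard for an Apple-silicon-only wheel, evaluated against the current interpreter
macos_marker = Marker(
    "platform_system == 'Darwin' and platform_machine == 'arm64' and python_version >= '3.9'"
)
print(macos_marker.evaluate())  # True only on Apple silicon with Python >= 3.9
```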
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nimport os\nimport sys\nfrom importlib import import_module\n\nimport sktime\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nON_READTHEDOCS = os.environ.get(\"READTHEDOCS\") == \"True\"\nif not ON_READTHEDOCS:\n sys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- Project information -----------------------------------------------------\nproject = \"sktime\"\ncopyright = \"2019 - 2021 (BSD-3-Clause License)\"\nauthor = \"sktime developers\"\n\n# The full version, including alpha/beta/rc tags\nCURRENT_VERSION = f\"v{sktime.__version__}\"\n\n# If on readthedocs, and we're building the latest version, update tag to generate\n# correct links in notebooks\nif ON_READTHEDOCS:\n READTHEDOCS_VERSION = os.environ.get(\"READTHEDOCS_VERSION\")\n if READTHEDOCS_VERSION == \"latest\":\n CURRENT_VERSION = \"main\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"numpydoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.linkcode\", # link to GitHub source code via linkcode_resolve()\n \"nbsphinx\", # integrates example notebooks\n \"sphinx_gallery.load_style\",\n \"myst_parser\",\n \"sphinx_design\",\n \"sphinx_issues\",\n]\n\n# Recommended by sphinx_design when using the MyST Parser\nmyst_enable_extensions = [\"colon_fence\"]\n\n# Notebook thumbnails\nnbsphinx_thumbnails = {\n \"examples/02_classification\": \"examples/img/tsc.png\",\n}\n\n# Use bootstrap CSS from theme.\npanels_add_bootstrap_css = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\n# The main toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\n \"_build\",\n \".ipynb_checkpoints\",\n \"Thumbs.db\",\n \".DS_Store\",\n]\n\nadd_module_names = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# see http://stackoverflow.com/q/12206334/562769\nnumpydoc_show_class_members = True\n# this is needed for some reason...\n# see https://github.com/numpy/numpydoc/issues/69\nnumpydoc_class_members_toctree = False\n\nnumpydoc_validation_checks = {\"all\"}\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# Members and inherited-members default to showing methods and attributes from a\n# class or those inherited.\n# Member-order orders the documentation in the order of how the members are defined in\n# the source code.\nautodoc_default_options = {\n \"members\": True,\n \"inherited-members\": True,\n \"member-order\": \"bysource\",\n}\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = False\n\n# When building HTML using the sphinx.ext.mathjax (enabled by default),\n# Myst-Parser injects the tex2jax_ignore (MathJax v2) and mathjax_ignore (MathJax v3)\n# classes in to the top-level section of each MyST document, and adds some default\n# configuration. This ensures that MathJax processes only math, identified by the\n# dollarmath and amsmath extensions, or specified in math directives. We here silence\n# the corresponding warning that this override happens.\nsuppress_warnings = [\"myst.mathjax\"]\n\n# Link to GitHub repo for github_issues extension\nissues_github_path = \"sktime/sktime\"\n\n\ndef linkcode_resolve(domain, info):\n \"\"\"Return URL to source code corresponding.\n\n Parameters\n ----------\n domain : str\n info : dict\n\n Returns\n -------\n url : str\n \"\"\"\n\n def find_source():\n # try to find the file and line number, based on code from numpy:\n # https://github.com/numpy/numpy/blob/main/doc/source/conf.py#L286\n obj = sys.modules[info[\"module\"]]\n for part in info[\"fullname\"].split(\".\"):\n obj = getattr(obj, part)\n import inspect\n import os\n\n fn = inspect.getsourcefile(obj)\n fn = os.path.relpath(fn, start=os.path.dirname(sktime.__file__))\n source, lineno = inspect.getsourcelines(obj)\n return fn, lineno, lineno + len(source) - 1\n\n if domain != \"py\" or not info[\"module\"]:\n return None\n try:\n filename = \"sktime/%s#L%d-L%d\" % find_source()\n except Exception:\n filename = info[\"module\"].replace(\".\", \"/\") + \".py\"\n return \"https://github.com/sktime/sktime/blob/%s/%s\" % (\n CURRENT_VERSION,\n filename,\n )\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\nhtml_theme = \"pydata_sphinx_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/sktime/sktime\",\n \"icon\": \"fab fa-github\",\n },\n {\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/sktime-group/shared_invite/zt-1cghagwee-sqLJ~eHWGYgzWbqUX937ig\", # noqa: E501\n \"icon\": \"fab fa-slack\",\n },\n {\n \"name\": \"Discord\",\n \"url\": \"https://discord.com/invite/gqSab2K\",\n \"icon\": \"fab fa-discord\",\n },\n {\n \"name\": \"LinkedIn\",\n \"url\": \"https://www.linkedin.com/company/sktime/\",\n \"icon\": \"fab fa-linkedin\",\n },\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/sktime_toolbox\",\n \"icon\": \"fab fa-twitter\",\n },\n ],\n \"favicons\": [\n {\n \"rel\": \"icon\",\n \"sizes\": \"16x16\",\n \"href\": \"images/sktime-favicon.ico\",\n }\n ],\n \"show_prev_next\": False,\n \"use_edit_page_button\": False,\n \"navbar_start\": [\"navbar-logo\"],\n \"navbar_center\": [\"navbar-nav\"],\n \"navbar_end\": [\"navbar-icon-links\"],\n}\nhtml_logo = \"images/sktime-logo-text-horizontal.png\"\nhtml_context = {\n \"github_user\": \"sktime\",\n \"github_repo\": \"sktime\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source/\",\n}\nhtml_favicon = \"images/sktime-favicon.ico\"\nhtml_sidebars = {\n \"**\": [\"search-field.html\", \"sidebar-nav-bs.html\", \"sidebar-ethical-ads.html\"]\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"css/custom.css\"]\nhtml_js_files = [\n \"js/dynamic_table.js\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\nhtml_show_sourcelink = False\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"sktimedoc\"\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"sktime.tex\", \"sktime Documentation\", \"sktime developers\", \"manual\"),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"sktime\", \"sktime Documentation\", [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"sktime\",\n \"sktime Documentation\",\n author,\n \"sktime\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\ndef _make_estimator_overview(app):\n \"\"\"Make estimator overview table.\"\"\"\n import pandas as pd\n\n from sktime.registry import all_estimators\n\n def _process_author_info(author_info):\n \"\"\"\n Process author information from source code files.\n\n Parameters\n ----------\n author_info : str\n Author information string from source code files.\n\n Returns\n -------\n author_info : str\n Preprocessed author information.\n\n Notes\n -----\n A list of author names is turned into a string.\n Multiple author names will be separated by a comma,\n with the final name always preceded by \"&\".\n \"\"\"\n if isinstance(author_info, list):\n if len(author_info) > 1:\n return \", \".join(author_info[:-1]) + \" & \" + author_info[-1]\n else:\n return author_info[0]\n else:\n return author_info\n\n def _does_not_start_with_underscore(input_string):\n return not input_string.startswith(\"_\")\n\n # creates dataframe as df\n COLNAMES = [\"Class Name\", \"Estimator Type\", \"Authors\"]\n\n df = pd.DataFrame([], columns=COLNAMES)\n\n for modname, modclass in all_estimators():\n algorithm_type = \"::\".join(str(modclass).split(\".\")[1:-2])\n try:\n author_info = _process_author_info(modclass.__author__)\n except AttributeError:\n try:\n author_info = _process_author_info(\n import_module(modclass.__module__).__author__\n )\n except AttributeError:\n author_info = \"no author info\"\n\n # includes part of class string\n modpath = str(modclass)[8:-2]\n path_parts = modpath.split(\".\")\n # joins strings excluding starting with '_'\n clean_path = \".\".join(list(filter(_does_not_start_with_underscore, path_parts)))\n # adds html link reference\n modname = str(\n '<a href=\"https://www.sktime.org/en/latest/api_reference'\n + \"/auto_generated/\"\n + clean_path\n + '.html\">'\n + modname\n + \"</a>\"\n )\n\n record = pd.DataFrame([modname, algorithm_type, author_info], index=COLNAMES).T\n df = pd.concat([df, record], ignore_index=True)\n with open(\"estimator_overview_table.md\", \"w\") as file:\n df.to_markdown(file, index=False)\n\n\ndef setup(app):\n \"\"\"Set up sphinx builder.\n\n Parameters\n ----------\n app : Sphinx application object\n \"\"\"\n\n def adds(pth):\n print(\"Adding stylesheet: %s\" % pth) # noqa: T201, T001\n app.add_css_file(pth)\n\n adds(\"fields.css\") # for parameters, etc.\n\n app.connect(\"builder-inited\", _make_estimator_overview)\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for nbsphinx extension ---------------------------------------\nnbsphinx_execute = \"never\" # always # whether to run notebooks\nnbsphinx_allow_errors = False # False\nnbsphinx_timeout = 600 # seconds, set to -1 to disable timeout\n\n# add Binder launch buttom at the top\ncurrent_file = \"{{ env.doc2path( env.docname, base=None) }}\"\n\n# make sure Binder points to latest stable release, not main\nbinder_url = f\"https://mybinder.org/v2/gh/sktime/sktime/{CURRENT_VERSION}?filepath={current_file}\" # noqa\nnbsphinx_prolog = f\"\"\"\n.. |binder| image:: https://mybinder.org/badge_logo.svg\n.. 
_Binder: {binder_url}\n\n|Binder|_\n\"\"\"\n\n# add link to original notebook at the bottom\nnotebook_url = (\n f\"https://github.com/sktime/sktime/tree/{CURRENT_VERSION}/{current_file}\" # noqa\n)\nnbsphinx_epilog = f\"\"\"\n----\n\nGenerated using nbsphinx_. The Jupyter notebook can be found here_.\n\n.. _here: {notebook_url}\n.. _nbsphinx: https://nbsphinx.readthedocs.io/\n\"\"\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/{.major}\".format(sys.version_info), None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference\", None),\n \"matplotlib\": (\"https://matplotlib.org/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable/\", None),\n \"joblib\": (\"https://joblib.readthedocs.io/en/latest/\", None),\n \"scikit-learn\": (\"https://scikit-learn.org/stable/\", None),\n \"statsmodels\": (\"https://www.statsmodels.org/stable/\", None),\n}\n\n# -- Options for _todo extension ----------------------------------------------\ntodo_include_todos = False\n", "path": "docs/source/conf.py" } ]
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nimport os\nimport sys\nfrom importlib import import_module\n\nimport sktime\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nON_READTHEDOCS = os.environ.get(\"READTHEDOCS\") == \"True\"\nif not ON_READTHEDOCS:\n sys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- Project information -----------------------------------------------------\nproject = \"sktime\"\ncopyright = \"2019 - 2021 (BSD-3-Clause License)\"\nauthor = \"sktime developers\"\n\n# The full version, including alpha/beta/rc tags\nCURRENT_VERSION = f\"v{sktime.__version__}\"\n\n# If on readthedocs, and we're building the latest version, update tag to generate\n# correct links in notebooks\nif ON_READTHEDOCS:\n READTHEDOCS_VERSION = os.environ.get(\"READTHEDOCS_VERSION\")\n if READTHEDOCS_VERSION == \"latest\":\n CURRENT_VERSION = \"main\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.autosectionlabel\",\n \"numpydoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.linkcode\", # link to GitHub source code via linkcode_resolve()\n \"nbsphinx\", # integrates example notebooks\n \"sphinx_gallery.load_style\",\n \"myst_parser\",\n \"sphinx_design\",\n \"sphinx_issues\",\n]\n\n# Recommended by sphinx_design when using the MyST Parser\nmyst_enable_extensions = [\"colon_fence\"]\n\n# Notebook thumbnails\nnbsphinx_thumbnails = {\n \"examples/02_classification\": \"examples/img/tsc.png\",\n}\n\n# Use bootstrap CSS from theme.\npanels_add_bootstrap_css = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\n# The main toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\n \"_build\",\n \".ipynb_checkpoints\",\n \"Thumbs.db\",\n \".DS_Store\",\n]\n\nadd_module_names = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# see http://stackoverflow.com/q/12206334/562769\nnumpydoc_show_class_members = True\n# this is needed for some reason...\n# see https://github.com/numpy/numpydoc/issues/69\nnumpydoc_class_members_toctree = False\n\nnumpydoc_validation_checks = {\"all\"}\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# Members and inherited-members default to showing methods and attributes from a\n# class or those inherited.\n# Member-order orders the documentation in the order of how the members are defined in\n# the source code.\nautodoc_default_options = {\n \"members\": True,\n \"inherited-members\": True,\n \"member-order\": \"bysource\",\n}\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = False\n\n# When building HTML using the sphinx.ext.mathjax (enabled by default),\n# Myst-Parser injects the tex2jax_ignore (MathJax v2) and mathjax_ignore (MathJax v3)\n# classes in to the top-level section of each MyST document, and adds some default\n# configuration. This ensures that MathJax processes only math, identified by the\n# dollarmath and amsmath extensions, or specified in math directives. We here silence\n# the corresponding warning that this override happens.\nsuppress_warnings = [\"myst.mathjax\"]\n\n# Link to GitHub repo for github_issues extension\nissues_github_path = \"sktime/sktime\"\n\n\ndef linkcode_resolve(domain, info):\n \"\"\"Return URL to source code corresponding.\n\n Parameters\n ----------\n domain : str\n info : dict\n\n Returns\n -------\n url : str\n \"\"\"\n\n def find_source():\n # try to find the file and line number, based on code from numpy:\n # https://github.com/numpy/numpy/blob/main/doc/source/conf.py#L286\n obj = sys.modules[info[\"module\"]]\n for part in info[\"fullname\"].split(\".\"):\n obj = getattr(obj, part)\n import inspect\n import os\n\n fn = inspect.getsourcefile(obj)\n fn = os.path.relpath(fn, start=os.path.dirname(sktime.__file__))\n source, lineno = inspect.getsourcelines(obj)\n return fn, lineno, lineno + len(source) - 1\n\n if domain != \"py\" or not info[\"module\"]:\n return None\n try:\n filename = \"sktime/%s#L%d-L%d\" % find_source()\n except Exception:\n filename = info[\"module\"].replace(\".\", \"/\") + \".py\"\n return \"https://github.com/sktime/sktime/blob/%s/%s\" % (\n CURRENT_VERSION,\n filename,\n )\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\nhtml_theme = \"pydata_sphinx_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/sktime/sktime\",\n \"icon\": \"fab fa-github\",\n },\n {\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/sktime-group/shared_invite/zt-1cghagwee-sqLJ~eHWGYgzWbqUX937ig\", # noqa: E501\n \"icon\": \"fab fa-slack\",\n },\n {\n \"name\": \"Discord\",\n \"url\": \"https://discord.com/invite/gqSab2K\",\n \"icon\": \"fab fa-discord\",\n },\n {\n \"name\": \"LinkedIn\",\n \"url\": \"https://www.linkedin.com/company/sktime/\",\n \"icon\": \"fab fa-linkedin\",\n },\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/sktime_toolbox\",\n \"icon\": \"fab fa-twitter\",\n },\n ],\n \"favicons\": [\n {\n \"rel\": \"icon\",\n \"sizes\": \"16x16\",\n \"href\": \"images/sktime-favicon.ico\",\n }\n ],\n \"show_prev_next\": False,\n \"use_edit_page_button\": False,\n \"navbar_start\": [\"navbar-logo\"],\n \"navbar_center\": [\"navbar-nav\"],\n \"navbar_end\": [\"navbar-icon-links\"],\n}\nhtml_logo = \"images/sktime-logo-text-horizontal.png\"\nhtml_context = {\n \"github_user\": \"sktime\",\n \"github_repo\": \"sktime\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source/\",\n}\nhtml_favicon = \"images/sktime-favicon.ico\"\nhtml_sidebars = {\n \"**\": [\"search-field.html\", \"sidebar-nav-bs.html\", \"sidebar-ethical-ads.html\"]\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"css/custom.css\"]\nhtml_js_files = [\n \"js/dynamic_table.js\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\nhtml_show_sourcelink = False\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"sktimedoc\"\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"sktime.tex\", \"sktime Documentation\", \"sktime developers\", \"manual\"),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"sktime\", \"sktime Documentation\", [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"sktime\",\n \"sktime Documentation\",\n author,\n \"sktime\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\ndef _make_estimator_overview(app):\n \"\"\"Make estimator overview table.\"\"\"\n import pandas as pd\n\n from sktime.registry import all_estimators\n\n def _process_author_info(author_info):\n \"\"\"\n Process author information from source code files.\n\n Parameters\n ----------\n author_info : str\n Author information string from source code files.\n\n Returns\n -------\n author_info : str\n Preprocessed author information.\n\n Notes\n -----\n A list of author names is turned into a string.\n Multiple author names will be separated by a comma,\n with the final name always preceded by \"&\".\n \"\"\"\n if isinstance(author_info, list):\n if len(author_info) > 1:\n return \", \".join(author_info[:-1]) + \" & \" + author_info[-1]\n else:\n return author_info[0]\n else:\n return author_info\n\n def _does_not_start_with_underscore(input_string):\n return not input_string.startswith(\"_\")\n\n # creates dataframe as df\n COLNAMES = [\"Class Name\", \"Estimator Type\", \"Authors\"]\n\n df = pd.DataFrame([], columns=COLNAMES)\n\n for modname, modclass in all_estimators():\n algorithm_type = \"::\".join(str(modclass).split(\".\")[1:-2])\n try:\n author_info = _process_author_info(modclass.__author__)\n except AttributeError:\n try:\n author_info = _process_author_info(\n import_module(modclass.__module__).__author__\n )\n except AttributeError:\n author_info = \"no author info\"\n\n # includes part of class string\n modpath = str(modclass)[8:-2]\n path_parts = modpath.split(\".\")\n # joins strings excluding starting with '_'\n clean_path = \".\".join(list(filter(_does_not_start_with_underscore, path_parts)))\n # adds html link reference\n modname = str(\n '<a href=\"https://www.sktime.org/en/latest/api_reference'\n + \"/auto_generated/\"\n + clean_path\n + '.html\">'\n + modname\n + \"</a>\"\n )\n\n record = pd.DataFrame([modname, algorithm_type, author_info], index=COLNAMES).T\n df = pd.concat([df, record], ignore_index=True)\n with open(\"estimator_overview_table.md\", \"w\") as file:\n df.to_markdown(file, index=False)\n\n\ndef setup(app):\n \"\"\"Set up sphinx builder.\n\n Parameters\n ----------\n app : Sphinx application object\n \"\"\"\n\n def adds(pth):\n print(\"Adding stylesheet: %s\" % pth) # noqa: T201, T001\n app.add_css_file(pth)\n\n adds(\"fields.css\") # for parameters, etc.\n\n app.connect(\"builder-inited\", _make_estimator_overview)\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for nbsphinx extension ---------------------------------------\nnbsphinx_execute = \"never\" # always # whether to run notebooks\nnbsphinx_allow_errors = False # False\nnbsphinx_timeout = 600 # seconds, set to -1 to disable timeout\n\n# add Binder launch buttom at the top\ncurrent_file = \"{{ env.doc2path( env.docname, base=None) }}\"\n\n# make sure Binder points to latest stable release, not main\nbinder_url = f\"https://mybinder.org/v2/gh/sktime/sktime/{CURRENT_VERSION}?filepath={current_file}\" # noqa\nnbsphinx_prolog = f\"\"\"\n.. |binder| image:: https://mybinder.org/badge_logo.svg\n.. 
_Binder: {binder_url}\n\n|Binder|_\n\"\"\"\n\n# add link to original notebook at the bottom\nnotebook_url = (\n f\"https://github.com/sktime/sktime/tree/{CURRENT_VERSION}/{current_file}\" # noqa\n)\nnbsphinx_epilog = f\"\"\"\n----\n\nGenerated using nbsphinx_. The Jupyter notebook can be found here_.\n\n.. _here: {notebook_url}\n.. _nbsphinx: https://nbsphinx.readthedocs.io/\n\"\"\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/{.major}\".format(sys.version_info), None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference\", None),\n \"matplotlib\": (\"https://matplotlib.org/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable/\", None),\n \"joblib\": (\"https://joblib.readthedocs.io/en/latest/\", None),\n \"scikit-learn\": (\"https://scikit-learn.org/stable/\", None),\n \"statsmodels\": (\"https://www.statsmodels.org/stable/\", None),\n}\n\n# -- Options for _todo extension ----------------------------------------------\ntodo_include_todos = False\n", "path": "docs/source/conf.py" } ]
diff --git a/.all-contributorsrc b/.all-contributorsrc index 87bcf4d0fd1..016c6a1081f 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -1937,6 +1937,16 @@ "contributions": [ "doc" ] + }, + { + "login": "dainelli98", + "name": "Daniel Martín Martínez", + "avatar_url": "https://avatars.githubusercontent.com/dainelli98", + "profile": "https://www.linkedin.com/in/daniel-martin-martinez", + "contributions": [ + "doc", + "bug" + ] } ] } diff --git a/docs/source/conf.py b/docs/source/conf.py index a659e87ac68..998e31c161c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -42,6 +42,7 @@ extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", + "sphinx.ext.autosectionlabel", "numpydoc", "sphinx.ext.intersphinx", "sphinx.ext.linkcode", # link to GitHub source code via linkcode_resolve() diff --git a/docs/source/installation.rst b/docs/source/installation.rst index a89d3b5e46b..22f6fb4ce43 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -37,6 +37,11 @@ To install ``sktime`` with maximum dependencies, including soft dependencies, in pip install sktime[all_extras] +.. warning:: + Some of the dependencies included in ``all_extras`` do not work on mac ARM-based processors, such + as M1, M2, M1Pro, M1Max or M1Ultra. This may cause an error during installation. Mode details can + be found in the :ref:`troubleshooting section<Dependency error on mac ARM>` below. + Installing sktime from conda ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -171,7 +176,7 @@ In the ``anaconda prompt`` terminal: 2. Create new environment with python 3.8: :code:`conda create -n sktime-dev python=3.8` .. warning:: - If you already have an environment called "sktime-dev" from a previous attempt you will first need to remove this + If you already have an environment called "sktime-dev" from a previous attempt you will first need to remove this. 3. Activate the environment: :code:`conda activate sktime-dev` @@ -214,6 +219,28 @@ your environment is activated and linked to whatever IDE you are using. If you Notebooks, follow `these instructions <https://janakiev.com/blog/jupyter-virtual-envs/>`_ for adding your virtual environment as a new kernel for your notebook. +Installing ``all_extras`` on mac with ARM processor +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +If you are using a mac with an ARM processor, you may encounter an error when installing +``sktime[all_extras]``. This is due to the fact that some libraries included in ``all_extras`` +are not compatible with ARM-based processors. + +The workaround is not to install some of the packages in ``all_extras`` and install ARM compatible +replacements for others: + +* Do not install the following packages: + * ``esig`` + * ``prophet`` + * ``tsfresh`` + * ``tslearn`` +* Replace ``tensorflow`` package with the following packages: + * ``tensorflow-macos`` + * ``tensorflow-metal`` (optional) + +Also, ARM-based processors have issues when installing packages distributed as source distributions +instead of Python wheels. To avoid this issue when installing a package you can try installing it +through conda or use a prior version of the package that was distributed as a wheel. + Other Startup Resources ----------------------- diff --git a/estimator_overview_table.md b/estimator_overview_table.md new file mode 100644 index 00000000000..e69de29bb2d
OpenMined__PySyft-4035
Return invalid dtype when MPC is applied to Other Dtype Tensor
## Description
When MPC sharing is applied to an int tensor, the resulting wrapper should keep the int dtype, but it reports float instead.

## How to Reproduce
```python
x = torch.tensor([1, 2, 3])
print(x.dtype)  # torch.int64

x = x.share(bob, alice, crypto_provider=theo)
print(x.dtype)  # torch.float32, but should be torch.int64

print(x.get().dtype)  # torch.int64
```

## Expected Behavior
`x.dtype` should be `torch.int64` after sharing.

## Screenshots
![image](https://user-images.githubusercontent.com/39186433/89849067-a9f89380-dbc2-11ea-84aa-6bf791a46b78.png)

## System Information
- OS: macOS
- OS Version: Catalina
- Language Version: Python 3.7
- Package Manager Version: Conda 4.8.3
- Browser (if applicable): [e.g. Google Chrome]
- Browser Version (if applicable): [e.g. 81.0.4044.138]

## Additional Context
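A minimal sketch of the property the reporter expects, added here for illustration only (it is not part of the original report). It assumes a hooked PySyft session in which the workers `bob`, `alice` and the crypto provider `theo` from the snippet above already exist; the helper name is made up.

```python
# Minimal sketch, assuming a hooked PySyft session with workers `bob`, `alice`
# and crypto provider `theo` already created (as in the report above).
import torch


def assert_share_preserves_dtype(tensor, *workers, crypto_provider):
    """Share `tensor` via MPC and check the local wrapper keeps the plaintext dtype."""
    original_dtype = tensor.dtype  # e.g. torch.int64
    shared = tensor.share(*workers, crypto_provider=crypto_provider)
    # Expected: the wrapper reports the same dtype as the plaintext tensor ...
    assert shared.dtype == original_dtype, f"got {shared.dtype}, expected {original_dtype}"
    # ... and reconstructing the plaintext gives back the original dtype too.
    assert shared.get().dtype == original_dtype


# Hypothetical usage with the workers from the report:
# assert_share_preserves_dtype(torch.tensor([1, 2, 3]), bob, alice, crypto_provider=theo)
```

With the behaviour described above, the first assertion fails: the wrapper reports `torch.float32` even though the reconstructed value is still `torch.int64`.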
[ { "content": "from typing import Union, List\nimport weakref\nimport warnings\n\nimport torch\n\nimport syft\nfrom syft.generic.frameworks.hook import hook_args\nfrom syft.generic.frameworks.overload import overloaded\nfrom syft.frameworks.torch.tensors.interpreters.paillier import PaillierTensor\nfrom syft.messaging.message import TensorCommandMessage\nfrom syft.generic.frameworks.types import FrameworkTensor\nfrom syft.generic.abstract.tensor import AbstractTensor\nfrom syft.generic.abstract.hookable import hookable\nfrom syft.generic.pointers.pointer_tensor import PointerTensor\nfrom syft.generic.utils import memorize\nfrom syft.workers.base import BaseWorker\n\nfrom syft.exceptions import PureFrameworkTensorFoundError\nfrom syft.exceptions import InvalidTensorForRemoteGet\n\n\ndef _get_maximum_precision():\n \"\"\"This function returns the maximum value allowed for precision fractions before the\n chain decides to use LPT.\n\n This function can be overridden if the setup requires the use of LargePrecisionTensor\n from a smaller precision.\n\n The default value is the size of torch.long\n\n Returns:\n The maximum value for precision allowed in this setup\n \"\"\"\n return default_pytorch_maximum_precision()\n\n\ndef default_pytorch_maximum_precision():\n \"\"\"Dealing with integers > 2**63-1 is not fun with precision tensors.\n \"\"\"\n return 63\n\n\nclass TorchTensor(AbstractTensor):\n \"\"\"Add methods to this tensor to have them added to every torch.Tensor object.\n\n This tensor is simply a more convenient way to add custom functions to\n all Torch tensor types. When you add a function to this tensor, it will\n be added to EVERY native torch tensor type (i.e. torch.Torch) automatically\n by the TorchHook (which is in frameworks/torch/hook.py).\n\n Note: all methods from AbstractTensor will also be included because this\n tensor extends AbstractTensor. So, if you're looking for a method on\n the native torch tensor API but it's not listed here, you might try\n checking AbstractTensor.\n \"\"\"\n\n origin = None\n id_at_origin = None\n\n def trigger_origin_backward_hook(self, origin: str, id_at_origin: int):\n \"\"\"\n This hook is triggered when a tensor which was received from a sender has\n a gradient update. It will send back to this sender and his original tensor\n this gradient value to be set remotely. 
Also, because this is triggered during\n backward(), the backward command is also forwarded back.\n\n Args:\n origin (str): id of the worker where this tensor comes from\n id_at_origin (int): what was its original id\n \"\"\"\n\n def trigger_origin_backward(grad):\n \"\"\"\n The function setting back the gradient and calling backward\n\n Args:\n grad: the gradient tensor being set\n \"\"\"\n\n location = self.owner.get_worker(origin)\n\n # set gradient at the origin\n message = TensorCommandMessage.computation(\"set_grad\", id_at_origin, (grad,), {}, None)\n self.owner.send_msg(message=message, location=location)\n\n # call backward()\n message = TensorCommandMessage.computation(\"backward\", id_at_origin, (grad,), {}, None)\n self.owner.send_msg(message=message, location=location)\n\n return trigger_origin_backward\n\n def set_grad(self, grad):\n self.grad = grad\n\n @property\n def tags(self):\n if self.has_child():\n return self.child.tags\n else:\n if not hasattr(self, \"_tags\"):\n self._tags = None\n return self._tags\n\n @tags.setter\n def tags(self, new_tags):\n if self.has_child():\n if new_tags is not None:\n self.child.tags = set(new_tags)\n else:\n self.child.tags = set()\n else:\n self._tags = new_tags\n\n @property\n def description(self):\n if self.has_child():\n return self.child.description\n else:\n if not hasattr(self, \"_description\"):\n self._description = None\n return self._description\n\n @description.setter\n def description(self, new_desc):\n if self.has_child():\n self.child.description = new_desc\n else:\n self._description = new_desc\n\n @property\n def shape(self):\n if self.is_wrapper:\n return self.child.shape\n else:\n return self.native_shape\n\n @property\n def data(self):\n if self.is_wrapper:\n return self.child.data\n else:\n return self.native_data\n\n @property\n def grad(self):\n if self.is_wrapper:\n child_grad = self.child.grad\n if child_grad is None:\n return None\n else:\n if child_grad.is_wrapper:\n return child_grad\n else:\n return child_grad.wrap()\n else:\n to_return = self.native_grad\n\n # good to ensure that the ID stays consistent\n # not 100% this is required but it's at least\n # good practice\n try:\n to_return.id = self.grad_id\n except AttributeError:\n if to_return is not None and hasattr(to_return, \"id\"):\n self.grad_id = to_return.id\n\n return to_return\n\n @grad.setter\n def grad(self, new_grad):\n\n # If grad is not a pure torch tensor you need to store the chain in a\n # specific place otherwise it will get deleted\n if new_grad is not None and (\n not isinstance(new_grad, torch.Tensor) or hasattr(new_grad, \"child\")\n ):\n self.child.grad = new_grad # .wrap()\n else:\n if hasattr(self, \"native_grad\"):\n with torch.no_grad():\n self.native_grad = new_grad\n elif new_grad is not None:\n self.native_grad = new_grad\n return self\n\n def __str__(self) -> str:\n if self.has_child():\n if self.is_wrapper:\n return \"(Wrapper)>\" + self.child.__str__()\n else:\n return type(self).__name__ + \">\" + self.child.__str__()\n else:\n return self.native___str__()\n\n def __repr__(self) -> str:\n if self.has_child():\n if self.is_wrapper:\n return \"(Wrapper)>\" + self.child.__str__()\n else:\n return type(self).__name__ + \">\" + self.child.__repr__()\n else:\n out = self.native___repr__()\n\n big_repr = False\n\n if self.tags is not None and len(self.tags):\n big_repr = True\n out += \"\\n\\tTags: \"\n for tag in self.tags:\n out += str(tag) + \" \"\n\n if self.description is not None:\n big_repr = True\n out += 
\"\\n\\tDescription: \" + str(self.description).split(\"\\n\")[0] + \"...\"\n\n if big_repr:\n out += \"\\n\\tShape: \" + str(self.shape)\n\n return out\n\n def __eq__(self, other):\n return self.eq(other)\n\n @property\n def id(self):\n if self.is_wrapper:\n return self.child.id\n else:\n try:\n return self._id\n except AttributeError:\n self._id = syft.ID_PROVIDER.pop()\n return self._id\n\n @property\n def gc(self):\n return self.garbage_collection\n\n @gc.setter\n def gc(self, flag):\n self.garbage_collection = flag\n\n @property\n def disable_gc(self):\n self.child.garbage_collect_data = False\n self.garbage_collection = False\n return self\n\n @property\n def garbage_collection(self):\n if not self.has_child():\n if hasattr(self, \"ptr\") and self.ptr is not None:\n self.child = self.ptr\n self.child.garbage_collect_data = True\n return self.child.garbage_collect_data\n\n @garbage_collection.setter\n def garbage_collection(self, flag):\n if not self.has_child():\n if hasattr(self, \"ptr\") and self.ptr is not None:\n self.child = self.ptr\n self.child.garbage_collect_data = flag\n\n @id.setter\n def id(self, new_id):\n if self.is_wrapper:\n self.child.id = new_id\n else:\n self._id = new_id\n\n def get_class_attributes(self):\n \"\"\"\n Return class attributes for torch tensors\n \"\"\"\n return {\"type\": self.dtype}\n\n def _is_parameter(self):\n \"\"\"\n Utility method to test if the tensor is in fact a Parameter\n \"\"\"\n return isinstance(self, torch.nn.Parameter)\n\n @staticmethod\n @overloaded.module\n def torch(module):\n @overloaded.module\n def nn(module):\n \"\"\"\n The syntax is the same, so @overloaded.module handles recursion\n Note that we don't need to add the @staticmethod decorator\n \"\"\"\n\n module.nn = nn # Handles all the overloading properly\n\n @staticmethod\n @overloaded.module\n def native_torch(module):\n def roll(tensor, shifts, **kwargs):\n if isinstance(shifts, FrameworkTensor):\n shifts = int(shifts.item())\n return torch.native_roll(tensor, shifts, **kwargs)\n\n module.roll = roll\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Operates as a router for functions. A function call always starts\n by being handled here and 3 scenarii must be considered:\n Real Torch tensor:\n The arguments of the function are real tensors so we should\n run the native torch command\n Torch wrapper:\n The arguments are just wrappers at the top of a chain\n (ex: wrapper>LoggingTensor>Torch tensor), so just forward\n the instruction to the next layer type in the chain (in\n the example above to LoggingTensor.handle_func_command),\n get the response and replace a wrapper on top of all tensors\n found in the response.\n Syft Tensor:\n The arguments are syft tensors of same type: this can happen\n if at any node of the chain where some function is forwarded,\n the handle_func_command modify the function and make a new\n call but keeps the arguments \"un-wrapped\". 
Making a new call\n means that by default the command is treated here in the\n global router.\n :param command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs_])\n :return: the response of the function command\n \"\"\"\n cmd, _, args_, kwargs_ = command\n\n try: # will work if tensors are wrappers\n # Replace all torch tensor with their child attribute\n # Note that we return also args_type which helps handling case 3 in the docstring\n new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(\n cmd, args_, kwargs_, return_args_type=True\n )\n # This handles case 3: it redirects the command to the appropriate class depending\n # of the syft type of the arguments and returns\n if args_type not in FrameworkTensor:\n return args_type.handle_func_command(command)\n # build the new command\n new_command = (cmd, None, new_args, new_kwargs)\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n command = cls.rgetattr(cls, cmd)\n return command(*args_, **kwargs_)\n except AttributeError:\n pass\n\n # Send it to the appropriate class and get the response\n try:\n response = new_type.handle_func_command(new_command)\n except RuntimeError:\n # Change the library path to avoid errors on layers like AvgPooling\n list_new_command = list(new_command)\n list_new_command[0] = cls._fix_torch_library(new_command[0])\n new_command = tuple(list_new_command)\n response = new_type.handle_func_command(new_command)\n\n # Put back the wrappers where needed\n response = hook_args.hook_response(cmd, response, wrap_type=args_type)\n except PureFrameworkTensorFoundError: # means that it's not a wrapper but a pure tensor\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n command = cls.rgetattr(cls, f\"native_{cmd}\")\n return command(*args_, **kwargs_)\n except AttributeError:\n pass\n\n # Run the native function with the new args\n # Note the the cmd should already be checked upon reception by the worker\n # in the execute_command function\n try:\n response = cls._get_response(cmd, args_, kwargs_)\n except AttributeError:\n # Change the library path to avoid errors on layers like AvgPooling\n cmd = cls._fix_torch_library(cmd)\n response = cls._get_response(cmd, args_, kwargs_)\n\n return response\n\n @staticmethod\n @memorize\n def _get_method(cmd):\n module = syft.local_worker.hook\n segments = cmd.split(\".\")\n submodules = segments[:-1]\n command = segments[-1]\n\n for sm in submodules:\n module = getattr(module, sm)\n\n try:\n command_method = getattr(module, f\"native_{command}\")\n except AttributeError: # the function isn't overloaded\n command_method = getattr(module, command)\n\n return command_method\n\n @staticmethod\n def _get_response(cmd, args_, kwargs_):\n \"\"\"\n Return the evaluation of the cmd string parameter\n \"\"\"\n command_method = TorchTensor._get_method(cmd)\n\n if isinstance(args_, tuple):\n response = command_method(*args_, **kwargs_)\n else:\n response = command_method(args_, **kwargs_)\n\n return response\n\n def _fix_torch_library(cmd):\n \"\"\"\n Change the cmd string parameter to use nn.functional path to avoid errors.\n \"\"\"\n if \"_C._nn\" in cmd:\n cmd = cmd.replace(\"_C._nn\", \"nn.functional\")\n return cmd\n\n @hookable\n def send(\n self,\n *location,\n inplace: bool = False,\n user: object = None,\n local_autograd: bool = False,\n 
requires_grad: bool = False,\n preinitialize_grad: bool = False,\n no_wrap: bool = False,\n garbage_collect_data: bool = True,\n ):\n \"\"\"Gets the pointer to a new remote object.\n\n One of the most commonly used methods in PySyft, this method serializes the object upon\n which it is called (self), sends the object to a remote worker, creates a pointer to\n that worker, and then returns that pointer from this function.\n\n Args:\n location: The BaseWorker object which you want to send this object to. Note that\n this is never actually the BaseWorker but instead a class which instantiates the\n BaseWorker abstraction.\n inplace: if true, return the same object instance, else a new wrapper\n user (object,optional): User credentials to be verified.\n local_autograd: Use autograd system on the local machine instead of PyTorch's\n autograd on the workers.\n requires_grad: Default to False. If true, whenever the remote value of this tensor\n will have its gradient updated (for example when calling .backward()), a call\n will be made to set back the local gradient value.\n preinitialize_grad: Initialize gradient for AutogradTensors to a tensor\n no_wrap: If True, wrap() is called on the created pointer\n garbage_collect_data: argument passed down to create_pointer()\n\n Returns:\n A torch.Tensor[PointerTensor] pointer to self. Note that this\n object will likely be wrapped by a torch.Tensor wrapper.\n\n Raises:\n SendNotPermittedError: Raised if send is not permitted on this tensor.\n \"\"\"\n\n # If you send a pointer p1, you want the pointer to pointer p2 to control\n # the garbage collection and not the remaining old p1 (here self). Because if\n # p2 is not GCed, GCing p1 shouldn't delete the remote tensor, but if you\n # want to do so, as p2 is not GCed, you can still do `del p2`.\n # This allows to chain multiple .send().send() calls.\n\n if len(location) == 1:\n\n location = location[0]\n\n if self.has_child() and isinstance(self.child, PointerTensor):\n self.child.garbage_collect_data = False\n if self._is_parameter():\n self.data.child.garbage_collect_data = False\n\n ptr = self.owner.send(\n self,\n location,\n local_autograd=local_autograd,\n requires_grad=requires_grad,\n preinitialize_grad=preinitialize_grad,\n garbage_collect_data=garbage_collect_data,\n )\n\n ptr.description = self.description\n ptr.tags = self.tags\n\n # The last pointer should control remote GC, not the previous self.ptr\n if hasattr(self, \"ptr\") and self.ptr is not None:\n ptr_ = self.ptr()\n if ptr_ is not None:\n ptr_.garbage_collect_data = False\n\n # we need to cache this weak reference to the pointer so that\n # if this method gets called multiple times we can simply re-use\n # the same pointer which was previously created\n self.ptr = weakref.ref(ptr)\n\n if self._is_parameter():\n if inplace:\n self.is_wrapper = True\n with torch.no_grad():\n self.set_()\n self.data = ptr\n output = self\n else:\n if no_wrap:\n raise ValueError(\"Parameters can't accept no_wrap=True\")\n wrapper = torch.Tensor()\n param_wrapper = torch.nn.Parameter(wrapper)\n param_wrapper.is_wrapper = True\n with torch.no_grad():\n param_wrapper.set_()\n param_wrapper.data = ptr\n output = param_wrapper\n else:\n if inplace:\n self.is_wrapper = True\n self.set_()\n self.child = ptr\n return self\n else:\n output = ptr if no_wrap else ptr.wrap()\n\n if self.requires_grad:\n # This is for AutogradTensor to work on MultiPointerTensors\n # With pre-initialized gradients, this should get it from AutogradTensor.grad\n if 
preinitialize_grad:\n grad = output.child.grad\n else:\n grad = output.attr(\"grad\")\n\n output.grad = grad\n\n # Because of the way PyTorch works, .grad is prone to\n # create entirely new Python objects for the tensor, which\n # inadvertently deletes our custom attributes (like .child)\n # But, if we keep a backup reference around, PyTorch seems\n # to re-use it, which means .grad keeps the attributes we\n # want it to keep. #HackAlert\n output.backup_grad = grad\n\n if local_autograd:\n output = syft.AutogradTensor(data=output, preinitialize_grad=preinitialize_grad).on(\n output\n )\n\n else:\n\n children = []\n for loc in location:\n children.append(self.clone().send(loc, no_wrap=True))\n\n output = syft.MultiPointerTensor(children=children)\n\n if not no_wrap:\n output = output.wrap()\n\n return output\n\n def send_(self, *location, **kwargs):\n \"\"\"\n Calls send() with inplace option, but only with a single location\n :param location: workers locations\n :return:\n \"\"\"\n if len(location) > 1:\n raise NotImplementedError(\"Inplace send to several workers is currently not supported.\")\n\n return self.send(*location, inplace=True, **kwargs)\n\n def create_pointer(\n self,\n location: BaseWorker = None,\n id_at_location: (str or int) = None,\n owner: BaseWorker = None,\n ptr_id: (str or int) = None,\n garbage_collect_data: bool = True,\n shape=None,\n **kwargs,\n ) -> PointerTensor:\n \"\"\"Creates a pointer to the \"self\" torch.Tensor object.\n\n Returns:\n A PointerTensor pointer to self. Note that this\n object will likely be wrapped by a torch.Tensor wrapper.\n \"\"\"\n if id_at_location is None:\n id_at_location = self.id\n\n if ptr_id is None:\n if location is not None and location.id != self.owner.id:\n ptr_id = self.id\n else:\n ptr_id = syft.ID_PROVIDER.pop()\n\n if shape is None:\n shape = self.shape\n\n ptr = syft.PointerTensor.create_pointer(\n self, location, id_at_location, owner, ptr_id, garbage_collect_data, shape\n )\n\n return ptr\n\n def mid_get(self):\n \"\"\"This method calls .get() on a child pointer and correctly registers the results\"\"\"\n if not self.has_child():\n raise InvalidTensorForRemoteGet(self)\n\n self.child.mid_get()\n\n def remote_get(self):\n \"\"\"Assuming .child is a PointerTensor, this method calls .get() on the tensor\n that the .child is pointing to (which should also be a PointerTensor)\n\n TODO: make this kind of message forwarding generic?\n \"\"\"\n if not self.has_child():\n raise InvalidTensorForRemoteGet(self)\n\n self.child.remote_get()\n\n return self\n\n def get(self, *args, inplace: bool = False, user=None, reason: str = \"\", **kwargs):\n \"\"\"Requests the tensor/chain being pointed to, be serialized and return\n Args:\n args: args to forward to worker\n inplace: if true, return the same object instance, else a new wrapper\n kwargs: kwargs to forward to worker\n Raises:\n GetNotPermittedError: Raised if get is not permitted on this tensor\n \"\"\"\n\n # If it is a local tensor/chain, we don't need to verify permissions\n if not isinstance(self.child, syft.PointerTensor):\n tensor = self.child.get(*args, **kwargs)\n else: # Remote tensor/chain\n tensor = self.child.get(*args, user=user, reason=reason, **kwargs)\n\n # Clean the wrapper\n delattr(self, \"child\")\n\n # Parameters use .data instead of children\n # so we need to have special support to make sure\n # that Parmeters operate inline (because they're\n # typically being managed inside of a model/optimizer\n # so not using the same wrapper can cause the model/\n # 
optimizer to lose track of where the actual weights\n # are.\n if isinstance(self, torch.nn.Parameter):\n self.is_wrapper = tensor.data.is_wrapper\n if inplace:\n self.data = tensor.data\n self.grad = tensor.grad\n return self\n else:\n return tensor\n\n if inplace:\n self.set_(tensor)\n if hasattr(tensor, \"child\"):\n self.child = tensor.child\n else:\n self.is_wrapper = False\n return self\n else:\n return tensor\n\n def get_(self, *args, **kwargs):\n \"\"\"\n Calls get() with inplace option set to True\n \"\"\"\n return self.get(*args, inplace=True, **kwargs)\n\n def allow(self, user=None) -> bool:\n \"\"\" This function returns will return True if it isn't a PrivateTensor, otherwise it will\n return the result of PrivateTensor's allow method.\n\n Args:\n user (object,optional): User credentials to be verified.\n\n Returns:\n boolean: If it is a public tensor/ allowed user, returns true, otherwise it returns\n false.\n \"\"\"\n # If it is a wrapper\n if self.is_wrapper:\n current_tensor = self.child\n\n # Verify permissions for each element on the tensor chain.\n while hasattr(current_tensor, \"child\"):\n\n # If it has a list of allowed users, verify permissions,\n # otherwise (public tensors) go to the next.\n if hasattr(current_tensor, \"allowed_users\"):\n allow = current_tensor.allow(user)\n if not allow:\n return False\n\n # Go to next element on the tensor chain\n current_tensor = current_tensor.child\n return True\n\n def move(self, location: BaseWorker, requires_grad: bool = False):\n \"\"\"\n Move acts on a pointer to A to move the remote value to B (=location).\n\n Note a A will keep a copy of his value that he sent to B. This follows the\n .send() paradigm where the local worker keeps a copy of the value he sends.\n\n Args:\n location: the worker where the remote value should be moved\n requires_grad: see send() for details\n\n Returns:\n A pointer to the worker location\n \"\"\"\n new_ptr = self.child.move(location, requires_grad)\n # We get the owner from self.child because the owner of a wrapper is\n # not reliable and sometimes end up being the syft.local_worker\n self.child.owner.register_obj(self)\n if isinstance(new_ptr, PointerTensor):\n return new_ptr.wrap()\n else:\n return new_ptr\n\n def move_(self, location: BaseWorker, requires_grad: bool = False):\n \"\"\"\n Inplace version of move\n \"\"\"\n new_ptr = self.move(location, requires_grad)\n self.child = new_ptr\n return self\n\n def remote_send(self, location):\n return self.child.remote_send(location).wrap()\n\n def attr(self, attr_name):\n \"\"\"\"\"\"\n\n if self.is_wrapper:\n attr_val = self.child.attr(attr_name)\n\n if attr_name == \"grad\":\n self.grad = attr_val\n else:\n attr_val = getattr(self, attr_name)\n\n return attr_val\n\n def clone(self, *args, **kwargs):\n \"\"\"\n Clone should keep ids unchanged, contrary to copy\n \"\"\"\n cloned_tensor = self.native_clone(*args, **kwargs)\n cloned_tensor.id = self.id\n cloned_tensor.owner = self.owner\n cloned_tensor.is_wrapper = self.is_wrapper\n\n if self.has_child():\n cloned_tensor.child = self.child.clone(*args, **kwargs)\n\n return cloned_tensor\n\n def float_prec(self):\n if isinstance(self.child, PointerTensor):\n self.child = self.child.float_precision()\n return self\n\n return self.child.float_precision()\n\n float_precision = float_prec\n\n def float_prec_(self):\n tensor = self.float_prec()\n if hasattr(tensor, \"child\"):\n self.child = tensor.child\n elif self._is_parameter():\n self.is_wrapper = False\n self.data = tensor\n 
self.data.is_wrapper = False\n else:\n del self.child\n self.set_(tensor)\n self.is_wrapper = False\n return self\n\n float_precision_ = float_prec_\n\n def private_tensor(self, *args, allowed_users: List[str], no_wrap: bool = False, **kwargs):\n \"\"\"\n Convert a tensor or syft tensor to private tensor\n\n Args:\n *args (tuple): args to transmit to the private tensor.\n allowed_users (list): List of allowed users.\n no_wrap (bool): if True, we don't add a wrapper on top of the private tensor\n **kwargs (dict): kwargs to transmit to the private tensor\n \"\"\"\n\n if not kwargs.get(\"owner\"):\n kwargs[\"owner\"] = self.owner\n\n if self.is_wrapper:\n self.child = (\n syft.PrivateTensor(tags=self.tags, *args, **kwargs)\n .on(self.child, wrap=False)\n .register_credentials(tuple(allowed_users))\n )\n if no_wrap:\n return self.child\n else:\n return self\n\n private_tensor = (\n syft.PrivateTensor(tags=self.tags, *args, **kwargs)\n .on(self, wrap=False)\n .register_credentials(tuple(allowed_users))\n )\n if not no_wrap:\n private_tensor = private_tensor.wrap()\n\n return private_tensor\n\n def fix_prec(self, *args, no_wrap: bool = False, **kwargs):\n \"\"\"\n Convert a tensor or syft tensor to fixed precision\n\n Args:\n *args (tuple): args to transmit to the fixed precision tensor\n no_wrap (bool): if True, we don't add a wrapper on top of the fixed precision tensor\n **kwargs (dict): kwargs to transmit to the fixed precision tensor\n \"\"\"\n\n if not kwargs.get(\"owner\"):\n kwargs[\"owner\"] = self.owner\n\n if self.is_wrapper:\n child = self.child.fix_prec(*args, **kwargs)\n if no_wrap:\n return child\n else:\n return child.wrap()\n\n base = kwargs.get(\"base\", 10)\n prec_fractional = kwargs.get(\"precision_fractional\", 3)\n\n max_precision = _get_maximum_precision()\n fpt_tensor = syft.FixedPrecisionTensor(*args, **kwargs).on(self, wrap=False).fix_precision()\n\n if not no_wrap:\n fpt_tensor = fpt_tensor.wrap()\n\n return fpt_tensor\n\n fix_precision = fix_prec\n\n def fix_prec_(self, *args, **kwargs):\n \"\"\"\n Performs an inplace transformation to fixed precision and change self to\n be a wrapper\n\n Args:\n *args: args to transmit to fix_prec\n **kwargs: kwargs to transmit to fix_prec\n\n Returns:\n self seen as a wrapper\n \"\"\"\n # We specify id to make sure the inplace op doesn't change the tensor id\n self.child = self.fix_prec(*args, no_wrap=True, id=self.id, **kwargs)\n self.is_wrapper = True\n return self\n\n fix_precision_ = fix_prec_\n\n def share(\n self,\n *owners: List[BaseWorker],\n protocol: str = \"snn\",\n field: Union[int, None] = None,\n dtype: Union[str, None] = None,\n crypto_provider: Union[BaseWorker, None] = None,\n requires_grad: bool = False,\n no_wrap: bool = False,\n ):\n \"\"\"This is a pass through method which calls .share on the child.\n\n Args:\n owners (list): A list of BaseWorker objects determining who to send shares to.\n protocol (str): the crypto protocol used to perform the computations ('snn' or 'fss')\n field (int or None): The arithmetic field where live the shares.\n dtype (str or None): The dtype of shares\n crypto_provider (BaseWorker or None): The worker providing the crypto primitives.\n requires_grad (bool): Should we add AutogradTensor to allow gradient computation,\n default is False.\n \"\"\"\n if protocol == \"falcon\":\n shared_tensor = syft.ReplicatedSharingTensor(owner=self.owner).share_secret(\n self, owners\n )\n return shared_tensor\n if self.has_child():\n chain = self.child\n\n kwargs_ = (\n {\"requires_grad\": 
requires_grad} if isinstance(chain, syft.PointerTensor) else {}\n )\n shared_tensor = chain.share(\n *owners,\n protocol=protocol,\n field=field,\n dtype=dtype,\n crypto_provider=crypto_provider,\n **kwargs_,\n )\n else:\n if self.type() == \"torch.FloatTensor\":\n raise TypeError(\"FloatTensor cannot be additively shared, Use fix_precision.\")\n\n shared_tensor = (\n syft.AdditiveSharingTensor(\n protocol=protocol,\n field=field,\n dtype=dtype,\n crypto_provider=crypto_provider,\n owner=self.owner,\n )\n .on(self.copy(), wrap=False)\n .share_secret(*owners)\n )\n\n if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):\n shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)\n\n if not no_wrap:\n shared_tensor = shared_tensor.wrap()\n\n return shared_tensor\n\n def share_(self, *args, **kwargs):\n \"\"\"\n Allows to call .share() as an inplace operation\n \"\"\"\n if self.has_child():\n requires_grad = kwargs.get(\"requires_grad\", False)\n # Reset the requires_grad kwargs if the call is local\n if not isinstance(self.child, syft.PointerTensor):\n kwargs[\"requires_grad\"] = False\n\n shared_tensor = self.child.share_(*args, **kwargs)\n\n if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):\n shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)\n\n self.child = shared_tensor\n return self\n else:\n return self.share(*args, **kwargs) # TODO change to inplace\n\n def combine(self, *pointers):\n \"\"\"This method will combine the child pointer with another list of pointers\n\n Args:\n *pointers a list of pointers to be combined into a MultiPointerTensor\n\n \"\"\"\n\n assert isinstance(self.child, PointerTensor)\n\n ps = list(pointers)\n ps.append(self)\n\n return syft.combine_pointers(*ps)\n\n def torch_type(self):\n\n if isinstance(self, torch.Tensor) and not self.is_wrapper:\n return self.type()\n else:\n return self.child.torch_type()\n\n def encrypt(self, protocol=\"mpc\", **kwargs):\n \"\"\"\n This method will encrypt each value in the tensor using Multi Party\n Computation (default) or Paillier Homomorphic Encryption\n\n Args:\n protocol (str): Currently supports 'mpc' for Multi Party\n Computation and 'paillier' for Paillier Homomorphic Encryption\n **kwargs:\n With Respect to MPC accepts:\n workers (list): Parties involved in the sharing of the Tensor\n crypto_provider (syft.VirtualWorker): Worker responsible for the\n generation of the random numbers for encryption\n requires_grad (bool): If true, whenever the remote value of this tensor\n will have its gradient updated (for example when calling .backward()),\n a call will be made to set back the local gradient value.\n no_wrap (bool): If True, wrap() is called on the created pointer\n Keyword Args: To be parsed as kwargs for the .fix_prec() method\n\n With Respect to Paillier accepts:\n public_key (phe.paillier.PaillierPublicKey): Can be obtained using\n ```public_key, private_key = sy.frameworks.torch.he.paillier.keygen()```\n Returns:\n An encrypted version of the Tensor following the protocol specified\n\n Raises:\n NotImplementedError: If protocols other than the ones mentioned above are queried\n\n \"\"\"\n if protocol.lower() == \"mpc\":\n workers = kwargs.pop(\"workers\")\n crypto_provider = kwargs.pop(\"crypto_provider\")\n requires_grad = kwargs.pop(\"requires_grad\", False)\n no_wrap = kwargs.pop(\"no_wrap\", False)\n kwargs_fix_prec = kwargs # Rest of kwargs for fix_prec method\n\n x_shared = self.fix_prec(**kwargs_fix_prec).share(\n *workers,\n 
crypto_provider=crypto_provider,\n requires_grad=requires_grad,\n no_wrap=no_wrap,\n )\n return x_shared\n\n elif protocol.lower() == \"paillier\":\n public_key = kwargs.get(\"public_key\")\n\n x = self.copy()\n x_encrypted = PaillierTensor().on(x) # Instantiate the class\n x_encrypted.child.encrypt_(public_key) # Perform Homomorphic Encryption\n\n return x_encrypted\n\n else:\n raise NotImplementedError(\n \"Currently the .encrypt() method only supports Paillier Homomorphic \"\n \"Encryption and Secure Multi-Party Computation\"\n )\n\n def decrypt(self, **kwargs):\n \"\"\"\n This method will decrypt each value in the tensor using Multi Party\n Computation (default) or Paillier Homomorphic Encryption\n\n Args:\n **kwargs:\n With Respect to MPC accepts:\n None\n\n With Respect to Paillier accepts:\n private_key (phe.paillier.PaillierPrivateKey): Can be obtained using\n ```public_key, private_key = sy.frameworks.torch.he.paillier.keygen()```\n Returns:\n An decrypted version of the Tensor following the protocol guessed from its type\n\n Raises:\n NotImplementedError: If protocols other than the ones mentioned above are queried\n\n \"\"\"\n\n protocol = kwargs.get(\"protocol\", None)\n if protocol:\n warnings.warn(\"protocol should no longer be used in decrypt\")\n\n if isinstance(self.child, (syft.FixedPrecisionTensor, syft.AutogradTensor)):\n x_encrypted = self.copy()\n x_decrypted = x_encrypted.get().float_prec()\n return x_decrypted\n\n elif isinstance(self.child, PaillierTensor):\n # self.copy() not required as PaillierTensor's decrypt method is not inplace\n private_key = kwargs.get(\"private_key\")\n return self.child.decrypt(private_key)\n\n else:\n raise NotImplementedError(\n \"Currently the .decrypt() method only supports Paillier Homomorphic \"\n \"Encryption and Secure Multi-Party Computation\"\n )\n\n def numpy_tensor(self):\n \"\"\"This method will cast the current tensor to one with numpy as the underlying\n representation. The tensor chain will be Wrapper > NumpyTensor > np.ndarray\"\"\"\n\n if not self.is_wrapper:\n return syft.NumpyTensor(self.numpy())\n else:\n raise Exception(\n \"Can only cast a data tensor to NumpyTensor. You called this \",\n \"on a wrapper. Add NumpyTensor to the chain by hand if you want \"\n \"this functionality.\",\n )\n", "path": "syft/frameworks/torch/tensors/interpreters/native.py" } ]
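For readers skimming the `TorchTensor.share()` implementation in the snapshot above, here is a hedged usage sketch of the two sharing paths it implements; the hook/worker setup uses the classic PySyft 0.2.x API and placeholder worker names, so treat it as an illustration rather than part of the record.

```python
# Illustrative sketch only: integer tensors are shared directly as
# AdditiveSharingTensors, while float tensors must be encoded with
# fix_precision() first (share() raises TypeError for FloatTensor).
import torch
import syft as sy

hook = sy.TorchHook(torch)
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
theo = sy.VirtualWorker(hook, id="theo")  # crypto provider

# Direct sharing of an int tensor
x_int = torch.tensor([1, 2, 3]).share(alice, bob, crypto_provider=theo)

# Float tensors go through fixed-precision encoding before sharing
x_float = torch.tensor([0.1, 0.2]).fix_precision().share(alice, bob, crypto_provider=theo)

print(x_int.get())                       # reconstructs tensor([1, 2, 3])
print(x_float.get().float_precision())   # decodes back to tensor([0.1000, 0.2000])
```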
[ { "content": "from typing import Union, List\nimport weakref\nimport warnings\n\nimport torch\n\nimport syft\nfrom syft.generic.frameworks.hook import hook_args\nfrom syft.generic.frameworks.overload import overloaded\nfrom syft.frameworks.torch.tensors.interpreters.paillier import PaillierTensor\nfrom syft.messaging.message import TensorCommandMessage\nfrom syft.generic.frameworks.types import FrameworkTensor\nfrom syft.generic.abstract.tensor import AbstractTensor\nfrom syft.generic.abstract.hookable import hookable\nfrom syft.generic.pointers.pointer_tensor import PointerTensor\nfrom syft.generic.utils import memorize\nfrom syft.workers.base import BaseWorker\n\nfrom syft.exceptions import PureFrameworkTensorFoundError\nfrom syft.exceptions import InvalidTensorForRemoteGet\n\n\ndef _get_maximum_precision():\n \"\"\"This function returns the maximum value allowed for precision fractions before the\n chain decides to use LPT.\n\n This function can be overridden if the setup requires the use of LargePrecisionTensor\n from a smaller precision.\n\n The default value is the size of torch.long\n\n Returns:\n The maximum value for precision allowed in this setup\n \"\"\"\n return default_pytorch_maximum_precision()\n\n\ndef default_pytorch_maximum_precision():\n \"\"\"Dealing with integers > 2**63-1 is not fun with precision tensors.\n \"\"\"\n return 63\n\n\nclass TorchTensor(AbstractTensor):\n \"\"\"Add methods to this tensor to have them added to every torch.Tensor object.\n\n This tensor is simply a more convenient way to add custom functions to\n all Torch tensor types. When you add a function to this tensor, it will\n be added to EVERY native torch tensor type (i.e. torch.Torch) automatically\n by the TorchHook (which is in frameworks/torch/hook.py).\n\n Note: all methods from AbstractTensor will also be included because this\n tensor extends AbstractTensor. So, if you're looking for a method on\n the native torch tensor API but it's not listed here, you might try\n checking AbstractTensor.\n \"\"\"\n\n origin = None\n id_at_origin = None\n\n def trigger_origin_backward_hook(self, origin: str, id_at_origin: int):\n \"\"\"\n This hook is triggered when a tensor which was received from a sender has\n a gradient update. It will send back to this sender and his original tensor\n this gradient value to be set remotely. 
Also, because this is triggered during\n backward(), the backward command is also forwarded back.\n\n Args:\n origin (str): id of the worker where this tensor comes from\n id_at_origin (int): what was its original id\n \"\"\"\n\n def trigger_origin_backward(grad):\n \"\"\"\n The function setting back the gradient and calling backward\n\n Args:\n grad: the gradient tensor being set\n \"\"\"\n\n location = self.owner.get_worker(origin)\n\n # set gradient at the origin\n message = TensorCommandMessage.computation(\"set_grad\", id_at_origin, (grad,), {}, None)\n self.owner.send_msg(message=message, location=location)\n\n # call backward()\n message = TensorCommandMessage.computation(\"backward\", id_at_origin, (grad,), {}, None)\n self.owner.send_msg(message=message, location=location)\n\n return trigger_origin_backward\n\n def set_grad(self, grad):\n self.grad = grad\n\n @property\n def tags(self):\n if self.has_child():\n return self.child.tags\n else:\n if not hasattr(self, \"_tags\"):\n self._tags = None\n return self._tags\n\n @tags.setter\n def tags(self, new_tags):\n if self.has_child():\n if new_tags is not None:\n self.child.tags = set(new_tags)\n else:\n self.child.tags = set()\n else:\n self._tags = new_tags\n\n @property\n def description(self):\n if self.has_child():\n return self.child.description\n else:\n if not hasattr(self, \"_description\"):\n self._description = None\n return self._description\n\n @description.setter\n def description(self, new_desc):\n if self.has_child():\n self.child.description = new_desc\n else:\n self._description = new_desc\n\n @property\n def shape(self):\n if self.is_wrapper:\n return self.child.shape\n else:\n return self.native_shape\n\n @property\n def data(self):\n if self.is_wrapper:\n return self.child.data\n else:\n return self.native_data\n\n @property\n def grad(self):\n if self.is_wrapper:\n child_grad = self.child.grad\n if child_grad is None:\n return None\n else:\n if child_grad.is_wrapper:\n return child_grad\n else:\n return child_grad.wrap()\n else:\n to_return = self.native_grad\n\n # good to ensure that the ID stays consistent\n # not 100% this is required but it's at least\n # good practice\n try:\n to_return.id = self.grad_id\n except AttributeError:\n if to_return is not None and hasattr(to_return, \"id\"):\n self.grad_id = to_return.id\n\n return to_return\n\n @grad.setter\n def grad(self, new_grad):\n\n # If grad is not a pure torch tensor you need to store the chain in a\n # specific place otherwise it will get deleted\n if new_grad is not None and (\n not isinstance(new_grad, torch.Tensor) or hasattr(new_grad, \"child\")\n ):\n self.child.grad = new_grad # .wrap()\n else:\n if hasattr(self, \"native_grad\"):\n with torch.no_grad():\n self.native_grad = new_grad\n elif new_grad is not None:\n self.native_grad = new_grad\n return self\n\n def __str__(self) -> str:\n if self.has_child():\n if self.is_wrapper:\n return \"(Wrapper)>\" + self.child.__str__()\n else:\n return type(self).__name__ + \">\" + self.child.__str__()\n else:\n return self.native___str__()\n\n def __repr__(self) -> str:\n if self.has_child():\n if self.is_wrapper:\n return \"(Wrapper)>\" + self.child.__str__()\n else:\n return type(self).__name__ + \">\" + self.child.__repr__()\n else:\n out = self.native___repr__()\n\n big_repr = False\n\n if self.tags is not None and len(self.tags):\n big_repr = True\n out += \"\\n\\tTags: \"\n for tag in self.tags:\n out += str(tag) + \" \"\n\n if self.description is not None:\n big_repr = True\n out += 
\"\\n\\tDescription: \" + str(self.description).split(\"\\n\")[0] + \"...\"\n\n if big_repr:\n out += \"\\n\\tShape: \" + str(self.shape)\n\n return out\n\n def __eq__(self, other):\n return self.eq(other)\n\n @property\n def id(self):\n if self.is_wrapper:\n return self.child.id\n else:\n try:\n return self._id\n except AttributeError:\n self._id = syft.ID_PROVIDER.pop()\n return self._id\n\n @property\n def gc(self):\n return self.garbage_collection\n\n @gc.setter\n def gc(self, flag):\n self.garbage_collection = flag\n\n @property\n def disable_gc(self):\n self.child.garbage_collect_data = False\n self.garbage_collection = False\n return self\n\n @property\n def garbage_collection(self):\n if not self.has_child():\n if hasattr(self, \"ptr\") and self.ptr is not None:\n self.child = self.ptr\n self.child.garbage_collect_data = True\n return self.child.garbage_collect_data\n\n @garbage_collection.setter\n def garbage_collection(self, flag):\n if not self.has_child():\n if hasattr(self, \"ptr\") and self.ptr is not None:\n self.child = self.ptr\n self.child.garbage_collect_data = flag\n\n @id.setter\n def id(self, new_id):\n if self.is_wrapper:\n self.child.id = new_id\n else:\n self._id = new_id\n\n def get_class_attributes(self):\n \"\"\"\n Return class attributes for torch tensors\n \"\"\"\n return {\"type\": self.dtype}\n\n def _is_parameter(self):\n \"\"\"\n Utility method to test if the tensor is in fact a Parameter\n \"\"\"\n return isinstance(self, torch.nn.Parameter)\n\n @staticmethod\n @overloaded.module\n def torch(module):\n @overloaded.module\n def nn(module):\n \"\"\"\n The syntax is the same, so @overloaded.module handles recursion\n Note that we don't need to add the @staticmethod decorator\n \"\"\"\n\n module.nn = nn # Handles all the overloading properly\n\n @staticmethod\n @overloaded.module\n def native_torch(module):\n def roll(tensor, shifts, **kwargs):\n if isinstance(shifts, FrameworkTensor):\n shifts = int(shifts.item())\n return torch.native_roll(tensor, shifts, **kwargs)\n\n module.roll = roll\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Operates as a router for functions. A function call always starts\n by being handled here and 3 scenarii must be considered:\n Real Torch tensor:\n The arguments of the function are real tensors so we should\n run the native torch command\n Torch wrapper:\n The arguments are just wrappers at the top of a chain\n (ex: wrapper>LoggingTensor>Torch tensor), so just forward\n the instruction to the next layer type in the chain (in\n the example above to LoggingTensor.handle_func_command),\n get the response and replace a wrapper on top of all tensors\n found in the response.\n Syft Tensor:\n The arguments are syft tensors of same type: this can happen\n if at any node of the chain where some function is forwarded,\n the handle_func_command modify the function and make a new\n call but keeps the arguments \"un-wrapped\". 
Making a new call\n means that by default the command is treated here in the\n global router.\n :param command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs_])\n :return: the response of the function command\n \"\"\"\n cmd, _, args_, kwargs_ = command\n\n try: # will work if tensors are wrappers\n # Replace all torch tensor with their child attribute\n # Note that we return also args_type which helps handling case 3 in the docstring\n new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(\n cmd, args_, kwargs_, return_args_type=True\n )\n # This handles case 3: it redirects the command to the appropriate class depending\n # of the syft type of the arguments and returns\n if args_type not in FrameworkTensor:\n return args_type.handle_func_command(command)\n # build the new command\n new_command = (cmd, None, new_args, new_kwargs)\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n command = cls.rgetattr(cls, cmd)\n return command(*args_, **kwargs_)\n except AttributeError:\n pass\n\n # Send it to the appropriate class and get the response\n try:\n response = new_type.handle_func_command(new_command)\n except RuntimeError:\n # Change the library path to avoid errors on layers like AvgPooling\n list_new_command = list(new_command)\n list_new_command[0] = cls._fix_torch_library(new_command[0])\n new_command = tuple(list_new_command)\n response = new_type.handle_func_command(new_command)\n\n # Put back the wrappers where needed\n response = hook_args.hook_response(cmd, response, wrap_type=args_type)\n except PureFrameworkTensorFoundError: # means that it's not a wrapper but a pure tensor\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n command = cls.rgetattr(cls, f\"native_{cmd}\")\n return command(*args_, **kwargs_)\n except AttributeError:\n pass\n\n # Run the native function with the new args\n # Note the the cmd should already be checked upon reception by the worker\n # in the execute_command function\n try:\n response = cls._get_response(cmd, args_, kwargs_)\n except AttributeError:\n # Change the library path to avoid errors on layers like AvgPooling\n cmd = cls._fix_torch_library(cmd)\n response = cls._get_response(cmd, args_, kwargs_)\n\n return response\n\n @staticmethod\n @memorize\n def _get_method(cmd):\n module = syft.local_worker.hook\n segments = cmd.split(\".\")\n submodules = segments[:-1]\n command = segments[-1]\n\n for sm in submodules:\n module = getattr(module, sm)\n\n try:\n command_method = getattr(module, f\"native_{command}\")\n except AttributeError: # the function isn't overloaded\n command_method = getattr(module, command)\n\n return command_method\n\n @staticmethod\n def _get_response(cmd, args_, kwargs_):\n \"\"\"\n Return the evaluation of the cmd string parameter\n \"\"\"\n command_method = TorchTensor._get_method(cmd)\n\n if isinstance(args_, tuple):\n response = command_method(*args_, **kwargs_)\n else:\n response = command_method(args_, **kwargs_)\n\n return response\n\n def _fix_torch_library(cmd):\n \"\"\"\n Change the cmd string parameter to use nn.functional path to avoid errors.\n \"\"\"\n if \"_C._nn\" in cmd:\n cmd = cmd.replace(\"_C._nn\", \"nn.functional\")\n return cmd\n\n @hookable\n def send(\n self,\n *location,\n inplace: bool = False,\n user: object = None,\n local_autograd: bool = False,\n 
requires_grad: bool = False,\n preinitialize_grad: bool = False,\n no_wrap: bool = False,\n garbage_collect_data: bool = True,\n ):\n \"\"\"Gets the pointer to a new remote object.\n\n One of the most commonly used methods in PySyft, this method serializes the object upon\n which it is called (self), sends the object to a remote worker, creates a pointer to\n that worker, and then returns that pointer from this function.\n\n Args:\n location: The BaseWorker object which you want to send this object to. Note that\n this is never actually the BaseWorker but instead a class which instantiates the\n BaseWorker abstraction.\n inplace: if true, return the same object instance, else a new wrapper\n user (object,optional): User credentials to be verified.\n local_autograd: Use autograd system on the local machine instead of PyTorch's\n autograd on the workers.\n requires_grad: Default to False. If true, whenever the remote value of this tensor\n will have its gradient updated (for example when calling .backward()), a call\n will be made to set back the local gradient value.\n preinitialize_grad: Initialize gradient for AutogradTensors to a tensor\n no_wrap: If True, wrap() is called on the created pointer\n garbage_collect_data: argument passed down to create_pointer()\n\n Returns:\n A torch.Tensor[PointerTensor] pointer to self. Note that this\n object will likely be wrapped by a torch.Tensor wrapper.\n\n Raises:\n SendNotPermittedError: Raised if send is not permitted on this tensor.\n \"\"\"\n\n # If you send a pointer p1, you want the pointer to pointer p2 to control\n # the garbage collection and not the remaining old p1 (here self). Because if\n # p2 is not GCed, GCing p1 shouldn't delete the remote tensor, but if you\n # want to do so, as p2 is not GCed, you can still do `del p2`.\n # This allows to chain multiple .send().send() calls.\n\n if len(location) == 1:\n\n location = location[0]\n\n if self.has_child() and isinstance(self.child, PointerTensor):\n self.child.garbage_collect_data = False\n if self._is_parameter():\n self.data.child.garbage_collect_data = False\n\n ptr = self.owner.send(\n self,\n location,\n local_autograd=local_autograd,\n requires_grad=requires_grad,\n preinitialize_grad=preinitialize_grad,\n garbage_collect_data=garbage_collect_data,\n )\n\n ptr.description = self.description\n ptr.tags = self.tags\n\n # The last pointer should control remote GC, not the previous self.ptr\n if hasattr(self, \"ptr\") and self.ptr is not None:\n ptr_ = self.ptr()\n if ptr_ is not None:\n ptr_.garbage_collect_data = False\n\n # we need to cache this weak reference to the pointer so that\n # if this method gets called multiple times we can simply re-use\n # the same pointer which was previously created\n self.ptr = weakref.ref(ptr)\n\n if self._is_parameter():\n if inplace:\n self.is_wrapper = True\n with torch.no_grad():\n self.set_()\n self.data = ptr\n output = self\n else:\n if no_wrap:\n raise ValueError(\"Parameters can't accept no_wrap=True\")\n wrapper = torch.Tensor()\n param_wrapper = torch.nn.Parameter(wrapper)\n param_wrapper.is_wrapper = True\n with torch.no_grad():\n param_wrapper.set_()\n param_wrapper.data = ptr\n output = param_wrapper\n else:\n if inplace:\n self.is_wrapper = True\n self.set_()\n self.child = ptr\n return self\n else:\n output = ptr if no_wrap else ptr.wrap()\n\n if self.requires_grad:\n # This is for AutogradTensor to work on MultiPointerTensors\n # With pre-initialized gradients, this should get it from AutogradTensor.grad\n if 
preinitialize_grad:\n grad = output.child.grad\n else:\n grad = output.attr(\"grad\")\n\n output.grad = grad\n\n # Because of the way PyTorch works, .grad is prone to\n # create entirely new Python objects for the tensor, which\n # inadvertently deletes our custom attributes (like .child)\n # But, if we keep a backup reference around, PyTorch seems\n # to re-use it, which means .grad keeps the attributes we\n # want it to keep. #HackAlert\n output.backup_grad = grad\n\n if local_autograd:\n output = syft.AutogradTensor(data=output, preinitialize_grad=preinitialize_grad).on(\n output\n )\n\n else:\n\n children = []\n for loc in location:\n children.append(self.clone().send(loc, no_wrap=True))\n\n output = syft.MultiPointerTensor(children=children)\n\n if not no_wrap:\n output = output.wrap()\n\n return output\n\n def send_(self, *location, **kwargs):\n \"\"\"\n Calls send() with inplace option, but only with a single location\n :param location: workers locations\n :return:\n \"\"\"\n if len(location) > 1:\n raise NotImplementedError(\"Inplace send to several workers is currently not supported.\")\n\n return self.send(*location, inplace=True, **kwargs)\n\n def create_pointer(\n self,\n location: BaseWorker = None,\n id_at_location: (str or int) = None,\n owner: BaseWorker = None,\n ptr_id: (str or int) = None,\n garbage_collect_data: bool = True,\n shape=None,\n **kwargs,\n ) -> PointerTensor:\n \"\"\"Creates a pointer to the \"self\" torch.Tensor object.\n\n Returns:\n A PointerTensor pointer to self. Note that this\n object will likely be wrapped by a torch.Tensor wrapper.\n \"\"\"\n if id_at_location is None:\n id_at_location = self.id\n\n if ptr_id is None:\n if location is not None and location.id != self.owner.id:\n ptr_id = self.id\n else:\n ptr_id = syft.ID_PROVIDER.pop()\n\n if shape is None:\n shape = self.shape\n\n ptr = syft.PointerTensor.create_pointer(\n self, location, id_at_location, owner, ptr_id, garbage_collect_data, shape\n )\n\n return ptr\n\n def mid_get(self):\n \"\"\"This method calls .get() on a child pointer and correctly registers the results\"\"\"\n if not self.has_child():\n raise InvalidTensorForRemoteGet(self)\n\n self.child.mid_get()\n\n def remote_get(self):\n \"\"\"Assuming .child is a PointerTensor, this method calls .get() on the tensor\n that the .child is pointing to (which should also be a PointerTensor)\n\n TODO: make this kind of message forwarding generic?\n \"\"\"\n if not self.has_child():\n raise InvalidTensorForRemoteGet(self)\n\n self.child.remote_get()\n\n return self\n\n def get(self, *args, inplace: bool = False, user=None, reason: str = \"\", **kwargs):\n \"\"\"Requests the tensor/chain being pointed to, be serialized and return\n Args:\n args: args to forward to worker\n inplace: if true, return the same object instance, else a new wrapper\n kwargs: kwargs to forward to worker\n Raises:\n GetNotPermittedError: Raised if get is not permitted on this tensor\n \"\"\"\n\n # If it is a local tensor/chain, we don't need to verify permissions\n if not isinstance(self.child, syft.PointerTensor):\n tensor = self.child.get(*args, **kwargs)\n else: # Remote tensor/chain\n tensor = self.child.get(*args, user=user, reason=reason, **kwargs)\n\n # Clean the wrapper\n delattr(self, \"child\")\n\n # Parameters use .data instead of children\n # so we need to have special support to make sure\n # that Parmeters operate inline (because they're\n # typically being managed inside of a model/optimizer\n # so not using the same wrapper can cause the model/\n # 
optimizer to lose track of where the actual weights\n # are.\n if isinstance(self, torch.nn.Parameter):\n self.is_wrapper = tensor.data.is_wrapper\n if inplace:\n self.data = tensor.data\n self.grad = tensor.grad\n return self\n else:\n return tensor\n\n if inplace:\n self.set_(tensor)\n if hasattr(tensor, \"child\"):\n self.child = tensor.child\n else:\n self.is_wrapper = False\n return self\n else:\n return tensor\n\n def get_(self, *args, **kwargs):\n \"\"\"\n Calls get() with inplace option set to True\n \"\"\"\n return self.get(*args, inplace=True, **kwargs)\n\n def allow(self, user=None) -> bool:\n \"\"\" This function returns will return True if it isn't a PrivateTensor, otherwise it will\n return the result of PrivateTensor's allow method.\n\n Args:\n user (object,optional): User credentials to be verified.\n\n Returns:\n boolean: If it is a public tensor/ allowed user, returns true, otherwise it returns\n false.\n \"\"\"\n # If it is a wrapper\n if self.is_wrapper:\n current_tensor = self.child\n\n # Verify permissions for each element on the tensor chain.\n while hasattr(current_tensor, \"child\"):\n\n # If it has a list of allowed users, verify permissions,\n # otherwise (public tensors) go to the next.\n if hasattr(current_tensor, \"allowed_users\"):\n allow = current_tensor.allow(user)\n if not allow:\n return False\n\n # Go to next element on the tensor chain\n current_tensor = current_tensor.child\n return True\n\n def move(self, location: BaseWorker, requires_grad: bool = False):\n \"\"\"\n Move acts on a pointer to A to move the remote value to B (=location).\n\n Note a A will keep a copy of his value that he sent to B. This follows the\n .send() paradigm where the local worker keeps a copy of the value he sends.\n\n Args:\n location: the worker where the remote value should be moved\n requires_grad: see send() for details\n\n Returns:\n A pointer to the worker location\n \"\"\"\n new_ptr = self.child.move(location, requires_grad)\n # We get the owner from self.child because the owner of a wrapper is\n # not reliable and sometimes end up being the syft.local_worker\n self.child.owner.register_obj(self)\n if isinstance(new_ptr, PointerTensor):\n return new_ptr.wrap()\n else:\n return new_ptr\n\n def move_(self, location: BaseWorker, requires_grad: bool = False):\n \"\"\"\n Inplace version of move\n \"\"\"\n new_ptr = self.move(location, requires_grad)\n self.child = new_ptr\n return self\n\n def remote_send(self, location):\n return self.child.remote_send(location).wrap()\n\n def attr(self, attr_name):\n \"\"\"\"\"\"\n\n if self.is_wrapper:\n attr_val = self.child.attr(attr_name)\n\n if attr_name == \"grad\":\n self.grad = attr_val\n else:\n attr_val = getattr(self, attr_name)\n\n return attr_val\n\n def clone(self, *args, **kwargs):\n \"\"\"\n Clone should keep ids unchanged, contrary to copy\n \"\"\"\n cloned_tensor = self.native_clone(*args, **kwargs)\n cloned_tensor.id = self.id\n cloned_tensor.owner = self.owner\n cloned_tensor.is_wrapper = self.is_wrapper\n\n if self.has_child():\n cloned_tensor.child = self.child.clone(*args, **kwargs)\n\n return cloned_tensor\n\n def float_prec(self):\n if isinstance(self.child, PointerTensor):\n self.child = self.child.float_precision()\n return self\n\n return self.child.float_precision()\n\n float_precision = float_prec\n\n def float_prec_(self):\n tensor = self.float_prec()\n if hasattr(tensor, \"child\"):\n self.child = tensor.child\n elif self._is_parameter():\n self.is_wrapper = False\n self.data = tensor\n 
self.data.is_wrapper = False\n else:\n del self.child\n self.set_(tensor)\n self.is_wrapper = False\n return self\n\n float_precision_ = float_prec_\n\n def private_tensor(self, *args, allowed_users: List[str], no_wrap: bool = False, **kwargs):\n \"\"\"\n Convert a tensor or syft tensor to private tensor\n\n Args:\n *args (tuple): args to transmit to the private tensor.\n allowed_users (list): List of allowed users.\n no_wrap (bool): if True, we don't add a wrapper on top of the private tensor\n **kwargs (dict): kwargs to transmit to the private tensor\n \"\"\"\n\n if not kwargs.get(\"owner\"):\n kwargs[\"owner\"] = self.owner\n\n if self.is_wrapper:\n self.child = (\n syft.PrivateTensor(tags=self.tags, *args, **kwargs)\n .on(self.child, wrap=False)\n .register_credentials(tuple(allowed_users))\n )\n if no_wrap:\n return self.child\n else:\n return self\n\n private_tensor = (\n syft.PrivateTensor(tags=self.tags, *args, **kwargs)\n .on(self, wrap=False)\n .register_credentials(tuple(allowed_users))\n )\n if not no_wrap:\n private_tensor = private_tensor.wrap()\n\n return private_tensor\n\n def fix_prec(self, *args, no_wrap: bool = False, **kwargs):\n \"\"\"\n Convert a tensor or syft tensor to fixed precision\n\n Args:\n *args (tuple): args to transmit to the fixed precision tensor\n no_wrap (bool): if True, we don't add a wrapper on top of the fixed precision tensor\n **kwargs (dict): kwargs to transmit to the fixed precision tensor\n \"\"\"\n\n if not kwargs.get(\"owner\"):\n kwargs[\"owner\"] = self.owner\n\n if self.is_wrapper:\n child = self.child.fix_prec(*args, **kwargs)\n if no_wrap:\n return child\n else:\n return child.wrap()\n\n base = kwargs.get(\"base\", 10)\n prec_fractional = kwargs.get(\"precision_fractional\", 3)\n\n max_precision = _get_maximum_precision()\n fpt_tensor = syft.FixedPrecisionTensor(*args, **kwargs).on(self, wrap=False).fix_precision()\n\n if not no_wrap:\n fpt_tensor = fpt_tensor.wrap()\n\n return fpt_tensor\n\n fix_precision = fix_prec\n\n def fix_prec_(self, *args, **kwargs):\n \"\"\"\n Performs an inplace transformation to fixed precision and change self to\n be a wrapper\n\n Args:\n *args: args to transmit to fix_prec\n **kwargs: kwargs to transmit to fix_prec\n\n Returns:\n self seen as a wrapper\n \"\"\"\n # We specify id to make sure the inplace op doesn't change the tensor id\n self.child = self.fix_prec(*args, no_wrap=True, id=self.id, **kwargs)\n self.is_wrapper = True\n return self\n\n fix_precision_ = fix_prec_\n\n def share(\n self,\n *owners: List[BaseWorker],\n protocol: str = \"snn\",\n field: Union[int, None] = None,\n dtype: Union[str, None] = None,\n crypto_provider: Union[BaseWorker, None] = None,\n requires_grad: bool = False,\n no_wrap: bool = False,\n ):\n \"\"\"This is a pass through method which calls .share on the child.\n\n Args:\n owners (list): A list of BaseWorker objects determining who to send shares to.\n protocol (str): the crypto protocol used to perform the computations ('snn' or 'fss')\n field (int or None): The arithmetic field where live the shares.\n dtype (str or None): The dtype of shares\n crypto_provider (BaseWorker or None): The worker providing the crypto primitives.\n requires_grad (bool): Should we add AutogradTensor to allow gradient computation,\n default is False.\n \"\"\"\n if protocol == \"falcon\":\n shared_tensor = syft.ReplicatedSharingTensor(owner=self.owner).share_secret(\n self, owners\n )\n return shared_tensor\n if self.has_child():\n chain = self.child\n\n kwargs_ = (\n {\"requires_grad\": 
requires_grad} if isinstance(chain, syft.PointerTensor) else {}\n )\n shared_tensor = chain.share(\n *owners,\n protocol=protocol,\n field=field,\n dtype=dtype,\n crypto_provider=crypto_provider,\n **kwargs_,\n )\n else:\n if self.type() == \"torch.FloatTensor\":\n raise TypeError(\"FloatTensor cannot be additively shared, Use fix_precision.\")\n\n shared_tensor = (\n syft.AdditiveSharingTensor(\n protocol=protocol,\n field=field,\n dtype=dtype,\n crypto_provider=crypto_provider,\n owner=self.owner,\n )\n .on(self.copy(), wrap=False)\n .share_secret(*owners)\n )\n\n if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):\n shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)\n\n if not no_wrap:\n shared_tensor = shared_tensor.wrap(type=self.dtype)\n\n return shared_tensor\n\n def share_(self, *args, **kwargs):\n \"\"\"\n Allows to call .share() as an inplace operation\n \"\"\"\n if self.has_child():\n requires_grad = kwargs.get(\"requires_grad\", False)\n # Reset the requires_grad kwargs if the call is local\n if not isinstance(self.child, syft.PointerTensor):\n kwargs[\"requires_grad\"] = False\n\n shared_tensor = self.child.share_(*args, **kwargs)\n\n if requires_grad and not isinstance(shared_tensor, syft.PointerTensor):\n shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False)\n\n self.child = shared_tensor\n return self\n else:\n return self.share(*args, **kwargs) # TODO change to inplace\n\n def combine(self, *pointers):\n \"\"\"This method will combine the child pointer with another list of pointers\n\n Args:\n *pointers a list of pointers to be combined into a MultiPointerTensor\n\n \"\"\"\n\n assert isinstance(self.child, PointerTensor)\n\n ps = list(pointers)\n ps.append(self)\n\n return syft.combine_pointers(*ps)\n\n def torch_type(self):\n\n if isinstance(self, torch.Tensor) and not self.is_wrapper:\n return self.type()\n else:\n return self.child.torch_type()\n\n def encrypt(self, protocol=\"mpc\", **kwargs):\n \"\"\"\n This method will encrypt each value in the tensor using Multi Party\n Computation (default) or Paillier Homomorphic Encryption\n\n Args:\n protocol (str): Currently supports 'mpc' for Multi Party\n Computation and 'paillier' for Paillier Homomorphic Encryption\n **kwargs:\n With Respect to MPC accepts:\n workers (list): Parties involved in the sharing of the Tensor\n crypto_provider (syft.VirtualWorker): Worker responsible for the\n generation of the random numbers for encryption\n requires_grad (bool): If true, whenever the remote value of this tensor\n will have its gradient updated (for example when calling .backward()),\n a call will be made to set back the local gradient value.\n no_wrap (bool): If True, wrap() is called on the created pointer\n Keyword Args: To be parsed as kwargs for the .fix_prec() method\n\n With Respect to Paillier accepts:\n public_key (phe.paillier.PaillierPublicKey): Can be obtained using\n ```public_key, private_key = sy.frameworks.torch.he.paillier.keygen()```\n Returns:\n An encrypted version of the Tensor following the protocol specified\n\n Raises:\n NotImplementedError: If protocols other than the ones mentioned above are queried\n\n \"\"\"\n if protocol.lower() == \"mpc\":\n workers = kwargs.pop(\"workers\")\n crypto_provider = kwargs.pop(\"crypto_provider\")\n requires_grad = kwargs.pop(\"requires_grad\", False)\n no_wrap = kwargs.pop(\"no_wrap\", False)\n kwargs_fix_prec = kwargs # Rest of kwargs for fix_prec method\n\n x_shared = self.fix_prec(**kwargs_fix_prec).share(\n 
*workers,\n crypto_provider=crypto_provider,\n requires_grad=requires_grad,\n no_wrap=no_wrap,\n )\n return x_shared\n\n elif protocol.lower() == \"paillier\":\n public_key = kwargs.get(\"public_key\")\n\n x = self.copy()\n x_encrypted = PaillierTensor().on(x) # Instantiate the class\n x_encrypted.child.encrypt_(public_key) # Perform Homomorphic Encryption\n\n return x_encrypted\n\n else:\n raise NotImplementedError(\n \"Currently the .encrypt() method only supports Paillier Homomorphic \"\n \"Encryption and Secure Multi-Party Computation\"\n )\n\n def decrypt(self, **kwargs):\n \"\"\"\n This method will decrypt each value in the tensor using Multi Party\n Computation (default) or Paillier Homomorphic Encryption\n\n Args:\n **kwargs:\n With Respect to MPC accepts:\n None\n\n With Respect to Paillier accepts:\n private_key (phe.paillier.PaillierPrivateKey): Can be obtained using\n ```public_key, private_key = sy.frameworks.torch.he.paillier.keygen()```\n Returns:\n An decrypted version of the Tensor following the protocol guessed from its type\n\n Raises:\n NotImplementedError: If protocols other than the ones mentioned above are queried\n\n \"\"\"\n\n protocol = kwargs.get(\"protocol\", None)\n if protocol:\n warnings.warn(\"protocol should no longer be used in decrypt\")\n\n if isinstance(self.child, (syft.FixedPrecisionTensor, syft.AutogradTensor)):\n x_encrypted = self.copy()\n x_decrypted = x_encrypted.get().float_prec()\n return x_decrypted\n\n elif isinstance(self.child, PaillierTensor):\n # self.copy() not required as PaillierTensor's decrypt method is not inplace\n private_key = kwargs.get(\"private_key\")\n return self.child.decrypt(private_key)\n\n else:\n raise NotImplementedError(\n \"Currently the .decrypt() method only supports Paillier Homomorphic \"\n \"Encryption and Secure Multi-Party Computation\"\n )\n\n def numpy_tensor(self):\n \"\"\"This method will cast the current tensor to one with numpy as the underlying\n representation. The tensor chain will be Wrapper > NumpyTensor > np.ndarray\"\"\"\n\n if not self.is_wrapper:\n return syft.NumpyTensor(self.numpy())\n else:\n raise Exception(\n \"Can only cast a data tensor to NumpyTensor. You called this \",\n \"on a wrapper. Add NumpyTensor to the chain by hand if you want \"\n \"this functionality.\",\n )\n", "path": "syft/frameworks/torch/tensors/interpreters/native.py" } ]
diff --git a/syft/frameworks/torch/tensors/interpreters/native.py b/syft/frameworks/torch/tensors/interpreters/native.py index f847e3d9b13..11ebeac534f 100644 --- a/syft/frameworks/torch/tensors/interpreters/native.py +++ b/syft/frameworks/torch/tensors/interpreters/native.py @@ -945,7 +945,7 @@ def share( shared_tensor = syft.AutogradTensor().on(shared_tensor, wrap=False) if not no_wrap: - shared_tensor = shared_tensor.wrap() + shared_tensor = shared_tensor.wrap(type=self.dtype) return shared_tensor diff --git a/test/torch/tensors/test_additive_shared.py b/test/torch/tensors/test_additive_shared.py index 6bd9bfb9b5d..4384976beb4 100644 --- a/test/torch/tensors/test_additive_shared.py +++ b/test/torch/tensors/test_additive_shared.py @@ -41,6 +41,7 @@ def test_share_get(workers, protocol, dtype, n_workers): t = torch.tensor([1, 2, 3]) x = t.share(*share_holders[:n_workers], **kwargs) + assert t.dtype == x.dtype x = x.get() assert (x == t).all()
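For context on the one-line change above: the added test asserts that the wrapper returned by `.share()` keeps the original tensor's dtype. Below is a minimal sketch of that behaviour, assuming a local PySyft 0.2-style setup; the hook and the workers named alice/bob/james are illustrative assumptions and do not appear in the diff itself.

```python
import torch
import syft as sy

# Hypothetical local setup; worker names are illustrative only.
hook = sy.TorchHook(torch)
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
james = sy.VirtualWorker(hook, id="james")  # crypto provider

t = torch.tensor([1, 2, 3])                      # int64 tensor
x = t.share(alice, bob, crypto_provider=james)   # additive-sharing wrapper

assert x.dtype == t.dtype    # the property guaranteed by wrap(type=self.dtype)
assert (x.get() == t).all()  # reconstructing the shares recovers the value
```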
quantumlib__Cirq-4780
Fix deprecation warning for newly added `ClassicallyControlledOperation` **Description of the issue** The following deprecation warning is emitted on running the json serialization test and should be fixed. ```python ~/quantum/Cirq/cirq-core/cirq/protocols/json_serialization.py:283: DeprecationWarning: Found 'cirq_type': 'ClassicallyControlledOperation' in _json_dict_. Custom values of this field are not permitted, and will produce an error starting in Cirq v0.15. ``` **Cirq version** 0.14dev
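The warning points at the hand-written `'cirq_type'` entry in `_json_dict_`. A minimal sketch of the serialization hook after the change, with the attribute names taken from the file below (the class here is an illustrative stand-in, not the full `ClassicallyControlledOperation`):

```python
class ClassicallyControlledOperationSketch:
    """Illustrative stand-in showing only the JSON serialization hook."""

    def __init__(self, conditions, sub_operation):
        self._conditions = conditions
        self._sub_operation = sub_operation

    def _json_dict_(self):
        # 'cirq_type': self.__class__.__name__  <- the deprecated pattern.
        # The JSON protocol now injects 'cirq_type' itself, so the method
        # only lists the value's own attributes.
        return {
            'conditions': self._conditions,
            'sub_operation': self._sub_operation,
        }
```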
[ { "content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import (\n AbstractSet,\n Any,\n Dict,\n FrozenSet,\n List,\n Optional,\n Sequence,\n TYPE_CHECKING,\n Tuple,\n Union,\n)\n\nimport sympy\n\nfrom cirq import protocols, value\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass ClassicallyControlledOperation(raw_types.Operation):\n \"\"\"Augments existing operations to be conditionally executed.\n\n An operation that is classically controlled is executed iff all conditions\n evaluate to True. Currently the only condition type is a measurement key.\n A measurement key evaluates to True iff any qubit in the corresponding\n measurement operation evaluated to a non-zero value.\n\n This object is typically created via\n `operation.with_classical_controls(*conditions)`.\n \"\"\"\n\n def __init__(\n self,\n sub_operation: 'cirq.Operation',\n conditions: Sequence[Union[str, 'cirq.MeasurementKey', 'cirq.Condition', sympy.Basic]],\n ):\n \"\"\"Initializes a `ClassicallyControlledOperation`.\n\n Multiple consecutive `ClassicallyControlledOperation` layers are\n squashed when possible, so one should not depend on a specific number\n of layers.\n\n Args:\n sub_operation: The operation to gate with a classical control\n condition.\n conditions: A sequence of measurement keys, or strings that can be\n parsed into measurement keys.\n\n Raises:\n ValueError: If an unsupported gate is being classically\n controlled.\n \"\"\"\n if protocols.measurement_key_objs(sub_operation):\n raise ValueError(\n f'Cannot conditionally run operations with measurements: {sub_operation}'\n )\n conditions = tuple(conditions)\n if isinstance(sub_operation, ClassicallyControlledOperation):\n conditions += sub_operation._conditions\n sub_operation = sub_operation._sub_operation\n conds: List['cirq.Condition'] = []\n for c in conditions:\n if isinstance(c, str):\n c = value.MeasurementKey.parse_serialized(c)\n if isinstance(c, value.MeasurementKey):\n c = value.KeyCondition(c)\n if isinstance(c, sympy.Basic):\n c = value.SympyCondition(c)\n conds.append(c)\n self._conditions: Tuple['cirq.Condition', ...] 
= tuple(conds)\n self._sub_operation: 'cirq.Operation' = sub_operation\n\n @property\n def classical_controls(self) -> FrozenSet['cirq.Condition']:\n return frozenset(self._conditions).union(self._sub_operation.classical_controls)\n\n def without_classical_controls(self) -> 'cirq.Operation':\n return self._sub_operation.without_classical_controls()\n\n @property\n def qubits(self):\n return self._sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n return self._sub_operation.with_qubits(*new_qubits).with_classical_controls(\n *self._conditions\n )\n\n def _decompose_(self):\n result = protocols.decompose_once(self._sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [ClassicallyControlledOperation(op, self._conditions) for op in result]\n\n def _value_equality_values_(self):\n return (frozenset(self._conditions), self._sub_operation)\n\n def __str__(self) -> str:\n keys = ', '.join(map(str, self._conditions))\n return f'{self._sub_operation}.with_classical_controls({keys})'\n\n def __repr__(self):\n return (\n f'cirq.ClassicallyControlledOperation('\n f'{self._sub_operation!r}, {list(self._conditions)!r})'\n )\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self._sub_operation)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self._sub_operation)\n\n def _resolve_parameters_(\n self, resolver: 'cirq.ParamResolver', recursive: bool\n ) -> 'ClassicallyControlledOperation':\n new_sub_op = protocols.resolve_parameters(self._sub_operation, resolver, recursive)\n return new_sub_op.with_classical_controls(*self._conditions)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> Optional['protocols.CircuitDiagramInfo']:\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=args.known_qubit_count,\n known_qubits=args.known_qubits,\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n label_map=args.label_map,\n )\n sub_info = protocols.circuit_diagram_info(self._sub_operation, sub_args, None)\n if sub_info is None:\n return NotImplemented # coverage: ignore\n\n control_count = len({k for c in self._conditions for k in c.keys})\n wire_symbols = sub_info.wire_symbols + ('^',) * control_count\n if any(not isinstance(c, value.KeyCondition) for c in self._conditions):\n wire_symbols = (\n wire_symbols[0]\n + '(conditions=['\n + ', '.join(str(c) for c in self._conditions)\n + '])',\n ) + wire_symbols[1:]\n exponent_qubit_index = None\n if sub_info.exponent_qubit_index is not None:\n exponent_qubit_index = sub_info.exponent_qubit_index + control_count\n elif sub_info.exponent is not None:\n exponent_qubit_index = control_count\n return protocols.CircuitDiagramInfo(\n wire_symbols=wire_symbols,\n exponent=sub_info.exponent,\n exponent_qubit_index=exponent_qubit_index,\n )\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'cirq_type': self.__class__.__name__,\n 'conditions': self._conditions,\n 'sub_operation': self._sub_operation,\n }\n\n def _act_on_(self, args: 'cirq.ActOnArgs') -> bool:\n if all(c.resolve(args.log_of_measurement_results) for c in self._conditions):\n protocols.act_on(self._sub_operation, args)\n return True\n\n def _with_measurement_key_mapping_(\n self, key_map: Dict[str, str]\n ) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_measurement_key_mapping(c, key_map) for c in self._conditions]\n sub_operation = protocols.with_measurement_key_mapping(self._sub_operation, 
key_map)\n sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_key_path_prefix_(self, prefix: Tuple[str, ...]) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_key_path_prefix(c, prefix) for c in self._conditions]\n sub_operation = protocols.with_key_path_prefix(self._sub_operation, prefix)\n sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_rescoped_keys_(\n self,\n path: Tuple[str, ...],\n bindable_keys: FrozenSet['cirq.MeasurementKey'],\n ) -> 'ClassicallyControlledOperation':\n conds = [protocols.with_rescoped_keys(c, path, bindable_keys) for c in self._conditions]\n sub_operation = protocols.with_rescoped_keys(self._sub_operation, path, bindable_keys)\n return sub_operation.with_classical_controls(*conds)\n\n def _control_keys_(self) -> FrozenSet['cirq.MeasurementKey']:\n local_keys: FrozenSet['cirq.MeasurementKey'] = frozenset(\n k for condition in self._conditions for k in condition.keys\n )\n return local_keys.union(protocols.control_keys(self._sub_operation))\n\n def _qasm_(self, args: 'cirq.QasmArgs') -> Optional[str]:\n args.validate_version('2.0')\n all_keys = \" && \".join(c.qasm for c in self._conditions)\n return args.format('if ({0}) {1}', all_keys, protocols.qasm(self._sub_operation, args=args))\n", "path": "cirq-core/cirq/ops/classically_controlled_operation.py" } ]
[ { "content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import (\n AbstractSet,\n Any,\n Dict,\n FrozenSet,\n List,\n Optional,\n Sequence,\n TYPE_CHECKING,\n Tuple,\n Union,\n)\n\nimport sympy\n\nfrom cirq import protocols, value\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass ClassicallyControlledOperation(raw_types.Operation):\n \"\"\"Augments existing operations to be conditionally executed.\n\n An operation that is classically controlled is executed iff all conditions\n evaluate to True. Currently the only condition type is a measurement key.\n A measurement key evaluates to True iff any qubit in the corresponding\n measurement operation evaluated to a non-zero value.\n\n This object is typically created via\n `operation.with_classical_controls(*conditions)`.\n \"\"\"\n\n def __init__(\n self,\n sub_operation: 'cirq.Operation',\n conditions: Sequence[Union[str, 'cirq.MeasurementKey', 'cirq.Condition', sympy.Basic]],\n ):\n \"\"\"Initializes a `ClassicallyControlledOperation`.\n\n Multiple consecutive `ClassicallyControlledOperation` layers are\n squashed when possible, so one should not depend on a specific number\n of layers.\n\n Args:\n sub_operation: The operation to gate with a classical control\n condition.\n conditions: A sequence of measurement keys, or strings that can be\n parsed into measurement keys.\n\n Raises:\n ValueError: If an unsupported gate is being classically\n controlled.\n \"\"\"\n if protocols.measurement_key_objs(sub_operation):\n raise ValueError(\n f'Cannot conditionally run operations with measurements: {sub_operation}'\n )\n conditions = tuple(conditions)\n if isinstance(sub_operation, ClassicallyControlledOperation):\n conditions += sub_operation._conditions\n sub_operation = sub_operation._sub_operation\n conds: List['cirq.Condition'] = []\n for c in conditions:\n if isinstance(c, str):\n c = value.MeasurementKey.parse_serialized(c)\n if isinstance(c, value.MeasurementKey):\n c = value.KeyCondition(c)\n if isinstance(c, sympy.Basic):\n c = value.SympyCondition(c)\n conds.append(c)\n self._conditions: Tuple['cirq.Condition', ...] 
= tuple(conds)\n self._sub_operation: 'cirq.Operation' = sub_operation\n\n @property\n def classical_controls(self) -> FrozenSet['cirq.Condition']:\n return frozenset(self._conditions).union(self._sub_operation.classical_controls)\n\n def without_classical_controls(self) -> 'cirq.Operation':\n return self._sub_operation.without_classical_controls()\n\n @property\n def qubits(self):\n return self._sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n return self._sub_operation.with_qubits(*new_qubits).with_classical_controls(\n *self._conditions\n )\n\n def _decompose_(self):\n result = protocols.decompose_once(self._sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [ClassicallyControlledOperation(op, self._conditions) for op in result]\n\n def _value_equality_values_(self):\n return (frozenset(self._conditions), self._sub_operation)\n\n def __str__(self) -> str:\n keys = ', '.join(map(str, self._conditions))\n return f'{self._sub_operation}.with_classical_controls({keys})'\n\n def __repr__(self):\n return (\n f'cirq.ClassicallyControlledOperation('\n f'{self._sub_operation!r}, {list(self._conditions)!r})'\n )\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self._sub_operation)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self._sub_operation)\n\n def _resolve_parameters_(\n self, resolver: 'cirq.ParamResolver', recursive: bool\n ) -> 'ClassicallyControlledOperation':\n new_sub_op = protocols.resolve_parameters(self._sub_operation, resolver, recursive)\n return new_sub_op.with_classical_controls(*self._conditions)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> Optional['protocols.CircuitDiagramInfo']:\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=args.known_qubit_count,\n known_qubits=args.known_qubits,\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n label_map=args.label_map,\n )\n sub_info = protocols.circuit_diagram_info(self._sub_operation, sub_args, None)\n if sub_info is None:\n return NotImplemented # coverage: ignore\n\n control_count = len({k for c in self._conditions for k in c.keys})\n wire_symbols = sub_info.wire_symbols + ('^',) * control_count\n if any(not isinstance(c, value.KeyCondition) for c in self._conditions):\n wire_symbols = (\n wire_symbols[0]\n + '(conditions=['\n + ', '.join(str(c) for c in self._conditions)\n + '])',\n ) + wire_symbols[1:]\n exponent_qubit_index = None\n if sub_info.exponent_qubit_index is not None:\n exponent_qubit_index = sub_info.exponent_qubit_index + control_count\n elif sub_info.exponent is not None:\n exponent_qubit_index = control_count\n return protocols.CircuitDiagramInfo(\n wire_symbols=wire_symbols,\n exponent=sub_info.exponent,\n exponent_qubit_index=exponent_qubit_index,\n )\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'conditions': self._conditions,\n 'sub_operation': self._sub_operation,\n }\n\n def _act_on_(self, args: 'cirq.ActOnArgs') -> bool:\n if all(c.resolve(args.log_of_measurement_results) for c in self._conditions):\n protocols.act_on(self._sub_operation, args)\n return True\n\n def _with_measurement_key_mapping_(\n self, key_map: Dict[str, str]\n ) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_measurement_key_mapping(c, key_map) for c in self._conditions]\n sub_operation = protocols.with_measurement_key_mapping(self._sub_operation, key_map)\n sub_operation = 
self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_key_path_prefix_(self, prefix: Tuple[str, ...]) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_key_path_prefix(c, prefix) for c in self._conditions]\n sub_operation = protocols.with_key_path_prefix(self._sub_operation, prefix)\n sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_rescoped_keys_(\n self,\n path: Tuple[str, ...],\n bindable_keys: FrozenSet['cirq.MeasurementKey'],\n ) -> 'ClassicallyControlledOperation':\n conds = [protocols.with_rescoped_keys(c, path, bindable_keys) for c in self._conditions]\n sub_operation = protocols.with_rescoped_keys(self._sub_operation, path, bindable_keys)\n return sub_operation.with_classical_controls(*conds)\n\n def _control_keys_(self) -> FrozenSet['cirq.MeasurementKey']:\n local_keys: FrozenSet['cirq.MeasurementKey'] = frozenset(\n k for condition in self._conditions for k in condition.keys\n )\n return local_keys.union(protocols.control_keys(self._sub_operation))\n\n def _qasm_(self, args: 'cirq.QasmArgs') -> Optional[str]:\n args.validate_version('2.0')\n all_keys = \" && \".join(c.qasm for c in self._conditions)\n return args.format('if ({0}) {1}', all_keys, protocols.qasm(self._sub_operation, args=args))\n", "path": "cirq-core/cirq/ops/classically_controlled_operation.py" } ]
diff --git a/cirq-core/cirq/ops/classically_controlled_operation.py b/cirq-core/cirq/ops/classically_controlled_operation.py index 74a4c3dbb53..3ac6f18bb93 100644 --- a/cirq-core/cirq/ops/classically_controlled_operation.py +++ b/cirq-core/cirq/ops/classically_controlled_operation.py @@ -171,7 +171,6 @@ def _circuit_diagram_info_( def _json_dict_(self) -> Dict[str, Any]: return { - 'cirq_type': self.__class__.__name__, 'conditions': self._conditions, 'sub_operation': self._sub_operation, } diff --git a/docs/dev/serialization.md b/docs/dev/serialization.md index 48d285b971a..4b8983e3b79 100644 --- a/docs/dev/serialization.md +++ b/docs/dev/serialization.md @@ -89,10 +89,8 @@ There are several steps needed to support an object's serialization and deserial and pass `cirq-core/cirq/protocols/json_serialization_test.py`: 1. The object should have a `_json_dict_` method that returns a dictionary -containing a `"cirq_type"` key as well as keys for each of the value's -attributes. If these keys do not match the names of the class' initializer -arguments, a `_from_json_dict_` class method must also be defined. -Typically the `"cirq_type"` will be the name of your class. +containing keys for each of the value's attributes. If these keys do not match the names of +the class' initializer arguments, a `_from_json_dict_` class method must also be defined. 2. In `class_resolver_dictionary` within the packages's `json_resolver_cache.py` file, for each serializable class, the `cirq_type` of the class should be mapped to the imported class
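A quick way to sanity-check the change, assuming a working `cirq` installation (the qubit, gate, and measurement key below are arbitrary choices, not taken from the PR): the serialized JSON still carries the type information, but it is now injected by the JSON protocol rather than hard-coded in `_json_dict_`, so round-tripping should still work without the deprecation warning.

```python
import cirq

q = cirq.LineQubit(0)
op = cirq.X(q).with_classical_controls("m")   # a ClassicallyControlledOperation

text = cirq.to_json(op)
assert "ClassicallyControlledOperation" in text   # type name added by the protocol
assert cirq.read_json(json_text=text) == op       # round-trips cleanly
```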
django__channels-1951
HttpCommunicator does not raise exception from consumer If `WebsocketCommunicator` encounters an error, it shows the exception raised by the underlying consumer. In contrast, `HttpCommunicator` just shows a `TimeoutError`, which is not useful for debugging. Example tests: ```py from channels.generic.http import AsyncHttpConsumer from channels.generic.websocket import AsyncWebsocketConsumer from channels.testing import HttpCommunicator from channels.testing import WebsocketCommunicator from django.test import TestCase class HttpConsumer(AsyncHttpConsumer): async def handle(self, body): 1 / 0 class WebsocketConsumer(AsyncWebsocketConsumer): async def connect(self): 1 / 0 class ConsumerTests(TestCase): async def test_http(self): communicator = HttpCommunicator(HttpConsumer.as_asgi(), "GET", "/") await communicator.get_response() async def test_websocket(self): communicator = WebsocketCommunicator(WebsocketConsumer.as_asgi(), "/") connected, subprotocol = await communicator.connect() ``` Output: ``` $ python manage.py test Found 2 test(s). Creating test database for alias 'default'... Destroying old test database for alias 'default'... System check identified no issues (0 silenced). <Task finished name='Task-2' coro=<HttpConsumer() done, defined at /Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/consumer.py:92> result=None> E<Task finished name='Task-7' coro=<WebsocketConsumer() done, defined at /Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/consumer.py:92> exception=ZeroDivisionError('division by zero')> E ====================================================================== ERROR: test_http (example.tests.ConsumerTests) ---------------------------------------------------------------------- Traceback (most recent call last): File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/testing.py", line 74, in receive_output return await self.output_queue.get() File "/Users/chainz/.pyenv/versions/3.10.8/lib/python3.10/asyncio/queues.py", line 159, in get await getter asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/sync.py", line 218, in __call__ return call_result.result() File "/Users/chainz/.pyenv/versions/3.10.8/lib/python3.10/concurrent/futures/_base.py", line 451, in result return self.__get_result() File "/Users/chainz/.pyenv/versions/3.10.8/lib/python3.10/concurrent/futures/_base.py", line 403, in __get_result raise self._exception File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/sync.py", line 284, in main_wrap result = await self.awaitable(*args, **kwargs) File "/Users/chainz/tmp/channelstest/example/tests.py", line 21, in test_http await communicator.get_response() File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/testing/http.py", line 42, in get_response response_start = await self.receive_output(timeout) File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/testing.py", line 86, in receive_output raise e File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/testing.py", line 73, in receive_output async with async_timeout(timeout): File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/timeout.py", line 65, in __aexit__ self._do_exit(exc_type) File 
"/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/timeout.py", line 102, in _do_exit raise asyncio.TimeoutError asyncio.exceptions.TimeoutError ====================================================================== ERROR: test_websocket (example.tests.ConsumerTests) ---------------------------------------------------------------------- Traceback (most recent call last): File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/testing.py", line 74, in receive_output return await self.output_queue.get() File "/Users/chainz/.pyenv/versions/3.10.8/lib/python3.10/asyncio/queues.py", line 159, in get await getter asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/testing.py", line 73, in receive_output async with async_timeout(timeout): File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/timeout.py", line 65, in __aexit__ self._do_exit(exc_type) File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/timeout.py", line 102, in _do_exit raise asyncio.TimeoutError asyncio.exceptions.TimeoutError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/sync.py", line 218, in __call__ return call_result.result() File "/Users/chainz/.pyenv/versions/3.10.8/lib/python3.10/concurrent/futures/_base.py", line 451, in result return self.__get_result() File "/Users/chainz/.pyenv/versions/3.10.8/lib/python3.10/concurrent/futures/_base.py", line 403, in __get_result raise self._exception File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/sync.py", line 284, in main_wrap result = await self.awaitable(*args, **kwargs) File "/Users/chainz/tmp/channelstest/example/tests.py", line 25, in test_websocket connected, subprotocol = await communicator.connect() File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/testing/websocket.py", line 36, in connect response = await self.receive_output(timeout) File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/asgiref/testing.py", line 79, in receive_output self.future.result() File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/consumer.py", line 94, in app return await consumer(scope, receive, send) File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/consumer.py", line 62, in __call__ await await_many_dispatch([receive], self.dispatch) File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/utils.py", line 50, in await_many_dispatch await dispatch(result) File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/consumer.py", line 73, in dispatch await handler(message) File "/Users/chainz/tmp/channelstest/venv/lib/python3.10/site-packages/channels/generic/websocket.py", line 173, in websocket_connect await self.connect() File "/Users/chainz/tmp/channelstest/example/tests.py", line 15, in connect 1 / 0 ZeroDivisionError: division by zero ---------------------------------------------------------------------- Ran 2 tests in 2.016s FAILED (errors=2) Destroying test database for alias 'default'... ``` Please also try and include, if you can: (Channels 4.0.0, with Django 4.1.3, on Python 3.10.8)
[ { "content": "from channels.consumer import AsyncConsumer\n\nfrom ..exceptions import StopConsumer\n\n\nclass AsyncHttpConsumer(AsyncConsumer):\n \"\"\"\n Async HTTP consumer. Provides basic primitives for building asynchronous\n HTTP endpoints.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.body = []\n\n async def send_headers(self, *, status=200, headers=None):\n \"\"\"\n Sets the HTTP response status and headers. Headers may be provided as\n a list of tuples or as a dictionary.\n\n Note that the ASGI spec requires that the protocol server only starts\n sending the response to the client after ``self.send_body`` has been\n called the first time.\n \"\"\"\n if headers is None:\n headers = []\n elif isinstance(headers, dict):\n headers = list(headers.items())\n\n await self.send(\n {\"type\": \"http.response.start\", \"status\": status, \"headers\": headers}\n )\n\n async def send_body(self, body, *, more_body=False):\n \"\"\"\n Sends a response body to the client. The method expects a bytestring.\n\n Set ``more_body=True`` if you want to send more body content later.\n The default behavior closes the response, and further messages on\n the channel will be ignored.\n \"\"\"\n assert isinstance(body, bytes), \"Body is not bytes\"\n await self.send(\n {\"type\": \"http.response.body\", \"body\": body, \"more_body\": more_body}\n )\n\n async def send_response(self, status, body, **kwargs):\n \"\"\"\n Sends a response to the client. This is a thin wrapper over\n ``self.send_headers`` and ``self.send_body``, and everything said\n above applies here as well. This method may only be called once.\n \"\"\"\n await self.send_headers(status=status, **kwargs)\n await self.send_body(body)\n\n async def handle(self, body):\n \"\"\"\n Receives the request body as a bytestring. Response may be composed\n using the ``self.send*`` methods; the return value of this method is\n thrown away.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses of AsyncHttpConsumer must provide a handle() method.\"\n )\n\n async def disconnect(self):\n \"\"\"\n Overrideable place to run disconnect handling. Do not send anything\n from here.\n \"\"\"\n pass\n\n async def http_request(self, message):\n \"\"\"\n Async entrypoint - concatenates body fragments and hands off control\n to ``self.handle`` when the body has been completely received.\n \"\"\"\n if \"body\" in message:\n self.body.append(message[\"body\"])\n if not message.get(\"more_body\"):\n try:\n await self.handle(b\"\".join(self.body))\n finally:\n await self.disconnect()\n raise StopConsumer()\n\n async def http_disconnect(self, message):\n \"\"\"\n Let the user do their cleanup and close the consumer.\n \"\"\"\n await self.disconnect()\n raise StopConsumer()\n", "path": "channels/generic/http.py" } ]
[ { "content": "from channels.consumer import AsyncConsumer\n\nfrom ..exceptions import StopConsumer\n\n\nclass AsyncHttpConsumer(AsyncConsumer):\n \"\"\"\n Async HTTP consumer. Provides basic primitives for building asynchronous\n HTTP endpoints.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.body = []\n\n async def send_headers(self, *, status=200, headers=None):\n \"\"\"\n Sets the HTTP response status and headers. Headers may be provided as\n a list of tuples or as a dictionary.\n\n Note that the ASGI spec requires that the protocol server only starts\n sending the response to the client after ``self.send_body`` has been\n called the first time.\n \"\"\"\n if headers is None:\n headers = []\n elif isinstance(headers, dict):\n headers = list(headers.items())\n\n await self.send(\n {\"type\": \"http.response.start\", \"status\": status, \"headers\": headers}\n )\n\n async def send_body(self, body, *, more_body=False):\n \"\"\"\n Sends a response body to the client. The method expects a bytestring.\n\n Set ``more_body=True`` if you want to send more body content later.\n The default behavior closes the response, and further messages on\n the channel will be ignored.\n \"\"\"\n assert isinstance(body, bytes), \"Body is not bytes\"\n await self.send(\n {\"type\": \"http.response.body\", \"body\": body, \"more_body\": more_body}\n )\n\n async def send_response(self, status, body, **kwargs):\n \"\"\"\n Sends a response to the client. This is a thin wrapper over\n ``self.send_headers`` and ``self.send_body``, and everything said\n above applies here as well. This method may only be called once.\n \"\"\"\n await self.send_headers(status=status, **kwargs)\n await self.send_body(body)\n\n async def handle(self, body):\n \"\"\"\n Receives the request body as a bytestring. Response may be composed\n using the ``self.send*`` methods; the return value of this method is\n thrown away.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses of AsyncHttpConsumer must provide a handle() method.\"\n )\n\n async def disconnect(self):\n \"\"\"\n Overrideable place to run disconnect handling. Do not send anything\n from here.\n \"\"\"\n pass\n\n async def http_request(self, message):\n \"\"\"\n Async entrypoint - concatenates body fragments and hands off control\n to ``self.handle`` when the body has been completely received.\n \"\"\"\n if \"body\" in message:\n self.body.append(message[\"body\"])\n if not message.get(\"more_body\"):\n try:\n await self.handle(b\"\".join(self.body))\n finally:\n await self.disconnect()\n raise StopConsumer()\n\n async def http_disconnect(self, message):\n \"\"\"\n Let the user do their cleanup and close the consumer.\n \"\"\"\n await self.disconnect()\n raise StopConsumer()\n", "path": "channels/generic/http.py" } ]
diff --git a/channels/generic/http.py b/channels/generic/http.py index 8bbf35236..909e85704 100644 --- a/channels/generic/http.py +++ b/channels/generic/http.py @@ -81,7 +81,7 @@ async def http_request(self, message): await self.handle(b"".join(self.body)) finally: await self.disconnect() - raise StopConsumer() + raise StopConsumer() async def http_disconnect(self, message): """ diff --git a/tests/test_generic_http.py b/tests/test_generic_http.py index 85ecdd041..bfb889c0e 100644 --- a/tests/test_generic_http.py +++ b/tests/test_generic_http.py @@ -38,6 +38,19 @@ async def handle(self, body): assert response["headers"] == [(b"Content-Type", b"application/json")] [email protected] +async def test_error(): + class TestConsumer(AsyncHttpConsumer): + async def handle(self, body): + raise AssertionError("Error correctly raised") + + communicator = HttpCommunicator(TestConsumer(), "GET", "/") + with pytest.raises(AssertionError) as excinfo: + await communicator.get_response(timeout=0.05) + + assert str(excinfo.value) == "Error correctly raised" + + @pytest.mark.asyncio async def test_per_scope_consumers(): """
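The one-line dedent above works because an exception raised inside a `finally` block replaces whatever exception was already in flight; keeping `raise StopConsumer()` inside the `finally` therefore swallowed the error from `handle()`, and the test client only saw a timeout. A small, Channels-independent illustration of that Python behaviour:

```python
def swallow():
    try:
        1 / 0                       # the "real" error from handle()
    finally:
        raise RuntimeError("stop")  # stands in for StopConsumer(); masks the error


try:
    swallow()
except Exception as exc:
    print(type(exc).__name__)       # prints RuntimeError, not ZeroDivisionError
```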
sql-machine-learning__elasticdl-1666
Parse arguments (flags) in ps/server.go
[ { "content": "import argparse\nfrom itertools import chain\n\nfrom elasticdl.python.common.constants import DistributionStrategy\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\nMODEL_SPEC_GROUP = [\n \"dataset_fn\",\n \"eval_metrics_fn\",\n \"model_def\",\n \"model_params\",\n \"optimizer\",\n \"loss\",\n \"output\",\n \"minibatch_size\",\n \"grads_to_wait\",\n \"num_epochs\",\n \"tensorboard_log_dir\",\n \"training_data\",\n]\n\nEVALUATION_GROUP = [\n \"evaluation_steps\",\n \"validation_data\",\n \"evaluation_start_delay_secs\",\n \"evaluation_throttle_secs\",\n]\n\nPREDICTION_GROUP = [\"prediction_data\", \"prediction_outputs_processor\"]\n\nCHECKPOINT_GROUP = [\n \"checkpoint_dir_for_init\",\n \"checkpoint_steps\",\n \"keep_checkpoint_max\",\n \"checkpoint_dir\",\n]\n\nALL_ARGS_GROUPS = [\n MODEL_SPEC_GROUP,\n EVALUATION_GROUP,\n PREDICTION_GROUP,\n CHECKPOINT_GROUP,\n]\n\n\ndef pos_int(arg):\n res = int(arg)\n if res <= 0:\n raise ValueError(\"Positive integer argument required. Got %s\" % res)\n return res\n\n\ndef non_neg_int(arg):\n res = int(arg)\n if res < 0:\n raise ValueError(\n \"Non-negative integer argument required. Get %s\" % res\n )\n return res\n\n\ndef parse_envs(arg):\n \"\"\"Parse environment configs as a dict.\n\n Support format 'k1=v1,k2=v2,k3=v3..'. Note that comma is supported\n in value field.\n \"\"\"\n envs = {}\n if not arg:\n return envs\n\n i = 0\n fields = arg.split(\"=\")\n if len(fields) < 2:\n return envs\n pre_key = \"\"\n while i < len(fields):\n if i == 0:\n pre_key = fields[i]\n elif i == len(fields) - 1:\n envs[pre_key] = fields[i]\n else:\n r = fields[i].rfind(\",\")\n envs[pre_key] = fields[i][:r]\n pre_key = fields[i][r + 1 :] # noqa: E203\n i += 1\n return envs\n\n\ndef add_bool_param(parser, name, default, help):\n parser.add_argument(\n name, # should be in \"--foo\" format\n nargs=\"?\",\n const=not default,\n default=default,\n type=lambda x: x.lower() in [\"true\", \"yes\", \"t\", \"y\"],\n help=help,\n )\n\n\ndef add_common_params(parser):\n \"\"\"Common arguments for training/prediction/evaluation\"\"\"\n add_common_args_between_master_and_worker(parser)\n parser.add_argument(\n \"--docker_image_repository\",\n default=\"\",\n help=\"The repository for generated Docker images, if set, the image \"\n \"is also pushed to the repository\",\n )\n parser.add_argument(\"--image_base\", help=\"Base Docker image.\")\n parser.add_argument(\"--job_name\", help=\"ElasticDL job name\", required=True)\n parser.add_argument(\n \"--master_resource_request\",\n default=\"cpu=0.1,memory=1024Mi\",\n type=str,\n help=\"The minimal resource required by master, \"\n \"e.g. cpu=0.1,memory=1024Mi,disk=1024Mi,gpu=1\",\n )\n parser.add_argument(\n \"--master_resource_limit\",\n type=str,\n default=\"\",\n help=\"The maximal resource required by master, \"\n \"e.g. cpu=0.1,memory=1024Mi,disk=1024Mi,gpu=1, \"\n \"default to master_resource_request\",\n )\n parser.add_argument(\n \"--num_workers\", type=int, help=\"Number of workers\", default=0\n )\n parser.add_argument(\n \"--worker_resource_request\",\n default=\"cpu=1,memory=4096Mi\",\n type=str,\n help=\"The minimal resource required by worker, \"\n \"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1\",\n )\n parser.add_argument(\n \"--worker_resource_limit\",\n type=str,\n default=\"\",\n help=\"The maximal resource required by worker, \"\n \"e.g. 
cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,\"\n \"default to worker_resource_request\",\n )\n parser.add_argument(\n \"--master_pod_priority\",\n default=\"\",\n help=\"The requested priority of master pod\",\n )\n parser.add_argument(\n \"--worker_pod_priority\",\n default=\"\",\n help=\"The requested priority of worker pod\",\n )\n parser.add_argument(\n \"--num_ps_pods\", type=int, help=\"Number of PS pods\", default=1\n )\n parser.add_argument(\n \"--ps_resource_request\",\n default=\"cpu=1,memory=4096Mi\",\n type=str,\n help=\"The minimal resource required by worker, \"\n \"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1\",\n )\n parser.add_argument(\n \"--ps_resource_limit\",\n default=\"\",\n type=str,\n help=\"The maximal resource required by worker, \"\n \"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,\"\n \"default to worker_resource_request\",\n )\n parser.add_argument(\n \"--ps_pod_priority\",\n default=\"\",\n help=\"The requested priority of PS pod\",\n )\n parser.add_argument(\n \"--volume\",\n default=\"\",\n type=str,\n help=\"The Kubernetes volume information, \"\n \"the supported volumes are `persistentVolumeClaim` and `hostPath`,\"\n 'e.g. \"claim_name=c1,mount_path=/path1\" for `persistentVolumeClaim`,'\n '\"host_path=c0,mount_path=/path0\" for `hostPath`,'\n 'or \"host_path=c0,mount_path=/path0,type=Directory\" for `hostPath`,'\n '\"host_path=c0,mount_path=/path0;claim_name=c1,mount_path=/path1\" for'\n \"multiple volumes\",\n )\n parser.add_argument(\n \"--image_pull_policy\",\n default=\"Always\",\n help=\"The image pull policy of master and worker\",\n choices=[\"Never\", \"IfNotPresent\", \"Always\"],\n )\n parser.add_argument(\n \"--restart_policy\",\n default=\"Never\",\n help=\"The pod restart policy when pod crashed\",\n choices=[\"Never\", \"OnFailure\", \"Always\"],\n )\n parser.add_argument(\n \"--envs\",\n type=str,\n default=\"\",\n help=\"Runtime environment variables. (key1=value1,key2=value2), \"\n \"comma is supported in value field\",\n )\n parser.add_argument(\n \"--extra_pypi_index\",\n default=\"https://pypi.org/simple\",\n help=\"The extra URLs of Python package repository indexes\",\n )\n parser.add_argument(\n \"--namespace\",\n default=\"default\",\n type=str,\n help=\"The name of the Kubernetes namespace where ElasticDL \"\n \"pods will be created\",\n )\n parser.add_argument(\n \"--num_minibatches_per_task\",\n type=int,\n help=\"The number of minibatches per task\",\n required=True,\n )\n parser.add_argument(\n \"--cluster_spec\",\n help=\"The file that contains user-defined cluster specification\",\n default=\"\",\n )\n parser.add_argument(\n \"--docker_base_url\",\n help=\"URL to the Docker server\",\n default=\"unix://var/run/docker.sock\",\n )\n parser.add_argument(\n \"--docker_tlscert\", help=\"Path to Docker client cert\", default=\"\"\n )\n parser.add_argument(\n \"--docker_tlskey\", help=\"Path to Docker client key\", default=\"\"\n )\n parser.add_argument(\n \"--yaml\",\n type=str,\n default=\"\",\n help=\"File path for dumping ElasticDL job YAML specification. \"\n \"Note that, if users specify --yaml, the client wouldn't submit \"\n \"the job automatically, and users need to launch the job through \"\n \"command `kubectl create -f path_to_yaml_file`.\",\n )\n\n\ndef add_train_params(parser):\n parser.add_argument(\n \"--tensorboard_log_dir\",\n default=\"\",\n type=str,\n help=\"Directory where TensorBoard will look to find \"\n \"TensorFlow event files that it can display. 
\"\n \"TensorBoard will recursively walk the directory \"\n \"structure rooted at log dir, looking for .*tfevents.* \"\n \"files. You may also pass a comma separated list of log \"\n \"directories, and TensorBoard will watch each \"\n \"directory.\",\n )\n parser.add_argument(\"--num_epochs\", type=int, default=1)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n default=1,\n )\n parser.add_argument(\n \"--training_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for training.\",\n default=\"\",\n )\n parser.add_argument(\n \"--validation_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for evaluation.\",\n default=\"\",\n )\n parser.add_argument(\n \"--evaluation_steps\",\n type=int,\n help=\"Evaluate the model every this many steps.\"\n \"If 0, step-based evaluation is disabled\",\n default=0,\n )\n parser.add_argument(\n \"--evaluation_start_delay_secs\",\n type=int,\n help=\"Start time-based evaluation only after waiting for \"\n \"this many seconds\",\n default=100,\n )\n parser.add_argument(\n \"--evaluation_throttle_secs\",\n type=int,\n help=\"Do not re-evaluate unless the last evaluation was started \"\n \"at least this many seconds ago.\"\n \"If 0, time-based evaluation is disabled\",\n default=0,\n )\n parser.add_argument(\n \"--checkpoint_dir_for_init\",\n help=\"The checkpoint directory to initialize the training model\",\n default=\"\",\n )\n parser.add_argument(\n \"--output\",\n type=str,\n default=\"\",\n help=\"The path to save the final trained model\",\n )\n parser.add_argument(\n \"--sync_version_tolerance\",\n type=int,\n help=\"The maximum model version difference between reported gradients \"\n \"and PS that synchronous SGD can accepts.\",\n default=0,\n )\n add_bool_param(\n parser=parser,\n name=\"--use_async\",\n default=False,\n help=\"True for asynchronous SGD, False for synchronous SGD\",\n )\n add_bool_param(\n parser=parser,\n name=\"--lr_staleness_modulation\",\n default=False,\n help=\"If True, master will modulate the learning rate with staleness \"\n \"in asynchronous SGD\",\n )\n\n\ndef add_evaluate_params(parser):\n parser.add_argument(\n \"--validation_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for evaluation.\",\n required=True,\n )\n parser.add_argument(\n \"--checkpoint_dir_for_init\",\n help=\"The checkpoint directory to initialize the training model\",\n required=True,\n )\n\n\ndef add_predict_params(parser):\n parser.add_argument(\n \"--prediction_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for prediction.\",\n required=True,\n )\n parser.add_argument(\n \"--prediction_outputs_processor\",\n help=\"The name of the prediction output processor class \"\n \"defined in the model definition file.\",\n default=\"PredictionOutputsProcessor\",\n )\n parser.add_argument(\n \"--checkpoint_dir_for_init\",\n help=\"The checkpoint directory to initialize the training model\",\n required=True,\n )\n\n\ndef add_clean_params(parser):\n parser.add_argument(\n \"--docker_image_repository\",\n type=str,\n help=\"Clean docker images belonging to this repository.\",\n )\n parser.add_argument(\n \"--all\", action=\"store_true\", help=\"Clean all local docker images\"\n )\n parser.add_argument(\n \"--docker_base_url\",\n help=\"URL to the Docker server\",\n 
default=\"unix://var/run/docker.sock\",\n )\n parser.add_argument(\n \"--docker_tlscert\", help=\"Path to Docker client cert\", default=\"\"\n )\n parser.add_argument(\n \"--docker_tlskey\", help=\"Path to Docker client key\", default=\"\"\n )\n\n\ndef print_args(args, groups=None):\n \"\"\"\n Args:\n args: parsing results returned from `parser.parse_args`\n groups: It is a list of a list. It controls which options should be\n printed together. For example, we expect all model specifications such\n as `optimizer`, `loss` are better printed together.\n groups = [[\"optimizer\", \"loss\"]]\n \"\"\"\n\n def _get_attr(instance, attribute):\n try:\n return getattr(instance, attribute)\n except AttributeError:\n return None\n\n dedup = set()\n if groups:\n for group in groups:\n for element in group:\n dedup.add(element)\n logger.info(\"%s = %s\", element, _get_attr(args, element))\n other_options = [\n (key, value)\n for (key, value) in args.__dict__.items()\n if key not in dedup\n ]\n for key, value in other_options:\n logger.info(\"%s = %s\", key, value)\n\n\ndef add_common_args_between_master_and_worker(parser):\n parser.add_argument(\n \"--minibatch_size\",\n help=\"Minibatch size for worker\",\n type=int,\n required=True,\n )\n parser.add_argument(\n \"--model_zoo\",\n help=\"The directory that contains user-defined model files \"\n \"or a specific model file\",\n required=True,\n )\n parser.add_argument(\n \"--log_level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n type=str.upper,\n default=\"INFO\",\n help=\"Set the logging level\",\n )\n parser.add_argument(\n \"--dataset_fn\",\n type=str,\n default=\"dataset_fn\",\n help=\"The name of the dataset function defined in the model file\",\n )\n parser.add_argument(\n \"--loss\",\n type=str,\n default=\"loss\",\n help=\"The name of the loss function defined in the model file\",\n )\n parser.add_argument(\n \"--optimizer\",\n type=str,\n default=\"optimizer\",\n help=\"The name of the optimizer defined in the model file\",\n )\n parser.add_argument(\n \"--learning_rate_scheduler\",\n type=str,\n default=\"learning_rate_scheduler\",\n help=\"Optional callable learning rate scheduler defined in\"\n \"the model file, which takes model version as its input and\"\n \"returns a learning rate value\",\n )\n parser.add_argument(\n \"--eval_metrics_fn\",\n type=str,\n default=\"eval_metrics_fn\",\n help=\"The name of the evaluation metrics function defined \"\n \"in the model file\",\n )\n parser.add_argument(\n \"--custom_data_reader\",\n type=str,\n default=\"custom_data_reader\",\n help=\"The custom data reader defined in the model file\",\n )\n parser.add_argument(\n \"--model_def\",\n type=str,\n required=True,\n help=\"The import path to the model definition function/class in the \"\n 'model zoo, e.g. \"cifar10_subclass.cifar10_subclass.CustomModel\"',\n )\n parser.add_argument(\n \"--model_params\",\n type=str,\n default=\"\",\n help=\"The model parameters in a string separated by semi-colon \"\n 'used to instantiate the model, e.g. \"param1=1; param2=2\"',\n )\n parser.add_argument(\n \"--get_model_steps\",\n type=int,\n default=1,\n help=\"Worker will get_model from PS every this many steps\",\n )\n parser.add_argument(\n \"--data_reader_params\",\n type=str,\n default=\"\",\n help=\"The data reader parameters in a string separated by semi-colon \"\n 'used to instantiate the data reader, e.g. 
\"param1=1; param2=2\"',\n )\n parser.add_argument(\n \"--distribution_strategy\",\n type=str,\n choices=[\n \"\",\n DistributionStrategy.LOCAL,\n DistributionStrategy.PARAMETER_SERVER,\n DistributionStrategy.ALLREDUCE,\n ],\n default=\"\",\n help=\"Master will use a distribution policy on a list of devices \"\n \"according to the distributed strategy, \"\n 'e.g. \"ParameterServerStrategy\" or \"AllreduceStrategy\" or \"Local\"',\n )\n parser.add_argument(\n \"--checkpoint_steps\",\n type=int,\n help=\"Save checkpoint every this many steps.\"\n \"If 0, no checkpoints to save.\",\n default=0,\n )\n parser.add_argument(\n \"--checkpoint_dir\",\n help=\"The directory to store the checkpoint files\",\n default=\"\",\n )\n parser.add_argument(\n \"--keep_checkpoint_max\",\n type=int,\n help=\"The maximum number of recent checkpoint files to keep.\"\n \"If 0, keep all.\",\n default=0,\n )\n\n\ndef parse_master_args(master_args=None):\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--port\",\n default=50001,\n type=pos_int,\n help=\"The listening port of master\",\n )\n parser.add_argument(\n \"--worker_image\", help=\"Docker image for workers\", default=None\n )\n parser.add_argument(\n \"--prediction_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for prediction.\",\n default=\"\",\n )\n add_common_params(parser)\n add_train_params(parser)\n\n args, unknown_args = parser.parse_known_args(args=master_args)\n print_args(args, groups=ALL_ARGS_GROUPS)\n if unknown_args:\n logger.warning(\"Unknown arguments: %s\", unknown_args)\n\n if all(\n v == \"\" or v is None\n for v in [\n args.training_data,\n args.validation_data,\n args.prediction_data,\n ]\n ):\n raise ValueError(\n \"At least one of the data directories needs to be provided\"\n )\n\n if args.prediction_data and (args.training_data or args.validation_data):\n raise ValueError(\n \"Running prediction together with training or evaluation \"\n \"is not supported\"\n )\n if args.prediction_data and not args.checkpoint_dir_for_init:\n raise ValueError(\n \"checkpoint_dir_for_init is required for running \" \"prediction job\"\n )\n if not args.use_async and args.get_model_steps > 1:\n args.get_model_steps = 1\n logger.warning(\n \"get_model_steps is set to 1 when using synchronous SGD.\"\n )\n if args.use_async and args.grads_to_wait > 1:\n args.grads_to_wait = 1\n logger.warning(\n \"grads_to_wait is set to 1 when using asynchronous SGD.\"\n )\n\n return args\n\n\ndef parse_ps_args(ps_args=None):\n parser = argparse.ArgumentParser(description=\"ElasticDL PS\")\n parser.add_argument(\n \"--ps_id\", help=\"ID unique to the PS\", type=int, required=True\n )\n parser.add_argument(\n \"--port\", help=\"Port used by the PS pod\", type=int, required=True\n )\n parser.add_argument(\"--master_addr\", help=\"Master ip:port\")\n\n add_common_params(parser)\n add_train_params(parser)\n # TODO: add PS replica address for RPC stub creation\n\n args, unknown_args = parser.parse_known_args(args=ps_args)\n print_args(args, groups=ALL_ARGS_GROUPS)\n if unknown_args:\n logger.warning(\"Unknown arguments: %s\", unknown_args)\n if args.use_async and args.grads_to_wait > 1:\n args.grads_to_wait = 1\n logger.warning(\n \"grads_to_wait is set to 1 when using asynchronous SGD.\"\n )\n return args\n\n\ndef parse_worker_args(worker_args=None):\n parser = argparse.ArgumentParser(description=\"ElasticDL Worker\")\n add_common_args_between_master_and_worker(parser)\n 
parser.add_argument(\n \"--worker_id\", help=\"ID unique to the worker\", type=int, required=True\n )\n parser.add_argument(\"--job_type\", help=\"Job type\", required=True)\n parser.add_argument(\"--master_addr\", help=\"Master ip:port\")\n parser.add_argument(\n \"--prediction_outputs_processor\",\n help=\"The name of the prediction output processor class \"\n \"defined in the model definition file.\",\n default=\"PredictionOutputsProcessor\",\n )\n parser.add_argument(\n \"--ps_addrs\",\n type=str,\n help=\"Addresses of parameter service pods, separated by comma\",\n )\n\n if worker_args:\n worker_args = list(map(str, worker_args))\n args, unknown_args = parser.parse_known_args(args=worker_args)\n print_args(args, groups=ALL_ARGS_GROUPS)\n if unknown_args:\n logger.warning(\"Unknown arguments: %s\", unknown_args)\n return args\n\n\ndef build_arguments_from_parsed_result(args, filter_args=None):\n \"\"\"Reconstruct arguments from parsed result\n Args:\n args: result from `parser.parse_args()`\n Returns:\n list of string: ready for parser to parse,\n such as [\"--foo\", \"3\", \"--bar\", False]\n \"\"\"\n items = vars(args).items()\n if filter_args:\n items = filter(lambda item: item[0] not in filter_args, items)\n\n def _str_ignore_none(s):\n if s is None:\n return s\n return str(s)\n\n arguments = map(_str_ignore_none, chain(*items))\n arguments = [\n \"--\" + k if i % 2 == 0 else k for i, k in enumerate(arguments)\n ]\n return arguments\n", "path": "elasticdl/python/common/args.py" } ]
[ { "content": "import argparse\nfrom itertools import chain\n\nfrom elasticdl.python.common.constants import DistributionStrategy\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\nMODEL_SPEC_GROUP = [\n \"dataset_fn\",\n \"eval_metrics_fn\",\n \"model_def\",\n \"model_params\",\n \"optimizer\",\n \"loss\",\n \"output\",\n \"minibatch_size\",\n \"grads_to_wait\",\n \"num_epochs\",\n \"tensorboard_log_dir\",\n \"training_data\",\n]\n\nEVALUATION_GROUP = [\n \"evaluation_steps\",\n \"validation_data\",\n \"evaluation_start_delay_secs\",\n \"evaluation_throttle_secs\",\n]\n\nPREDICTION_GROUP = [\"prediction_data\", \"prediction_outputs_processor\"]\n\nCHECKPOINT_GROUP = [\n \"checkpoint_dir_for_init\",\n \"checkpoint_steps\",\n \"keep_checkpoint_max\",\n \"checkpoint_dir\",\n]\n\nALL_ARGS_GROUPS = [\n MODEL_SPEC_GROUP,\n EVALUATION_GROUP,\n PREDICTION_GROUP,\n CHECKPOINT_GROUP,\n]\n\n\ndef pos_int(arg):\n res = int(arg)\n if res <= 0:\n raise ValueError(\"Positive integer argument required. Got %s\" % res)\n return res\n\n\ndef non_neg_int(arg):\n res = int(arg)\n if res < 0:\n raise ValueError(\n \"Non-negative integer argument required. Get %s\" % res\n )\n return res\n\n\ndef parse_envs(arg):\n \"\"\"Parse environment configs as a dict.\n\n Support format 'k1=v1,k2=v2,k3=v3..'. Note that comma is supported\n in value field.\n \"\"\"\n envs = {}\n if not arg:\n return envs\n\n i = 0\n fields = arg.split(\"=\")\n if len(fields) < 2:\n return envs\n pre_key = \"\"\n while i < len(fields):\n if i == 0:\n pre_key = fields[i]\n elif i == len(fields) - 1:\n envs[pre_key] = fields[i]\n else:\n r = fields[i].rfind(\",\")\n envs[pre_key] = fields[i][:r]\n pre_key = fields[i][r + 1 :] # noqa: E203\n i += 1\n return envs\n\n\ndef add_bool_param(parser, name, default, help):\n parser.add_argument(\n name, # should be in \"--foo\" format\n nargs=\"?\",\n const=not default,\n default=default,\n type=lambda x: x.lower() in [\"true\", \"yes\", \"t\", \"y\"],\n help=help,\n )\n\n\ndef add_common_params(parser):\n \"\"\"Common arguments for training/prediction/evaluation\"\"\"\n add_common_args_between_master_and_worker(parser)\n parser.add_argument(\n \"--docker_image_repository\",\n default=\"\",\n help=\"The repository for generated Docker images, if set, the image \"\n \"is also pushed to the repository\",\n )\n parser.add_argument(\"--image_base\", help=\"Base Docker image.\")\n parser.add_argument(\"--job_name\", help=\"ElasticDL job name\", required=True)\n parser.add_argument(\n \"--master_resource_request\",\n default=\"cpu=0.1,memory=1024Mi\",\n type=str,\n help=\"The minimal resource required by master, \"\n \"e.g. cpu=0.1,memory=1024Mi,disk=1024Mi,gpu=1\",\n )\n parser.add_argument(\n \"--master_resource_limit\",\n type=str,\n default=\"\",\n help=\"The maximal resource required by master, \"\n \"e.g. cpu=0.1,memory=1024Mi,disk=1024Mi,gpu=1, \"\n \"default to master_resource_request\",\n )\n parser.add_argument(\n \"--num_workers\", type=int, help=\"Number of workers\", default=0\n )\n parser.add_argument(\n \"--worker_resource_request\",\n default=\"cpu=1,memory=4096Mi\",\n type=str,\n help=\"The minimal resource required by worker, \"\n \"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1\",\n )\n parser.add_argument(\n \"--worker_resource_limit\",\n type=str,\n default=\"\",\n help=\"The maximal resource required by worker, \"\n \"e.g. 
cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,\"\n \"default to worker_resource_request\",\n )\n parser.add_argument(\n \"--master_pod_priority\",\n default=\"\",\n help=\"The requested priority of master pod\",\n )\n parser.add_argument(\n \"--worker_pod_priority\",\n default=\"\",\n help=\"The requested priority of worker pod\",\n )\n parser.add_argument(\n \"--num_ps_pods\", type=int, help=\"Number of PS pods\", default=1\n )\n parser.add_argument(\n \"--ps_resource_request\",\n default=\"cpu=1,memory=4096Mi\",\n type=str,\n help=\"The minimal resource required by worker, \"\n \"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1\",\n )\n parser.add_argument(\n \"--ps_resource_limit\",\n default=\"\",\n type=str,\n help=\"The maximal resource required by worker, \"\n \"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,\"\n \"default to worker_resource_request\",\n )\n parser.add_argument(\n \"--ps_pod_priority\",\n default=\"\",\n help=\"The requested priority of PS pod\",\n )\n parser.add_argument(\n \"--volume\",\n default=\"\",\n type=str,\n help=\"The Kubernetes volume information, \"\n \"the supported volumes are `persistentVolumeClaim` and `hostPath`,\"\n 'e.g. \"claim_name=c1,mount_path=/path1\" for `persistentVolumeClaim`,'\n '\"host_path=c0,mount_path=/path0\" for `hostPath`,'\n 'or \"host_path=c0,mount_path=/path0,type=Directory\" for `hostPath`,'\n '\"host_path=c0,mount_path=/path0;claim_name=c1,mount_path=/path1\" for'\n \"multiple volumes\",\n )\n parser.add_argument(\n \"--image_pull_policy\",\n default=\"Always\",\n help=\"The image pull policy of master and worker\",\n choices=[\"Never\", \"IfNotPresent\", \"Always\"],\n )\n parser.add_argument(\n \"--restart_policy\",\n default=\"Never\",\n help=\"The pod restart policy when pod crashed\",\n choices=[\"Never\", \"OnFailure\", \"Always\"],\n )\n parser.add_argument(\n \"--envs\",\n type=str,\n default=\"\",\n help=\"Runtime environment variables. (key1=value1,key2=value2), \"\n \"comma is supported in value field\",\n )\n parser.add_argument(\n \"--extra_pypi_index\",\n default=\"https://pypi.org/simple\",\n help=\"The extra URLs of Python package repository indexes\",\n )\n parser.add_argument(\n \"--namespace\",\n default=\"default\",\n type=str,\n help=\"The name of the Kubernetes namespace where ElasticDL \"\n \"pods will be created\",\n )\n parser.add_argument(\n \"--num_minibatches_per_task\",\n type=int,\n help=\"The number of minibatches per task\",\n required=True,\n )\n parser.add_argument(\n \"--cluster_spec\",\n help=\"The file that contains user-defined cluster specification\",\n default=\"\",\n )\n parser.add_argument(\n \"--docker_base_url\",\n help=\"URL to the Docker server\",\n default=\"unix://var/run/docker.sock\",\n )\n parser.add_argument(\n \"--docker_tlscert\", help=\"Path to Docker client cert\", default=\"\"\n )\n parser.add_argument(\n \"--docker_tlskey\", help=\"Path to Docker client key\", default=\"\"\n )\n parser.add_argument(\n \"--yaml\",\n type=str,\n default=\"\",\n help=\"File path for dumping ElasticDL job YAML specification. \"\n \"Note that, if users specify --yaml, the client wouldn't submit \"\n \"the job automatically, and users need to launch the job through \"\n \"command `kubectl create -f path_to_yaml_file`.\",\n )\n\n\ndef add_train_params(parser):\n parser.add_argument(\n \"--tensorboard_log_dir\",\n default=\"\",\n type=str,\n help=\"Directory where TensorBoard will look to find \"\n \"TensorFlow event files that it can display. 
\"\n \"TensorBoard will recursively walk the directory \"\n \"structure rooted at log dir, looking for .*tfevents.* \"\n \"files. You may also pass a comma separated list of log \"\n \"directories, and TensorBoard will watch each \"\n \"directory.\",\n )\n parser.add_argument(\"--num_epochs\", type=int, default=1)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n default=1,\n )\n parser.add_argument(\n \"--training_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for training.\",\n default=\"\",\n )\n parser.add_argument(\n \"--validation_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for evaluation.\",\n default=\"\",\n )\n parser.add_argument(\n \"--evaluation_steps\",\n type=int,\n help=\"Evaluate the model every this many steps.\"\n \"If 0, step-based evaluation is disabled\",\n default=0,\n )\n parser.add_argument(\n \"--evaluation_start_delay_secs\",\n type=int,\n help=\"Start time-based evaluation only after waiting for \"\n \"this many seconds\",\n default=100,\n )\n parser.add_argument(\n \"--evaluation_throttle_secs\",\n type=int,\n help=\"Do not re-evaluate unless the last evaluation was started \"\n \"at least this many seconds ago.\"\n \"If 0, time-based evaluation is disabled\",\n default=0,\n )\n parser.add_argument(\n \"--checkpoint_dir_for_init\",\n help=\"The checkpoint directory to initialize the training model\",\n default=\"\",\n )\n parser.add_argument(\n \"--output\",\n type=str,\n default=\"\",\n help=\"The path to save the final trained model\",\n )\n parser.add_argument(\n \"--sync_version_tolerance\",\n type=int,\n help=\"The maximum model version difference between reported gradients \"\n \"and PS that synchronous SGD can accepts.\",\n default=0,\n )\n add_bool_param(\n parser=parser,\n name=\"--use_async\",\n default=False,\n help=\"True for asynchronous SGD, False for synchronous SGD\",\n )\n add_bool_param(\n parser=parser,\n name=\"--lr_staleness_modulation\",\n default=False,\n help=\"If True, PS will modulate the learning rate with staleness \"\n \"in asynchronous SGD\",\n )\n\n\ndef add_evaluate_params(parser):\n parser.add_argument(\n \"--validation_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for evaluation.\",\n required=True,\n )\n parser.add_argument(\n \"--checkpoint_dir_for_init\",\n help=\"The checkpoint directory to initialize the training model\",\n required=True,\n )\n\n\ndef add_predict_params(parser):\n parser.add_argument(\n \"--prediction_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for prediction.\",\n required=True,\n )\n parser.add_argument(\n \"--prediction_outputs_processor\",\n help=\"The name of the prediction output processor class \"\n \"defined in the model definition file.\",\n default=\"PredictionOutputsProcessor\",\n )\n parser.add_argument(\n \"--checkpoint_dir_for_init\",\n help=\"The checkpoint directory to initialize the training model\",\n required=True,\n )\n\n\ndef add_clean_params(parser):\n parser.add_argument(\n \"--docker_image_repository\",\n type=str,\n help=\"Clean docker images belonging to this repository.\",\n )\n parser.add_argument(\n \"--all\", action=\"store_true\", help=\"Clean all local docker images\"\n )\n parser.add_argument(\n \"--docker_base_url\",\n help=\"URL to the Docker server\",\n 
default=\"unix://var/run/docker.sock\",\n )\n parser.add_argument(\n \"--docker_tlscert\", help=\"Path to Docker client cert\", default=\"\"\n )\n parser.add_argument(\n \"--docker_tlskey\", help=\"Path to Docker client key\", default=\"\"\n )\n\n\ndef print_args(args, groups=None):\n \"\"\"\n Args:\n args: parsing results returned from `parser.parse_args`\n groups: It is a list of a list. It controls which options should be\n printed together. For example, we expect all model specifications such\n as `optimizer`, `loss` are better printed together.\n groups = [[\"optimizer\", \"loss\"]]\n \"\"\"\n\n def _get_attr(instance, attribute):\n try:\n return getattr(instance, attribute)\n except AttributeError:\n return None\n\n dedup = set()\n if groups:\n for group in groups:\n for element in group:\n dedup.add(element)\n logger.info(\"%s = %s\", element, _get_attr(args, element))\n other_options = [\n (key, value)\n for (key, value) in args.__dict__.items()\n if key not in dedup\n ]\n for key, value in other_options:\n logger.info(\"%s = %s\", key, value)\n\n\ndef add_common_args_between_master_and_worker(parser):\n parser.add_argument(\n \"--minibatch_size\",\n help=\"Minibatch size for worker\",\n type=int,\n required=True,\n )\n parser.add_argument(\n \"--model_zoo\",\n help=\"The directory that contains user-defined model files \"\n \"or a specific model file\",\n required=True,\n )\n parser.add_argument(\n \"--log_level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n type=str.upper,\n default=\"INFO\",\n help=\"Set the logging level\",\n )\n parser.add_argument(\n \"--dataset_fn\",\n type=str,\n default=\"dataset_fn\",\n help=\"The name of the dataset function defined in the model file\",\n )\n parser.add_argument(\n \"--loss\",\n type=str,\n default=\"loss\",\n help=\"The name of the loss function defined in the model file\",\n )\n parser.add_argument(\n \"--optimizer\",\n type=str,\n default=\"optimizer\",\n help=\"The name of the optimizer defined in the model file\",\n )\n parser.add_argument(\n \"--learning_rate_scheduler\",\n type=str,\n default=\"learning_rate_scheduler\",\n help=\"Optional callable learning rate scheduler defined in\"\n \"the model file, which takes model version as its input and\"\n \"returns a learning rate value\",\n )\n parser.add_argument(\n \"--eval_metrics_fn\",\n type=str,\n default=\"eval_metrics_fn\",\n help=\"The name of the evaluation metrics function defined \"\n \"in the model file\",\n )\n parser.add_argument(\n \"--custom_data_reader\",\n type=str,\n default=\"custom_data_reader\",\n help=\"The custom data reader defined in the model file\",\n )\n parser.add_argument(\n \"--model_def\",\n type=str,\n required=True,\n help=\"The import path to the model definition function/class in the \"\n 'model zoo, e.g. \"cifar10_subclass.cifar10_subclass.CustomModel\"',\n )\n parser.add_argument(\n \"--model_params\",\n type=str,\n default=\"\",\n help=\"The model parameters in a string separated by semi-colon \"\n 'used to instantiate the model, e.g. \"param1=1; param2=2\"',\n )\n parser.add_argument(\n \"--get_model_steps\",\n type=int,\n default=1,\n help=\"Worker will get_model from PS every this many steps\",\n )\n parser.add_argument(\n \"--data_reader_params\",\n type=str,\n default=\"\",\n help=\"The data reader parameters in a string separated by semi-colon \"\n 'used to instantiate the data reader, e.g. 
\"param1=1; param2=2\"',\n )\n parser.add_argument(\n \"--distribution_strategy\",\n type=str,\n choices=[\n \"\",\n DistributionStrategy.LOCAL,\n DistributionStrategy.PARAMETER_SERVER,\n DistributionStrategy.ALLREDUCE,\n ],\n default=\"\",\n help=\"Master will use a distribution policy on a list of devices \"\n \"according to the distributed strategy, \"\n 'e.g. \"ParameterServerStrategy\" or \"AllreduceStrategy\" or \"Local\"',\n )\n parser.add_argument(\n \"--checkpoint_steps\",\n type=int,\n help=\"Save checkpoint every this many steps.\"\n \"If 0, no checkpoints to save.\",\n default=0,\n )\n parser.add_argument(\n \"--checkpoint_dir\",\n help=\"The directory to store the checkpoint files\",\n default=\"\",\n )\n parser.add_argument(\n \"--keep_checkpoint_max\",\n type=int,\n help=\"The maximum number of recent checkpoint files to keep.\"\n \"If 0, keep all.\",\n default=0,\n )\n\n\ndef parse_master_args(master_args=None):\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--port\",\n default=50001,\n type=pos_int,\n help=\"The listening port of master\",\n )\n parser.add_argument(\n \"--worker_image\", help=\"Docker image for workers\", default=None\n )\n parser.add_argument(\n \"--prediction_data\",\n help=\"Either the data directory that contains RecordIO files \"\n \"or an ODPS table name used for prediction.\",\n default=\"\",\n )\n add_common_params(parser)\n add_train_params(parser)\n\n args, unknown_args = parser.parse_known_args(args=master_args)\n print_args(args, groups=ALL_ARGS_GROUPS)\n if unknown_args:\n logger.warning(\"Unknown arguments: %s\", unknown_args)\n\n if all(\n v == \"\" or v is None\n for v in [\n args.training_data,\n args.validation_data,\n args.prediction_data,\n ]\n ):\n raise ValueError(\n \"At least one of the data directories needs to be provided\"\n )\n\n if args.prediction_data and (args.training_data or args.validation_data):\n raise ValueError(\n \"Running prediction together with training or evaluation \"\n \"is not supported\"\n )\n if args.prediction_data and not args.checkpoint_dir_for_init:\n raise ValueError(\n \"checkpoint_dir_for_init is required for running \" \"prediction job\"\n )\n if not args.use_async and args.get_model_steps > 1:\n args.get_model_steps = 1\n logger.warning(\n \"get_model_steps is set to 1 when using synchronous SGD.\"\n )\n if args.use_async and args.grads_to_wait > 1:\n args.grads_to_wait = 1\n logger.warning(\n \"grads_to_wait is set to 1 when using asynchronous SGD.\"\n )\n\n return args\n\n\ndef parse_ps_args(ps_args=None):\n parser = argparse.ArgumentParser(description=\"ElasticDL PS\")\n parser.add_argument(\n \"--ps_id\", help=\"ID unique to the PS\", type=int, required=True\n )\n parser.add_argument(\n \"--port\", help=\"Port used by the PS pod\", type=int, required=True\n )\n parser.add_argument(\"--master_addr\", help=\"Master ip:port\")\n\n add_common_params(parser)\n add_train_params(parser)\n # TODO: add PS replica address for RPC stub creation\n\n args, unknown_args = parser.parse_known_args(args=ps_args)\n print_args(args, groups=ALL_ARGS_GROUPS)\n if unknown_args:\n logger.warning(\"Unknown arguments: %s\", unknown_args)\n if args.use_async and args.grads_to_wait > 1:\n args.grads_to_wait = 1\n logger.warning(\n \"grads_to_wait is set to 1 when using asynchronous SGD.\"\n )\n return args\n\n\ndef parse_worker_args(worker_args=None):\n parser = argparse.ArgumentParser(description=\"ElasticDL Worker\")\n add_common_args_between_master_and_worker(parser)\n 
parser.add_argument(\n \"--worker_id\", help=\"ID unique to the worker\", type=int, required=True\n )\n parser.add_argument(\"--job_type\", help=\"Job type\", required=True)\n parser.add_argument(\"--master_addr\", help=\"Master ip:port\")\n parser.add_argument(\n \"--prediction_outputs_processor\",\n help=\"The name of the prediction output processor class \"\n \"defined in the model definition file.\",\n default=\"PredictionOutputsProcessor\",\n )\n parser.add_argument(\n \"--ps_addrs\",\n type=str,\n help=\"Addresses of parameter service pods, separated by comma\",\n )\n\n if worker_args:\n worker_args = list(map(str, worker_args))\n args, unknown_args = parser.parse_known_args(args=worker_args)\n print_args(args, groups=ALL_ARGS_GROUPS)\n if unknown_args:\n logger.warning(\"Unknown arguments: %s\", unknown_args)\n return args\n\n\ndef build_arguments_from_parsed_result(args, filter_args=None):\n \"\"\"Reconstruct arguments from parsed result\n Args:\n args: result from `parser.parse_args()`\n Returns:\n list of string: ready for parser to parse,\n such as [\"--foo\", \"3\", \"--bar\", False]\n \"\"\"\n items = vars(args).items()\n if filter_args:\n items = filter(lambda item: item[0] not in filter_args, items)\n\n def _str_ignore_none(s):\n if s is None:\n return s\n return str(s)\n\n arguments = map(_str_ignore_none, chain(*items))\n arguments = [\n \"--\" + k if i % 2 == 0 else k for i, k in enumerate(arguments)\n ]\n return arguments\n", "path": "elasticdl/python/common/args.py" } ]
diff --git a/elasticdl/pkg/main/main.go b/elasticdl/pkg/main/main.go index 0f59d5f88..18e1598c5 100644 --- a/elasticdl/pkg/main/main.go +++ b/elasticdl/pkg/main/main.go @@ -9,8 +9,22 @@ import ( ) var ( - // TODO: parse more args - port = flag.Int("port", 2222, "The server port") + jobName = flag.String("job_name", "", "ElasticDL job name") + namespace = flag.String("namespace", "", "The name of the Kubernetes namespace where ElasticDL pods will be created") + masterAddr = flag.String("master_addr", "localhost:50001", "The master pod address") + port = flag.Int("port", 2222, "The server port") + useAsync = flag.Bool("use_async", false, "true for asynchronous SGD, false for synchronous SGD") + gradsToWait = flag.Int("grads_to_wait", 1, "Number of gradients to wait before updating mode") + lrStalenessModulation = flag.Bool("lr_staleness_modulation", false, "If True, PS will modulate the learning rate with staleness") + syncVersionTolerance = flag.Int("sync_version_tolerance", 0, "The maximum model version difference between reported gradients and PS that synchronous SGD can accepts") + evaluationSteps = flag.Int("evaluation_steps", 0, "Evaluate the model every this many steps. If 0, evaluation is disabled") + numPsPods = flag.Int("num_ps_pods", 1, "Number of PS pod") + psID = flag.Int("ps_id", 0, "PS id") + numWorkers = flag.Int("num_workers", 1, "Number of workers") + checkpointDirForInit = flag.String("checkpoint_dir_for_init", "", "The checkpoint directory to initialize the training model") + checkpointDir = flag.String("checkpoint_dir", "", "The directory to store the checkpoint file") + checkpointSteps = flag.Int("checkpoint_steps", 0, "Save checkpoint every this many steps. If 0, no checkpoints to save") + keepCheckpointMax = flag.Int("keep_checkpoint_max", 3, "The maximum number of recent checkpoint files to keep. If 0, keep all") ) func main() { diff --git a/elasticdl/python/common/args.py b/elasticdl/python/common/args.py index 2864686b5..1a98ce6da 100644 --- a/elasticdl/python/common/args.py +++ b/elasticdl/python/common/args.py @@ -333,7 +333,7 @@ def add_train_params(parser): parser=parser, name="--lr_staleness_modulation", default=False, - help="If True, master will modulate the learning rate with staleness " + help="If True, PS will modulate the learning rate with staleness " "in asynchronous SGD", )
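For the Python half of the same flag surface, here is a minimal sketch of driving the `parse_ps_args` helper from the `args.py` shown above. It assumes the `elasticdl` package in this record is importable, and every concrete value (job name, ports, model path) is an illustrative placeholder rather than a project default.

```python
# Hypothetical invocation of the PS argument parser defined in
# elasticdl/python/common/args.py above; all values are placeholders.
from elasticdl.python.common.args import parse_ps_args

args = parse_ps_args([
    "--ps_id", "0",
    "--port", "2222",
    "--master_addr", "localhost:50001",
    "--job_name", "demo-job",
    "--minibatch_size", "64",
    "--num_minibatches_per_task", "8",
    "--model_zoo", "model_zoo",
    "--model_def", "some_package.some_module.CustomModel",
    "--use_async", "true",
    "--grads_to_wait", "4",
])

# parse_ps_args() resets grads_to_wait to 1 and logs a warning
# whenever --use_async is set.
assert args.use_async
assert args.grads_to_wait == 1
print(args.port, args.master_addr)  # 2222 localhost:50001
```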
docker__docker-py-3200
Can't create config object
Much like https://github.com/docker/docker-py/issues/2025 the config model is failing to create a new object due to a 'name' KeyError

```
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "docker\models\configs.py", line 10, in __repr__
    return f"<{self.__class__.__name__}: '{self.name}'>"
  File "docker\models\configs.py", line 14, in name
    return self.attrs['Spec']['Name']
```

This https://github.com/docker/docker-py/pull/2793 appears to be the fix that was implemented there, and it should likely be implemented for configs as well (if not for other models that might have the same issue)
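A minimal sketch of the failure and of the fix pattern this record applies. It builds the `Config` model directly from an attrs dict (no Docker daemon involved) and reuses the fake ID and name from the test fixtures in the patch below; constructing the model this way is an illustrative assumption, not how the client normally builds objects.

```python
# Reproduce the broken repr: create_config() only returns an ID, so the
# prepared model has no "Spec" key and Config.name raises KeyError.
from docker.models.configs import Config

broken = Config(attrs={"ID": "sekvs771242jfdjnvfuds8232"})
try:
    repr(broken)
except KeyError as exc:
    print("repr failed with KeyError:", exc)

# The fix back-fills the name before the model is prepared, mirroring the
# earlier secrets fix referenced above:
attrs = {"ID": "sekvs771242jfdjnvfuds8232"}
attrs.setdefault("Spec", {})["Name"] = "super_config"
print(repr(Config(attrs=attrs)))  # <Config: 'super_config'>
```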
[ { "content": "from ..api import APIClient\nfrom .resource import Model, Collection\n\n\nclass Config(Model):\n \"\"\"A config.\"\"\"\n id_attribute = 'ID'\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}: '{self.name}'>\"\n\n @property\n def name(self):\n return self.attrs['Spec']['Name']\n\n def remove(self):\n \"\"\"\n Remove this config.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If config failed to remove.\n \"\"\"\n return self.client.api.remove_config(self.id)\n\n\nclass ConfigCollection(Collection):\n \"\"\"Configs on the Docker server.\"\"\"\n model = Config\n\n def create(self, **kwargs):\n obj = self.client.api.create_config(**kwargs)\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_config.__doc__\n\n def get(self, config_id):\n \"\"\"\n Get a config.\n\n Args:\n config_id (str): Config ID.\n\n Returns:\n (:py:class:`Config`): The config.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the config does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_config(config_id))\n\n def list(self, **kwargs):\n \"\"\"\n List configs. Similar to the ``docker config ls`` command.\n\n Args:\n filters (dict): Server-side list filtering options.\n\n Returns:\n (list of :py:class:`Config`): The configs.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.configs(**kwargs)\n return [self.prepare_model(obj) for obj in resp]\n", "path": "docker/models/configs.py" } ]
[ { "content": "from ..api import APIClient\nfrom .resource import Model, Collection\n\n\nclass Config(Model):\n \"\"\"A config.\"\"\"\n id_attribute = 'ID'\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}: '{self.name}'>\"\n\n @property\n def name(self):\n return self.attrs['Spec']['Name']\n\n def remove(self):\n \"\"\"\n Remove this config.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If config failed to remove.\n \"\"\"\n return self.client.api.remove_config(self.id)\n\n\nclass ConfigCollection(Collection):\n \"\"\"Configs on the Docker server.\"\"\"\n model = Config\n\n def create(self, **kwargs):\n obj = self.client.api.create_config(**kwargs)\n obj.setdefault(\"Spec\", {})[\"Name\"] = kwargs.get(\"name\")\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_config.__doc__\n\n def get(self, config_id):\n \"\"\"\n Get a config.\n\n Args:\n config_id (str): Config ID.\n\n Returns:\n (:py:class:`Config`): The config.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the config does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_config(config_id))\n\n def list(self, **kwargs):\n \"\"\"\n List configs. Similar to the ``docker config ls`` command.\n\n Args:\n filters (dict): Server-side list filtering options.\n\n Returns:\n (list of :py:class:`Config`): The configs.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.configs(**kwargs)\n return [self.prepare_model(obj) for obj in resp]\n", "path": "docker/models/configs.py" } ]
diff --git a/docker/models/configs.py b/docker/models/configs.py index 3588c8b5d..5ef137784 100644 --- a/docker/models/configs.py +++ b/docker/models/configs.py @@ -30,6 +30,7 @@ class ConfigCollection(Collection): def create(self, **kwargs): obj = self.client.api.create_config(**kwargs) + obj.setdefault("Spec", {})["Name"] = kwargs.get("name") return self.prepare_model(obj) create.__doc__ = APIClient.create_config.__doc__ diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py index 0524becdc..03e53cc64 100644 --- a/tests/unit/fake_api.py +++ b/tests/unit/fake_api.py @@ -19,6 +19,8 @@ FAKE_NODE_ID = '24ifsmvkjbyhk' FAKE_SECRET_ID = 'epdyrw4tsi03xy3deu8g8ly6o' FAKE_SECRET_NAME = 'super_secret' +FAKE_CONFIG_ID = 'sekvs771242jfdjnvfuds8232' +FAKE_CONFIG_NAME = 'super_config' # Each method is prefixed with HTTP method (get, post...) # for clarity and readability @@ -512,6 +514,11 @@ def post_fake_secret(): response = {'ID': FAKE_SECRET_ID} return status_code, response +def post_fake_config(): + status_code = 200 + response = {'ID': FAKE_CONFIG_ID} + return status_code, response + # Maps real api url to fake response callback prefix = 'http+docker://localhost' @@ -630,4 +637,6 @@ def post_fake_secret(): post_fake_network_disconnect, f'{prefix}/{CURRENT_VERSION}/secrets/create': post_fake_secret, + f'{prefix}/{CURRENT_VERSION}/configs/create': + post_fake_config, } diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py index 95cf63b49..797994216 100644 --- a/tests/unit/fake_api_client.py +++ b/tests/unit/fake_api_client.py @@ -37,6 +37,7 @@ def make_fake_api_client(overrides=None): 'create_host_config.side_effect': api_client.create_host_config, 'create_network.return_value': fake_api.post_fake_network()[1], 'create_secret.return_value': fake_api.post_fake_secret()[1], + 'create_config.return_value': fake_api.post_fake_config()[1], 'exec_create.return_value': fake_api.post_fake_exec_create()[1], 'exec_start.return_value': fake_api.post_fake_exec_start()[1], 'images.return_value': fake_api.get_fake_images()[1], diff --git a/tests/unit/models_configs_test.py b/tests/unit/models_configs_test.py new file mode 100644 index 000000000..6960397ff --- /dev/null +++ b/tests/unit/models_configs_test.py @@ -0,0 +1,10 @@ +import unittest + +from .fake_api_client import make_fake_client +from .fake_api import FAKE_CONFIG_NAME + +class CreateConfigsTest(unittest.TestCase): + def test_create_config(self): + client = make_fake_client() + config = client.configs.create(name="super_config", data="config") + assert config.__repr__() == "<Config: '{}'>".format(FAKE_CONFIG_NAME)
googleapis__google-cloud-python-10168
PubSub: declaratively drop Python 3.4 support The README and the language classifiers in `setup.py` both only claim support for Python 3.5+ (and 2.7), but not Python 3.4. However, the `python_requires` in `setup.py` does not reflect that, and does not prevent installing the library in Python 3.4.
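To make the gap concrete, the sketch below compares the current `python_requires` specifier with the one the patch in this record adds, using the third-party `packaging` library purely for illustration.

```python
from packaging.specifiers import SpecifierSet

# python_requires as it stands vs. as the diff below changes it.
before = SpecifierSet(">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*")
after = SpecifierSet(">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*")

print("3.4.10" in before)  # True  -> pip would still install on Python 3.4
print("3.4.10" in after)   # False -> 3.4 installs refused, matching the classifiers
print("3.5.0" in after)    # True  -> 3.5+ (and 2.7) remain installable
```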
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-pubsub\"\ndescription = \"Google Cloud Pub/Sub API client library\"\nversion = \"1.1.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.14.0, < 2.0.0dev\",\n \"grpc-google-iam-v1 >= 0.12.3, < 0.13dev\",\n 'enum34; python_version < \"3.4\"',\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "pubsub/setup.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-pubsub\"\ndescription = \"Google Cloud Pub/Sub API client library\"\nversion = \"1.1.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.14.0, < 2.0.0dev\",\n \"grpc-google-iam-v1 >= 0.12.3, < 0.13dev\",\n 'enum34; python_version < \"3.4\"',\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "pubsub/setup.py" } ]
diff --git a/pubsub/setup.py b/pubsub/setup.py index e26fb4b75778..69f19b3db72e 100644 --- a/pubsub/setup.py +++ b/pubsub/setup.py @@ -84,7 +84,7 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*", include_package_data=True, zip_safe=False, )
agconti__cookiecutter-django-rest-150
Set APPEND_SLASH to False
Since Django cannot send POST data in a `301` redirect, a `POST` to an API at `api/v1/resource` will fail with a `400` bad request, assuming the configured route is `api/v1/resource/`.

With `APPEND_SLASH` set to `False`, a `POST` to `api/v1/resource` will instead fail with a `404`, letting the developer know they have forgotten the trailing slash.

While it's convenient to redirect `GET` requests, from the perspective of consuming an API it's preferable to get more direct and straightforward errors.
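A minimal sketch of the proposed change, anchored to the project's `Common` configuration class shown in the files below; the redirect being disabled here is issued by `django.middleware.common.CommonMiddleware`, which is already in the template's middleware list.

```python
# config/common.py (django-configurations style): turn off the slash-appending
# redirect so a missing trailing slash surfaces as a 404 for every method.
from configurations import Configuration


class Common(Configuration):
    # General
    APPEND_SLASH = False
    # ... the remaining settings from the template are unchanged ...
```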
[ { "content": "import os\nfrom os.path import join\n\nfrom configurations import Configuration, values\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass Common(Configuration):\n\n INSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n\n # Third party apps\n 'rest_framework', # utilities for rest apis\n 'rest_framework.authtoken', # token authentication\n 'django_rq', # asynchronous queuing\n 'versatileimagefield', # image manipulation\n\n # Your apps\n 'authentication',\n 'users'\n\n )\n\n # https://docs.djangoproject.com/en/1.8/topics/http/middleware/\n MIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware'\n )\n\n ROOT_URLCONF = 'urls'\n\n TEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n ]\n\n SECRET_KEY = 'Not a secret'\n WSGI_APPLICATION = 'wsgi.application'\n\n # Allow for less strict handling of urls\n APPEND_SLASH = values.BooleanValue(True)\n\n # Migrations\n MIGRATION_MODULES = {\n 'sites': 'contrib.sites.migrations'\n }\n\n # Set DEBUG to False as a default for safety\n # https://docs.djangoproject.com/en/dev/ref/settings/#debug\n DEBUG = values.BooleanValue(False)\n for config in TEMPLATES:\n config['OPTIONS']['debug'] = DEBUG\n\n # Email\n EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')\n\n MANAGERS = (\n (\"Author\", '{{cookiecutter.email}}'),\n )\n\n # Postgres\n DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}')\n\n # General\n TIME_ZONE = 'UTC'\n LANGUAGE_CODE = 'en-us'\n SITE_ID = 1\n # If you set this to False, Django will make some optimizations so as not\n # to load the internationalization machinery.\n USE_I18N = False\n USE_L10N = True\n USE_TZ = True\n LOGIN_REDIRECT_URL = '/'\n\n # Static Files\n STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')\n STATIC_URL = '/static/'\n STATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n )\n\n # Media files\n MEDIA_ROOT = join(os.path.dirname(BASE_DIR), 'media')\n MEDIA_URL = '/media/'\n\n # Logging\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n \"rq_console\": {\n \"format\": \"%(asctime)s %(message)s\",\n \"datefmt\": \"%H:%M:%S\",\n },\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 
'logging.StreamHandler',\n 'formatter': 'simple'\n },\n \"rq_console\": {\n \"level\": \"DEBUG\",\n \"class\": \"rq.utils.ColorizingStreamHandler\",\n \"formatter\": \"rq_console\",\n \"exclude\": [\"%(asctime)s\"],\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True\n },\n \"rq.worker\": {\n \"handlers\": [\"rq_console\"],\n \"level\": \"DEBUG\"\n },\n }\n }\n\n # Custom user app\n AUTH_USER_MODEL = 'users.User'\n\n # Django Rest Framework\n REST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 100,\n 'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n )\n }\n\n # Versatile Image Field\n VERSATILEIMAGEFIELD_SETTINGS = {\n # The amount of time, in seconds, that references to created images\n # should be stored in the cache. Defaults to `2592000` (30 days)\n 'cache_length': 2592000,\n 'cache_name': 'versatileimagefield_cache',\n 'jpeg_resize_quality': 70,\n 'sized_directory_name': '__sized__',\n 'filtered_directory_name': '__filtered__',\n 'placeholder_directory_name': '__placeholder__',\n 'create_images_on_demand': False\n }\n", "path": "{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py" } ]
[ { "content": "import os\nfrom os.path import join\n\nfrom configurations import Configuration, values\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass Common(Configuration):\n\n INSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n\n # Third party apps\n 'rest_framework', # utilities for rest apis\n 'rest_framework.authtoken', # token authentication\n 'django_rq', # asynchronous queuing\n 'versatileimagefield', # image manipulation\n\n # Your apps\n 'authentication',\n 'users'\n\n )\n\n # https://docs.djangoproject.com/en/1.8/topics/http/middleware/\n MIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware'\n )\n\n ROOT_URLCONF = 'urls'\n\n TEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n ]\n\n SECRET_KEY = 'Not a secret'\n WSGI_APPLICATION = 'wsgi.application'\n\n # Allow for less strict handling of urls\n APPEND_SLASH = values.BooleanValue(True)\n\n # Migrations\n MIGRATION_MODULES = {\n 'sites': 'contrib.sites.migrations'\n }\n\n # Set DEBUG to False as a default for safety\n # https://docs.djangoproject.com/en/dev/ref/settings/#debug\n DEBUG = values.BooleanValue(False)\n for config in TEMPLATES:\n config['OPTIONS']['debug'] = DEBUG\n\n # Email\n EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')\n\n MANAGERS = (\n (\"Author\", '{{cookiecutter.email}}'),\n )\n\n # Postgres\n DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}')\n\n # General\n APPEND_SLASH = False\n TIME_ZONE = 'UTC'\n LANGUAGE_CODE = 'en-us'\n SITE_ID = 1\n # If you set this to False, Django will make some optimizations so as not\n # to load the internationalization machinery.\n USE_I18N = False\n USE_L10N = True\n USE_TZ = True\n LOGIN_REDIRECT_URL = '/'\n\n # Static Files\n STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')\n STATIC_URL = '/static/'\n STATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n )\n\n # Media files\n MEDIA_ROOT = join(os.path.dirname(BASE_DIR), 'media')\n MEDIA_URL = '/media/'\n\n # Logging\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n \"rq_console\": {\n \"format\": \"%(asctime)s %(message)s\",\n \"datefmt\": \"%H:%M:%S\",\n },\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 
'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n \"rq_console\": {\n \"level\": \"DEBUG\",\n \"class\": \"rq.utils.ColorizingStreamHandler\",\n \"formatter\": \"rq_console\",\n \"exclude\": [\"%(asctime)s\"],\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True\n },\n \"rq.worker\": {\n \"handlers\": [\"rq_console\"],\n \"level\": \"DEBUG\"\n },\n }\n }\n\n # Custom user app\n AUTH_USER_MODEL = 'users.User'\n\n # Django Rest Framework\n REST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 100,\n 'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n )\n }\n\n # Versatile Image Field\n VERSATILEIMAGEFIELD_SETTINGS = {\n # The amount of time, in seconds, that references to created images\n # should be stored in the cache. Defaults to `2592000` (30 days)\n 'cache_length': 2592000,\n 'cache_name': 'versatileimagefield_cache',\n 'jpeg_resize_quality': 70,\n 'sized_directory_name': '__sized__',\n 'filtered_directory_name': '__filtered__',\n 'placeholder_directory_name': '__placeholder__',\n 'create_images_on_demand': False\n }\n", "path": "{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py" } ]
diff --git a/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py b/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py index 9c95c1466..afda9ce22 100755 --- a/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py +++ b/{{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/config/common.py @@ -86,6 +86,7 @@ class Common(Configuration): DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}') # General + APPEND_SLASH = False TIME_ZONE = 'UTC' LANGUAGE_CODE = 'en-us' SITE_ID = 1
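The change in this record is a single added setting, but it lands below an existing `APPEND_SLASH = values.BooleanValue(True)` assignment in the same class body. The later binding is the one that takes effect, which is plain Python class-body behaviour rather than anything django-configurations-specific; a minimal stand-alone sketch (a plain bool stands in for `values.BooleanValue`):

```python
# In a class body, a second assignment to the same name simply rebinds it, so
# the APPEND_SLASH added under "# General" overrides the earlier default.
class Common:
    APPEND_SLASH = True    # stand-in for the earlier values.BooleanValue(True)
    APPEND_SLASH = False   # the line added by this record's diff


assert Common.APPEND_SLASH is False
```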
MAKENTNU__web-204
Fix delete permissions for course registration
[ { "content": "import io\n\nimport xlsxwriter\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView, View, CreateView, UpdateView, DeleteView\n\nfrom make_queue.forms import Printer3DCourseForm\nfrom make_queue.models.course import Printer3DCourse\n\n\nclass CourseView(TemplateView):\n template_name = \"make_queue/course/course_panel.html\"\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n context_data.update({\n \"registrations\": Printer3DCourse.objects.order_by(\"name\"),\n \"possible_statuses\": Printer3DCourse.STATUS_CHOICES,\n })\n return context_data\n\n\nclass CreateRegistrationView(PermissionRequiredMixin, CreateView):\n is_next = False\n model = Printer3DCourse\n form_class = Printer3DCourseForm\n template_name = \"make_queue/course/registration_create.html\"\n permission_required = (\n \"make_queue.add_printer3dcourse\",\n )\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n if self.is_next:\n context_data[\"is_next\"] = True\n return context_data\n\n def get_success_url(self):\n return reverse(\"create_course_registration_success\")\n\n\nclass EditRegistrationView(PermissionRequiredMixin, UpdateView):\n model = Printer3DCourse\n form_class = Printer3DCourseForm\n template_name = \"make_queue/course/registration_edit.html\"\n permission_required = (\n \"make_queue.change_printer3dcourse\",\n )\n\n def get_success_url(self):\n return reverse(\"course_panel\")\n\n\nclass DeleteRegistrationView(PermissionRequiredMixin, DeleteView):\n model = Printer3DCourse\n permission_required = (\n \"make_queue.delete_printer3d_course\",\n )\n\n def get_success_url(self):\n return reverse(\"course_panel\")\n\n\nclass BulkStatusUpdate(View):\n \"\"\"\n Provides a method for bulk updating the status of course registrations\n \"\"\"\n\n def post(self, request):\n status = request.POST.get(\"status\")\n registrations = list(map(int, request.POST.getlist(\"users\")))\n Printer3DCourse.objects.filter(pk__in=registrations).update(status=status)\n\n return redirect(\"course_panel\")\n\n\nclass CourseXLSXView(View):\n\n def post(self, request):\n search_string = request.POST.get(\"search_text\")\n status_filter = request.POST.get(\"status_filter\")\n\n course_registrations = Printer3DCourse.objects.filter(\n Q(username__icontains=search_string) | Q(name__icontains=search_string), status__icontains=status_filter)\n\n # Use an in-memory output file, to avoid having to clean up the disk\n output_file = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output_file, {\"in_memory\": True})\n worksheet = workbook.add_worksheet(\"Kursdeltagere\")\n\n # Styles\n format_header = workbook.add_format({\n \"bold\": True,\n \"font_size\": 10,\n \"font_name\": \"Arial\",\n \"font_color\": \"#000000\",\n \"bg_color\": \"#f8c700\",\n \"border\": 1,\n \"border_color\": \"#000000\",\n })\n\n format_row = workbook.add_format({\n \"font_size\": 10,\n \"font_name\": \"Arial\",\n \"font_color\": \"#000000\",\n \"bg_color\": \"#fff2cc\",\n \"border\": 1,\n \"border_color\": \"#000000\",\n })\n\n # Set column width\n worksheet.set_column(\"A:A\", 40)\n worksheet.set_column(\"B:B\", 20)\n worksheet.set_column(\"C:C\", 15)\n worksheet.set_column(\"D:D\", 10)\n\n # Header\n worksheet.write(0, 0, \"Navn\", format_header)\n worksheet.write(0, 1, 
\"Brukernavn\", format_header)\n worksheet.write(0, 2, \"Kortnummer\", format_header)\n worksheet.write(0, 3, \"Dato\", format_header)\n\n for index, registration in enumerate(course_registrations):\n worksheet.write(index + 1, 0, registration.name, format_row)\n worksheet.write(index + 1, 1, registration.username, format_row)\n worksheet.write(index + 1, 2, registration.card_number if registration.card_number is not None else \"\",\n format_row)\n worksheet.write(index + 1, 3, registration.date.strftime(\"%Y-%m-%d\"), format_row)\n\n workbook.close()\n output_file.seek(0)\n\n response = HttpResponse(output_file.read(),\n content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n\n response['Content-Disposition'] = 'attachment; filename=\"Kursdeltagere.xlsx\"'\n\n return response\n", "path": "make_queue/views/admin/course.py" } ]
[ { "content": "import io\n\nimport xlsxwriter\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView, View, CreateView, UpdateView, DeleteView\n\nfrom make_queue.forms import Printer3DCourseForm\nfrom make_queue.models.course import Printer3DCourse\n\n\nclass CourseView(TemplateView):\n template_name = \"make_queue/course/course_panel.html\"\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n context_data.update({\n \"registrations\": Printer3DCourse.objects.order_by(\"name\"),\n \"possible_statuses\": Printer3DCourse.STATUS_CHOICES,\n })\n return context_data\n\n\nclass CreateRegistrationView(PermissionRequiredMixin, CreateView):\n is_next = False\n model = Printer3DCourse\n form_class = Printer3DCourseForm\n template_name = \"make_queue/course/registration_create.html\"\n permission_required = (\n \"make_queue.add_printer3dcourse\",\n )\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n if self.is_next:\n context_data[\"is_next\"] = True\n return context_data\n\n def get_success_url(self):\n return reverse(\"create_course_registration_success\")\n\n\nclass EditRegistrationView(PermissionRequiredMixin, UpdateView):\n model = Printer3DCourse\n form_class = Printer3DCourseForm\n template_name = \"make_queue/course/registration_edit.html\"\n permission_required = (\n \"make_queue.change_printer3dcourse\",\n )\n\n def get_success_url(self):\n return reverse(\"course_panel\")\n\n\nclass DeleteRegistrationView(PermissionRequiredMixin, DeleteView):\n model = Printer3DCourse\n permission_required = (\n \"make_queue.delete_printer3dcourse\",\n )\n\n def get_success_url(self):\n return reverse(\"course_panel\")\n\n\nclass BulkStatusUpdate(View):\n \"\"\"\n Provides a method for bulk updating the status of course registrations\n \"\"\"\n\n def post(self, request):\n status = request.POST.get(\"status\")\n registrations = list(map(int, request.POST.getlist(\"users\")))\n Printer3DCourse.objects.filter(pk__in=registrations).update(status=status)\n\n return redirect(\"course_panel\")\n\n\nclass CourseXLSXView(View):\n\n def post(self, request):\n search_string = request.POST.get(\"search_text\")\n status_filter = request.POST.get(\"status_filter\")\n\n course_registrations = Printer3DCourse.objects.filter(\n Q(username__icontains=search_string) | Q(name__icontains=search_string), status__icontains=status_filter)\n\n # Use an in-memory output file, to avoid having to clean up the disk\n output_file = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output_file, {\"in_memory\": True})\n worksheet = workbook.add_worksheet(\"Kursdeltagere\")\n\n # Styles\n format_header = workbook.add_format({\n \"bold\": True,\n \"font_size\": 10,\n \"font_name\": \"Arial\",\n \"font_color\": \"#000000\",\n \"bg_color\": \"#f8c700\",\n \"border\": 1,\n \"border_color\": \"#000000\",\n })\n\n format_row = workbook.add_format({\n \"font_size\": 10,\n \"font_name\": \"Arial\",\n \"font_color\": \"#000000\",\n \"bg_color\": \"#fff2cc\",\n \"border\": 1,\n \"border_color\": \"#000000\",\n })\n\n # Set column width\n worksheet.set_column(\"A:A\", 40)\n worksheet.set_column(\"B:B\", 20)\n worksheet.set_column(\"C:C\", 15)\n worksheet.set_column(\"D:D\", 10)\n\n # Header\n worksheet.write(0, 0, \"Navn\", format_header)\n worksheet.write(0, 1, 
\"Brukernavn\", format_header)\n worksheet.write(0, 2, \"Kortnummer\", format_header)\n worksheet.write(0, 3, \"Dato\", format_header)\n\n for index, registration in enumerate(course_registrations):\n worksheet.write(index + 1, 0, registration.name, format_row)\n worksheet.write(index + 1, 1, registration.username, format_row)\n worksheet.write(index + 1, 2, registration.card_number if registration.card_number is not None else \"\",\n format_row)\n worksheet.write(index + 1, 3, registration.date.strftime(\"%Y-%m-%d\"), format_row)\n\n workbook.close()\n output_file.seek(0)\n\n response = HttpResponse(output_file.read(),\n content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n\n response['Content-Disposition'] = 'attachment; filename=\"Kursdeltagere.xlsx\"'\n\n return response\n", "path": "make_queue/views/admin/course.py" } ]
diff --git a/make_queue/templates/make_queue/course/course_panel.html b/make_queue/templates/make_queue/course/course_panel.html index d9fe53cee..c83f0bca7 100644 --- a/make_queue/templates/make_queue/course/course_panel.html +++ b/make_queue/templates/make_queue/course/course_panel.html @@ -105,7 +105,7 @@ <h4> <a href="{% url "edit_course_registration" registration.pk %}"> <i class="ui yellow pencil icon"></i> </a> - {% if perms.delete_printer3dcourse %} + {% if perms.make_queue.delete_printer3dcourse %} <a class="delete confirm" href="{% url "delete_course_registration" registration.pk %}"> <i class="ui red trash icon"></i> </a> diff --git a/make_queue/views/admin/course.py b/make_queue/views/admin/course.py index 4abae218c..f1aa7e57b 100644 --- a/make_queue/views/admin/course.py +++ b/make_queue/views/admin/course.py @@ -58,7 +58,7 @@ def get_success_url(self): class DeleteRegistrationView(PermissionRequiredMixin, DeleteView): model = Printer3DCourse permission_required = ( - "make_queue.delete_printer3d_course", + "make_queue.delete_printer3dcourse", ) def get_success_url(self):
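Both hunks of this fix come down to Django's default permission naming: auto-created permissions use the lowercased model class name as-is (`Printer3DCourse` → `printer3dcourse`, with no extra underscore), and in templates the codename has to be qualified by the app label, hence `perms.make_queue.delete_printer3dcourse`. A small illustrative sketch of the convention (the helper function is hypothetical, not part of the project):

```python
# Django builds default permissions as "<app_label>.<action>_<model_name>",
# where model_name is the class name lowercased with nothing inserted.
def default_perm(app_label: str, action: str, model_class_name: str) -> str:
    return f"{app_label}.{action}_{model_class_name.lower()}"


assert default_perm("make_queue", "delete", "Printer3DCourse") == "make_queue.delete_printer3dcourse"
# The string the view used to check, with its stray underscore, can never match:
assert "make_queue.delete_printer3d_course" != default_perm("make_queue", "delete", "Printer3DCourse")
```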
ivy-llc__ivy-17429
empty_like
[ { "content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py" } ]
[ { "content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py" } ]
diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py index 43939f0aab490..da5b22abd7a9a 100644 --- a/ivy/functional/frontends/paddle/tensor/creation.py +++ b/ivy/functional/frontends/paddle/tensor/creation.py @@ -71,3 +71,8 @@ def empty(shape, dtype=None): @to_ivy_arrays_and_back def eye(num_rows, num_columns=None, dtype=None, name=None): return ivy.eye(num_rows, num_columns, dtype=dtype) + + +@to_ivy_arrays_and_back +def empty_like(x, dtype=None, name=None): + return ivy.empty_like(x, dtype=dtype) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py index e19ce390bb7c7..d534514fed0cb 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py @@ -332,3 +332,31 @@ def test_paddle_eye( num_columns=num_columns, dtype=dtypes[0], ) + + +# empty_like +@handle_frontend_test( + fn_tree="paddle.empty_like", + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), + dtype=helpers.get_dtypes("valid", full=False), + test_with_out=st.just(False), +) +def test_paddle_empty_like( + dtype_and_x, + dtype, + test_flags, + frontend, + fn_tree, + on_device, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + test_values=False, + x=x[0], + dtype=dtype[0], + )
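The new frontend function is a thin wrapper over `ivy.empty_like`, so trying it out only requires selecting a backend first. A rough usage sketch, importing straight from the module patched above and assuming a NumPy backend is installed (any shorter re-export path through the paddle frontend package is not shown in this record):

```python
import ivy
from ivy.functional.frontends.paddle.tensor.creation import empty_like, to_tensor

ivy.set_backend("numpy")             # assumes NumPy is available as a backend

x = to_tensor([[1.0, 2.0], [3.0, 4.0]])
y = empty_like(x, dtype="float32")   # same (2, 2) shape as x; contents are uninitialized
```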
pyinstaller__pyinstaller-2225
missing hidden import for skimage When packaging an application that imports skimage.feature (and nothing else), the app would not run due to an ImportError on the "transform" module. This can be fixed by adding one item to the hiddenimports in hook-skimage.transform.py file (bolded below): > hiddenimports = ['skimage.draw.draw', > 'skimage._shared.geometry', > 'skimage.filters.rank.core_cy', > **'skimage._shared.transform'**] > > datas = collect_data_files('skimage') PyInstaller 3.2, Windows 7 64 bit, Python 2.7.12, Anaconda 4.1.1 distribution.
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2014-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_data_files\n\n# Hook tested with scikit-image (skimage) 0.9.3 on Mac OS 10.9 and Windows 7\n# 64-bit\nhiddenimports = ['skimage.draw.draw',\n 'skimage._shared.geometry',\n 'skimage.filters.rank.core_cy']\n\ndatas = collect_data_files('skimage')\n", "path": "PyInstaller/hooks/hook-skimage.transform.py" } ]
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2014-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_data_files\n\n# Hook tested with scikit-image (skimage) 0.9.3 on Mac OS 10.9 and Windows 7\n# 64-bit\nhiddenimports = ['skimage.draw.draw',\n 'skimage._shared.geometry',\n 'skimage._shared.transform',\n 'skimage.filters.rank.core_cy']\n\ndatas = collect_data_files('skimage')\n", "path": "PyInstaller/hooks/hook-skimage.transform.py" } ]
diff --git a/PyInstaller/hooks/hook-skimage.transform.py b/PyInstaller/hooks/hook-skimage.transform.py index 8768c0c1f0..8c2b452094 100644 --- a/PyInstaller/hooks/hook-skimage.transform.py +++ b/PyInstaller/hooks/hook-skimage.transform.py @@ -12,6 +12,7 @@ # 64-bit hiddenimports = ['skimage.draw.draw', 'skimage._shared.geometry', + 'skimage._shared.transform', 'skimage.filters.rank.core_cy'] datas = collect_data_files('skimage')
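For anyone stuck on a PyInstaller release without this hook change, the missing module can also be declared per build with `--hidden-import skimage._shared.transform` on the command line. The patched hook itself, rendered from the after-state above as a plain file (license header omitted), is short: the hidden imports are submodules that static import analysis misses, typically because they are pulled in from compiled extension modules.

```python
# PyInstaller/hooks/hook-skimage.transform.py (after the fix)
from PyInstaller.utils.hooks import collect_data_files

# Submodules imported from compiled extensions, invisible to static analysis.
hiddenimports = ['skimage.draw.draw',
                 'skimage._shared.geometry',
                 'skimage._shared.transform',   # the entry this issue adds
                 'skimage.filters.rank.core_cy']

datas = collect_data_files('skimage')
```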
cupy__cupy-3468
Remove mock from test requirements?
I'm learning how to write mock tests, and I noticed things like `import mock` are workarounds to support PY27 and older PY3. Since CuPy now supports PY35+ only and `mock` is now part of the standard Python library, I suppose this line is no longer needed:
https://github.com/cupy/cupy/blob/74dcb4172578a0771e06f4e44b10b5f73f68fb59/setup.py#L39
and all `import mock` can be replaced by `from unittest import mock`?
[ { "content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.15',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n 'optuna',\n ],\n 'docs': [\n 'sphinx==3.0.4',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 'core/include/cupy/cuComplex_bridge.h',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming 
Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: MacOS\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.chainer.org/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs-cupy.chainer.org/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.5.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.15',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n ],\n 'doctest': [\n 'matplotlib',\n 'optuna',\n ],\n 'docs': [\n 'sphinx==3.0.4',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 'core/include/cupy/cuComplex_bridge.h',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: 
Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: MacOS\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.chainer.org/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs-cupy.chainer.org/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.5.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 2dba2314e41..dfa680ee986 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,6 @@ 'test': [ 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI. 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0 - 'mock', ], 'doctest': [ 'matplotlib', diff --git a/tests/cupy_tests/core_tests/fusion_tests/test_function.py b/tests/cupy_tests/core_tests/fusion_tests/test_function.py index 79e5acaee75..6ca8cb57659 100644 --- a/tests/cupy_tests/core_tests/fusion_tests/test_function.py +++ b/tests/cupy_tests/core_tests/fusion_tests/test_function.py @@ -1,7 +1,6 @@ import threading import unittest - -import mock +from unittest import mock import cupy from cupy import testing @@ -200,7 +199,7 @@ def check(self, xp, func, expected_name, is_elementwise): with mock.patch(target_full_name) as kernel: func(a, b, c) - kernel.assert_called_once() + assert kernel.call_count == 1 self.assertEqual(kernel.call_args[1]['name'], expected_name) # Test there's no error in computation (without mock) diff --git a/tests/cupy_tests/cuda_tests/test_compiler.py b/tests/cupy_tests/cuda_tests/test_compiler.py index 59aab999232..6d8a21cdd1c 100644 --- a/tests/cupy_tests/cuda_tests/test_compiler.py +++ b/tests/cupy_tests/cuda_tests/test_compiler.py @@ -1,7 +1,6 @@ import pickle import unittest - -import mock +from unittest import mock import cupy from cupy.cuda import compiler diff --git a/tests/cupy_tests/cuda_tests/test_profile.py b/tests/cupy_tests/cuda_tests/test_profile.py index 626128877a0..1467822a76c 100644 --- a/tests/cupy_tests/cuda_tests/test_profile.py +++ b/tests/cupy_tests/cuda_tests/test_profile.py @@ -1,6 +1,5 @@ import unittest - -import mock +from unittest import mock from cupy import cuda diff --git a/tests/cupy_tests/prof_tests/test_range.py b/tests/cupy_tests/prof_tests/test_range.py index f1ab075108a..4cec205cfb3 100644 --- a/tests/cupy_tests/prof_tests/test_range.py +++ b/tests/cupy_tests/prof_tests/test_range.py @@ -1,6 +1,5 @@ import unittest - -import mock +from unittest import mock from cupy import cuda from cupy import prof diff --git a/tests/cupy_tests/random_tests/test_sample.py b/tests/cupy_tests/random_tests/test_sample.py index b99627e609c..d3dacb887a3 100644 --- a/tests/cupy_tests/random_tests/test_sample.py +++ b/tests/cupy_tests/random_tests/test_sample.py @@ -1,5 +1,5 @@ -import mock import unittest +from unittest import mock import numpy diff --git a/tests/cupy_tests/test_init.py b/tests/cupy_tests/test_init.py index ea06ef0f63a..c8e01086871 100644 --- a/tests/cupy_tests/test_init.py +++ b/tests/cupy_tests/test_init.py @@ -4,8 +4,8 @@ import sys import tempfile import unittest +from unittest import mock -import mock import numpy import cupy diff --git a/tests/cupyx_tests/scipy_tests/sparse_tests/test_construct.py b/tests/cupyx_tests/scipy_tests/sparse_tests/test_construct.py index 55cca5df142..951dae01917 100644 --- a/tests/cupyx_tests/scipy_tests/sparse_tests/test_construct.py +++ b/tests/cupyx_tests/scipy_tests/sparse_tests/test_construct.py @@ -1,7 +1,7 @@ import re import unittest +from unittest import mock -import mock import numpy import pytest try: diff --git a/tests/cupyx_tests/test_optimize.py b/tests/cupyx_tests/test_optimize.py index 0e33d013a7f..28927f62e40 100644 --- a/tests/cupyx_tests/test_optimize.py +++ b/tests/cupyx_tests/test_optimize.py @@ -1,7 +1,8 @@ -import mock -import pytest import tempfile import unittest +from unittest import mock + +import pytest import cupy from cupy import testing diff --git 
a/tests/cupyx_tests/test_runtime.py b/tests/cupyx_tests/test_runtime.py index bdaed711ef7..0df56e7e757 100644 --- a/tests/cupyx_tests/test_runtime.py +++ b/tests/cupyx_tests/test_runtime.py @@ -1,6 +1,5 @@ import unittest - -import mock +from unittest import mock import cupy import cupyx diff --git a/tests/cupyx_tests/test_time.py b/tests/cupyx_tests/test_time.py index bccf78c99fc..79973a35fa1 100644 --- a/tests/cupyx_tests/test_time.py +++ b/tests/cupyx_tests/test_time.py @@ -1,5 +1,5 @@ -import mock import unittest +from unittest import mock import numpy
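The bulk of this diff is mechanical: every `import mock` becomes `from unittest import mock`, which has shipped in the standard library since Python 3.3, so test behaviour does not change. The one non-mechanical hunk replaces `kernel.assert_called_once()` with an explicit `call_count` comparison; `Mock.assert_called_once()` only exists from Python 3.6 onwards, so the explicit count is the portable spelling while Python 3.5 remains supported. A minimal sketch of both spellings, patching `os.getcwd` purely as an example target:

```python
import os
from unittest import mock   # standard library; replaces the third-party `import mock`

with mock.patch("os.getcwd", return_value="/tmp") as fake_getcwd:
    assert os.getcwd() == "/tmp"

# Portable alternative to fake_getcwd.assert_called_once(), which needs Python 3.6+:
assert fake_getcwd.call_count == 1
```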
mkdocs__mkdocs-904
Error while executing gh-deploy I've successfully deployed a MkDocs site using the gh-deploy command. When I try to deploy some additional changes to my master branch, I get the following error: ``` c:\docs>mkdocs gh-deploy --clean INFO - Cleaning site directory INFO - Building documentation to directory: c:\docs\site INFO - Copying 'c:\docs\site' to 'gh-pages' branch and pushing to GitHub. Traceback (most recent call last): File "C:\Python34\lib\runpy.py", line 170, in _run_module_as_main "__main__", mod_spec) File "C:\Python34\lib\runpy.py", line 85, in _run_code exec(code, run_globals) File "c:\Python34\Scripts\mkdocs.exe\__main__.py", line 9, in <module> File "C:\Python34\lib\site-packages\click\core.py", line 664, in __call__ return self.main(*args, **kwargs) File "C:\Python34\lib\site-packages\click\core.py", line 644, in main rv = self.invoke(ctx) File "C:\Python34\lib\site-packages\click\core.py", line 991, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "C:\Python34\lib\site-packages\click\core.py", line 837, in invoke return ctx.invoke(self.callback, **ctx.params) File "C:\Python34\lib\site-packages\click\core.py", line 464, in invoke return callback(*args, **kwargs) File "C:\Python34\lib\site-packages\mkdocs\cli.py", line 186, in gh_deploy_command gh_deploy.gh_deploy(config, message=message) File "C:\Python34\lib\site-packages\mkdocs\gh_deploy.py", line 69, in gh_deploy remote_branch) File "C:\Python34\lib\site-packages\mkdocs\utils\ghp_import.py", line 163, in ghp_import if not try_rebase(remote, branch): File "C:\Python34\lib\site-packages\mkdocs\utils\ghp_import.py", line 78, in try_rebase if sp.call(cmd) != 0: File "C:\Python34\lib\subprocess.py", line 537, in call with Popen(*popenargs, **kwargs) as p: File "C:\Python34\lib\subprocess.py", line 859, in __init__ restore_signals, start_new_session) File "C:\Python34\lib\subprocess.py", line 1086, in _execute_child args = list2cmdline(args) File "C:\Python34\lib\subprocess.py", line 663, in list2cmdline needquote = (" " in arg) or ("\t" in arg) or not arg TypeError: 'str' does not support the buffer interface ```
[ { "content": "#! /usr/bin/env python\n#\n# This file is part of the ghp-import package released under\n# the Tumbolia Public License.\n\n# Tumbolia Public License\n\n# Copyright 2013, Paul Davis <[email protected]>\n\n# Copying and distribution of this file, with or without modification, are\n# permitted in any medium without royalty provided the copyright notice and this\n# notice are preserved.\n\n# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n# 0. opan saurce LOL\n\nfrom __future__ import unicode_literals\n\nimport errno\nimport logging\nimport os\nimport subprocess as sp\nimport sys\nimport time\nimport unicodedata\n\nlog = logging.getLogger(__name__)\n\n\nif sys.version_info[0] == 3:\n def enc(text):\n if isinstance(text, bytes):\n return text\n return text.encode()\n\n def dec(text):\n if isinstance(text, bytes):\n return text.decode('utf-8')\n return text\n\n def write(pipe, data):\n try:\n pipe.stdin.write(data)\n except IOError as e:\n if e.errno != errno.EPIPE:\n raise\nelse:\n def enc(text):\n if isinstance(text, unicode):\n return text.encode('utf-8')\n return text\n\n def dec(text):\n if isinstance(text, unicode):\n return text\n return text.decode('utf-8')\n\n def write(pipe, data):\n pipe.stdin.write(data)\n\n\ndef normalize_path(path):\n # Fix unicode pathnames on OS X\n # See: http://stackoverflow.com/a/5582439/44289\n if sys.platform == \"darwin\":\n return unicodedata.normalize(\"NFKC\", dec(path))\n return path\n\n\ndef try_rebase(remote, branch):\n cmd = ['git', 'rev-list', '--max-count=1', '%s/%s' % (remote, branch)]\n p = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)\n (rev, _) = p.communicate()\n if p.wait() != 0:\n return True\n cmd = ['git', 'update-ref', 'refs/heads/%s' % branch, rev.strip()]\n if sp.call(cmd) != 0:\n return False\n return True\n\n\ndef get_config(key):\n p = sp.Popen(['git', 'config', key], stdin=sp.PIPE, stdout=sp.PIPE)\n (value, _) = p.communicate()\n return value.decode('utf-8').strip()\n\n\ndef get_prev_commit(branch):\n cmd = ['git', 'rev-list', '--max-count=1', branch, '--']\n p = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)\n (rev, _) = p.communicate()\n if p.wait() != 0:\n return None\n return rev.decode('utf-8').strip()\n\n\ndef mk_when(timestamp=None):\n if timestamp is None:\n timestamp = int(time.time())\n currtz = \"%+05d\" % (-1 * time.timezone / 36) # / 3600 * 100\n return \"%s %s\" % (timestamp, currtz)\n\n\ndef start_commit(pipe, branch, message):\n uname = dec(get_config(\"user.name\"))\n email = dec(get_config(\"user.email\"))\n write(pipe, enc('commit refs/heads/%s\\n' % branch))\n write(pipe, enc('committer %s <%s> %s\\n' % (uname, email, mk_when())))\n write(pipe, enc('data %d\\n%s\\n' % (len(message), message)))\n head = get_prev_commit(branch)\n if head:\n write(pipe, enc('from %s\\n' % head))\n write(pipe, enc('deleteall\\n'))\n\n\ndef add_file(pipe, srcpath, tgtpath):\n with open(srcpath, \"rb\") as handle:\n if os.access(srcpath, os.X_OK):\n write(pipe, enc('M 100755 inline %s\\n' % tgtpath))\n else:\n write(pipe, enc('M 100644 inline %s\\n' % tgtpath))\n data = handle.read()\n write(pipe, enc('data %d\\n' % len(data)))\n write(pipe, enc(data))\n write(pipe, enc('\\n'))\n\n\ndef add_nojekyll(pipe):\n write(pipe, enc('M 100644 inline .nojekyll\\n'))\n write(pipe, enc('data 0\\n'))\n write(pipe, enc('\\n'))\n\n\ndef gitpath(fname):\n norm = os.path.normpath(fname)\n return \"/\".join(norm.split(os.path.sep))\n\n\ndef run_import(srcdir, branch, message, nojekyll):\n 
cmd = ['git', 'fast-import', '--date-format=raw', '--quiet']\n kwargs = {\"stdin\": sp.PIPE}\n if sys.version_info >= (3, 2, 0):\n kwargs[\"universal_newlines\"] = False\n pipe = sp.Popen(cmd, **kwargs)\n start_commit(pipe, branch, message)\n for path, _, fnames in os.walk(srcdir):\n for fn in fnames:\n fpath = os.path.join(path, fn)\n fpath = normalize_path(fpath)\n gpath = gitpath(os.path.relpath(fpath, start=srcdir))\n add_file(pipe, fpath, gpath)\n if nojekyll:\n add_nojekyll(pipe)\n write(pipe, enc('\\n'))\n pipe.stdin.close()\n if pipe.wait() != 0:\n sys.stdout.write(enc(\"Failed to process commit.\\n\"))\n\n\ndef ghp_import(directory, message, remote='origin', branch='gh-pages'):\n\n if not try_rebase(remote, branch):\n log.error(\"Failed to rebase %s branch.\", branch)\n\n nojekyll = True\n\n run_import(directory, branch, message, nojekyll)\n\n proc = sp.Popen(['git', 'push', remote, branch],\n stdout=sp.PIPE, stderr=sp.PIPE)\n proc.communicate()\n return proc.wait() == 0\n", "path": "mkdocs/utils/ghp_import.py" } ]
[ { "content": "#! /usr/bin/env python\n#\n# This file is part of the ghp-import package released under\n# the Tumbolia Public License.\n\n# Tumbolia Public License\n\n# Copyright 2013, Paul Davis <[email protected]>\n\n# Copying and distribution of this file, with or without modification, are\n# permitted in any medium without royalty provided the copyright notice and this\n# notice are preserved.\n\n# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n# 0. opan saurce LOL\n\nfrom __future__ import unicode_literals\n\nimport errno\nimport logging\nimport os\nimport subprocess as sp\nimport sys\nimport time\nimport unicodedata\n\nlog = logging.getLogger(__name__)\n\n\nif sys.version_info[0] == 3:\n def enc(text):\n if isinstance(text, bytes):\n return text\n return text.encode()\n\n def dec(text):\n if isinstance(text, bytes):\n return text.decode('utf-8')\n return text\n\n def write(pipe, data):\n try:\n pipe.stdin.write(data)\n except IOError as e:\n if e.errno != errno.EPIPE:\n raise\nelse:\n def enc(text):\n if isinstance(text, unicode):\n return text.encode('utf-8')\n return text\n\n def dec(text):\n if isinstance(text, unicode):\n return text\n return text.decode('utf-8')\n\n def write(pipe, data):\n pipe.stdin.write(data)\n\n\ndef normalize_path(path):\n # Fix unicode pathnames on OS X\n # See: http://stackoverflow.com/a/5582439/44289\n if sys.platform == \"darwin\":\n return unicodedata.normalize(\"NFKC\", dec(path))\n return path\n\n\ndef try_rebase(remote, branch):\n cmd = ['git', 'rev-list', '--max-count=1', '%s/%s' % (remote, branch)]\n p = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)\n (rev, _) = p.communicate()\n if p.wait() != 0:\n return True\n cmd = ['git', 'update-ref', 'refs/heads/%s' % branch, dec(rev.strip())]\n if sp.call(cmd) != 0:\n return False\n return True\n\n\ndef get_config(key):\n p = sp.Popen(['git', 'config', key], stdin=sp.PIPE, stdout=sp.PIPE)\n (value, _) = p.communicate()\n return value.decode('utf-8').strip()\n\n\ndef get_prev_commit(branch):\n cmd = ['git', 'rev-list', '--max-count=1', branch, '--']\n p = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)\n (rev, _) = p.communicate()\n if p.wait() != 0:\n return None\n return rev.decode('utf-8').strip()\n\n\ndef mk_when(timestamp=None):\n if timestamp is None:\n timestamp = int(time.time())\n currtz = \"%+05d\" % (-1 * time.timezone / 36) # / 3600 * 100\n return \"%s %s\" % (timestamp, currtz)\n\n\ndef start_commit(pipe, branch, message):\n uname = dec(get_config(\"user.name\"))\n email = dec(get_config(\"user.email\"))\n write(pipe, enc('commit refs/heads/%s\\n' % branch))\n write(pipe, enc('committer %s <%s> %s\\n' % (uname, email, mk_when())))\n write(pipe, enc('data %d\\n%s\\n' % (len(message), message)))\n head = get_prev_commit(branch)\n if head:\n write(pipe, enc('from %s\\n' % head))\n write(pipe, enc('deleteall\\n'))\n\n\ndef add_file(pipe, srcpath, tgtpath):\n with open(srcpath, \"rb\") as handle:\n if os.access(srcpath, os.X_OK):\n write(pipe, enc('M 100755 inline %s\\n' % tgtpath))\n else:\n write(pipe, enc('M 100644 inline %s\\n' % tgtpath))\n data = handle.read()\n write(pipe, enc('data %d\\n' % len(data)))\n write(pipe, enc(data))\n write(pipe, enc('\\n'))\n\n\ndef add_nojekyll(pipe):\n write(pipe, enc('M 100644 inline .nojekyll\\n'))\n write(pipe, enc('data 0\\n'))\n write(pipe, enc('\\n'))\n\n\ndef gitpath(fname):\n norm = os.path.normpath(fname)\n return \"/\".join(norm.split(os.path.sep))\n\n\ndef run_import(srcdir, branch, message, 
nojekyll):\n cmd = ['git', 'fast-import', '--date-format=raw', '--quiet']\n kwargs = {\"stdin\": sp.PIPE}\n if sys.version_info >= (3, 2, 0):\n kwargs[\"universal_newlines\"] = False\n pipe = sp.Popen(cmd, **kwargs)\n start_commit(pipe, branch, message)\n for path, _, fnames in os.walk(srcdir):\n for fn in fnames:\n fpath = os.path.join(path, fn)\n fpath = normalize_path(fpath)\n gpath = gitpath(os.path.relpath(fpath, start=srcdir))\n add_file(pipe, fpath, gpath)\n if nojekyll:\n add_nojekyll(pipe)\n write(pipe, enc('\\n'))\n pipe.stdin.close()\n if pipe.wait() != 0:\n sys.stdout.write(enc(\"Failed to process commit.\\n\"))\n\n\ndef ghp_import(directory, message, remote='origin', branch='gh-pages'):\n\n if not try_rebase(remote, branch):\n log.error(\"Failed to rebase %s branch.\", branch)\n\n nojekyll = True\n\n run_import(directory, branch, message, nojekyll)\n\n proc = sp.Popen(['git', 'push', remote, branch],\n stdout=sp.PIPE, stderr=sp.PIPE)\n proc.communicate()\n return proc.wait() == 0\n", "path": "mkdocs/utils/ghp_import.py" } ]
diff --git a/mkdocs/utils/ghp_import.py b/mkdocs/utils/ghp_import.py index c7cc85c091..d6f543563f 100644 --- a/mkdocs/utils/ghp_import.py +++ b/mkdocs/utils/ghp_import.py @@ -74,7 +74,7 @@ def try_rebase(remote, branch): (rev, _) = p.communicate() if p.wait() != 0: return True - cmd = ['git', 'update-ref', 'refs/heads/%s' % branch, rev.strip()] + cmd = ['git', 'update-ref', 'refs/heads/%s' % branch, dec(rev.strip())] if sp.call(cmd) != 0: return False return True
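The traceback in the issue bottoms out in `subprocess.list2cmdline`, which Windows uses to join the argument list into one command string: `rev` comes back from `Popen.communicate()` as bytes on Python 3, and evaluating `" " in arg` on a bytes argument raises the TypeError shown. The fix is to decode the revision before building the next command, which is what the added `dec()` call does. A stripped-down reproduction of the pattern, using only read-only git commands and assuming it is run inside a git checkout:

```python
import subprocess

# What try_rebase() effectively does: capture a revision, then feed it back
# into a second git invocation.
rev = subprocess.run(
    ["git", "rev-list", "--max-count=1", "HEAD"],
    stdout=subprocess.PIPE,
).stdout.strip()                      # bytes on Python 3, e.g. b"3f1c2a..."

# Passing `rev` undecoded into the argument list is what failed on Windows,
# because list2cmdline() evaluates `" " in arg` for every argument.
cmd = ["git", "cat-file", "-t", rev.decode("utf-8")]        # decode first, as dec() does
print(subprocess.run(cmd, stdout=subprocess.PIPE).stdout)   # b"commit\n"
```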
ansible-collections__community.aws-1207
ec2_customer_gateway: bgp_asn is not required ### Summary The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter. It says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method: ``` def ensure_cgw_present(self, bgp_asn, ip_address): if not bgp_asn: bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, Type='ipsec.1', PublicIp=ip_address, BgpAsn=bgp_asn, ) return response ### Issue Type Documentation Report ### Component Name ec2_customer_gateway ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = None configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)] jinja version = 3.1.1 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment main branch, as of 2022-04-18. ### Additional Information Suggested rewording: ``` options: bgp_asn: description: - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000. type: int ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = module.client('ec2')\n except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py" } ]
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN).\n - Defaults to C(65000) if not specified when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = 
module.client('ec2')\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py" } ]
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py index 9c00783a58a..f07e92f4f7c 100644 --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description:
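The diff above is documentation-only; the 65000 fallback it documents already exists in `ensure_cgw_present()` in the module source quoted before it. A minimal standalone sketch of that fallback, for illustration only:

```python
# Illustrative sketch (not part of the module): ensure_cgw_present() falls back
# to ASN 65000 whenever bgp_asn is not supplied and state=present.
DEFAULT_BGP_ASN = 65000

def resolve_bgp_asn(bgp_asn):
    # mirrors "if not bgp_asn: bgp_asn = 65000" in the module above
    return bgp_asn if bgp_asn else DEFAULT_BGP_ASN

assert resolve_bgp_asn(None) == DEFAULT_BGP_ASN
assert resolve_bgp_asn(12345) == 12345
```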
biolab__orange3-text-358
Guardian: Fix failing tests on Travis
<!-- This is an issue template. Please fill in the relevant details in the sections below. -->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.3.0

##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.15.dev

##### Expected behavior
Tests pass.

##### Actual behavior
The Guardian tests are failing.

##### Steps to reproduce the behavior

##### Additional info (worksheets, data, screenshots, ...)
Fix tests.
[ { "content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. \"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, 
max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n credentials = TheGuardianCredentials('')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py" } ]
[ { "content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. \"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, 
max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n credentials = TheGuardianCredentials('test')\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py" } ]
diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py index 56177f642..d7222d41e 100644 --- a/orangecontrib/text/guardian.py +++ b/orangecontrib/text/guardian.py @@ -155,7 +155,7 @@ def search(self, query, from_date=None, to_date=None, max_documents=None, if __name__ == '__main__': - credentials = TheGuardianCredentials('') + credentials = TheGuardianCredentials('test') print(credentials.valid) api = TheGuardianAPI(credentials=credentials) c = api.search('refugees', max_documents=10)
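For context on the one-line patch above: the module's own docstring states that the literal key `test` can be used for testing, and `TheGuardianCredentials.valid` only treats an HTTP 403 response as an invalid key, so the `__main__` demo exercised on Travis passes once the empty key is replaced with `'test'`. A minimal usage sketch (assumes network access to the Guardian API):

```python
# Minimal sketch mirroring the module quoted above; no new API is assumed.
from orangecontrib.text.guardian import TheGuardianAPI, TheGuardianCredentials

credentials = TheGuardianCredentials('test')   # 'test' is the documented testing key
print(credentials.valid)                       # .valid only rejects HTTP 403 responses

api = TheGuardianAPI(credentials=credentials)
corpus = api.search('refugees', max_documents=10)
print(len(corpus))
```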
ipython__ipython-8798
`pip install ipython[all]` ignores platform dependent dependencies If I try to run `pip install ipython[all]` on my python install on windows (Win 7 64-bit, WinPython 2.7.10), it fails with the following: ``` C:\Python\WinPython-64bit-2.7.10.1\python-2.7.10.amd64>pip install --upgrade ipy thon[all] Requirement already up-to-date: ipython[all] in c:\python\winpython-64bit-2.7.10 .1\python-2.7.10.amd64\lib\site-packages Requirement already up-to-date: decorator in c:\python\winpython-64bit-2.7.10.1\ python-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: simplegeneric>0.8 in c:\python\winpython-64bit-2 .7.10.1\python-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: traitlets in c:\python\winpython-64bit-2.7.10.1\ python-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: pickleshare in c:\python\winpython-64bit-2.7.10. 1\python-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: nose>=0.10.1 in c:\python\winpython-64bit-2.7.10 .1\python-2.7.10.amd64\lib\site-packages (from ipython[all]) Collecting ipyparallel (from ipython[all]) Downloading ipyparallel-4.0.2-py2.py3-none-any.whl (164kB) 100% |################################| 167kB 718kB/s Requirement already up-to-date: notebook in c:\python\winpython-64bit-2.7.10.1\p ython-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: requests in c:\python\winpython-64bit-2.7.10.1\p ython-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: nbformat in c:\python\winpython-64bit-2.7.10.1\p ython-2.7.10.amd64\lib\site-packages (from ipython[all]) Collecting pyreadline>=2 (from ipython[all]) Downloading pyreadline-2.0.zip (108kB) 100% |################################| 110kB 2.0MB/s Requirement already up-to-date: nbconvert in c:\python\winpython-64bit-2.7.10.1\ python-2.7.10.amd64\lib\site-packages (from ipython[all]) Collecting testpath (from ipython[all]) Downloading testpath-0.2-py2.py3-none-any.whl Requirement already up-to-date: ipykernel in c:\python\winpython-64bit-2.7.10.1\ python-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: numpydoc in c:\python\winpython-64bit-2.7.10.1\p ython-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: qtconsole in c:\python\winpython-64bit-2.7.10.1\ python-2.7.10.amd64\lib\site-packages (from ipython[all]) Requirement already up-to-date: Sphinx>=1.1 in c:\python\winpython-64bit-2.7.10. 1\python-2.7.10.amd64\lib\site-packages (from ipython[all]) Collecting mock (from ipython[all]) Downloading mock-1.3.0-py2.py3-none-any.whl (56kB) 100% |################################| 57kB 2.9MB/s Collecting gnureadline (from ipython[all]) Downloading gnureadline-6.3.3.tar.gz (2.5MB) 100% |################################| 2.5MB 128kB/s Complete output from command python setup.py egg_info: Error: this module is not meant to work on Windows (try pyreadline instead) ---------------------------------------- Command "python setup.py egg_info" failed with error code 1 in <userpath>\ap pdata\local\temp\pip-build-scarmp\gnureadline ``` pip is v7.1.2. Now, I wasn't able to figure out from pip and setuptools docs how `[all]` is supposed to work, so I'm not sure if this is an issue with the ipython setup file, setuptools, or pip, but I figured this would be the best place to start.
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\nimport shutil\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. 
Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.1', 'numpydoc'],\n test = ['nose>=0.10.1', 'requests', 'testpath'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook'],\n nbconvert = ['nbconvert'],\n)\ninstall_requires = [\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. 
pip < 6 ignores these.\nextras_require.update({\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope', 'gnureadline'],\n 'terminal:sys_platform == \"win32\"': ['pyreadline>=2'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope', 'gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n\neverything = set()\nfor deps in extras_require.values():\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\nimport shutil\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. 
Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.1', 'numpydoc'],\n test = ['nose>=0.10.1', 'requests', 'testpath'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook'],\n nbconvert = ['nbconvert'],\n)\ninstall_requires = [\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. 
pip < 6 ignores these.\nextras_require.update({\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope', 'gnureadline'],\n 'terminal:sys_platform == \"win32\"': ['pyreadline>=2'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope', 'gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index dd0e3727a9c..e871b6fe2d1 100755 --- a/setup.py +++ b/setup.py @@ -221,8 +221,9 @@ def run(self): install_requires.append('pexpect') everything = set() -for deps in extras_require.values(): - everything.update(deps) +for key, deps in extras_require.items(): + if ':' not in key: + everything.update(deps) extras_require['all'] = everything if 'setuptools' in sys.modules:
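A condensed sketch of the root cause and of the fix recorded in the diff above: extras keyed with an environment marker (any key containing `:`) are platform-conditional, so building `all` as a blind union of every value list re-added `gnureadline` unconditionally, and that package refuses to build on Windows. The dictionary below is abbreviated from the real `setup.py` for illustration:

```python
# Abbreviated illustration of the extras_require handling in the setup.py above.
# Keys containing ':' carry an environment marker and must not be folded into
# the unconditional 'all' extra.
extras_require = {
    "terminal": [],
    ":sys_platform == 'darwin'": ["appnope", "gnureadline"],
    "terminal:sys_platform == 'win32'": ["pyreadline>=2"],
    "test": ["nose>=0.10.1", "requests", "testpath"],
}

everything = set()
for key, deps in extras_require.items():
    if ":" not in key:            # the fix: skip platform-conditional keys
        everything.update(deps)
extras_require["all"] = sorted(everything)

print(extras_require["all"])      # gnureadline/pyreadline no longer leak into 'all'
```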
Parsl__parsl-186
Allow `DataFuture` to be initialized with a `str` file object [Here](https://github.com/Parsl/parsl/blob/master/parsl/app/futures.py#L77) we check if `file_obj` is `str`. Now that `File` is subclassed from `str`, this will always evaluate as `True`.
[ { "content": "\"\"\"This module implements DataFutures.\n\nWe have two basic types of futures:\n 1. DataFutures which represent data objects\n 2. AppFutures which represent the futures on App/Leaf tasks.\n\"\"\"\nimport os\nimport logging\nfrom concurrent.futures import Future\n\nfrom parsl.dataflow.futures import AppFuture\nfrom parsl.app.errors import *\nfrom parsl.data_provider.files import File\n\nlogger = logging.getLogger(__name__)\n\n# Possible future states (for internal use by the futures package).\nPENDING = 'PENDING'\nRUNNING = 'RUNNING'\n# The future was cancelled by the user...\nCANCELLED = 'CANCELLED'\n# ...and _Waiter.add_cancelled() was called by a worker.\nCANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'\nFINISHED = 'FINISHED'\n\n_STATE_TO_DESCRIPTION_MAP = {\n PENDING: \"pending\",\n RUNNING: \"running\",\n CANCELLED: \"cancelled\",\n CANCELLED_AND_NOTIFIED: \"cancelled\",\n FINISHED: \"finished\"\n}\n\n\nclass DataFuture(Future):\n \"\"\"A datafuture points at an AppFuture.\n\n We are simply wrapping a AppFuture, and adding the specific case where, if\n the future is resolved i.e file exists, then the DataFuture is assumed to be\n resolved.\n \"\"\"\n\n def parent_callback(self, parent_fu):\n \"\"\"Callback from executor future to update the parent.\n\n Args:\n - parent_fu (Future): Future returned by the executor along with callback\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()\n \"\"\"\n if parent_fu.done() is True:\n e = parent_fu._exception\n if e:\n super().set_exception(e)\n else:\n super().set_result(parent_fu.result())\n return\n\n def __init__(self, fut, file_obj, parent=None, tid=None):\n \"\"\"Construct the DataFuture object.\n\n If the file_obj is a string convert to a File.\n\n Args:\n - fut (AppFuture) : AppFuture that this DataFuture will track\n - file_obj (string/File obj) : Something representing file(s)\n\n Kwargs:\n - parent ()\n - tid (task_id) : Task id that this DataFuture tracks\n \"\"\"\n super().__init__()\n self._tid = tid\n if isinstance(file_obj, str):\n self.file_obj = File(file_obj)\n else:\n self.file_obj = file_obj\n self.parent = parent\n self._exception = None\n\n if fut is None:\n logger.debug(\"Setting result to filepath since no future was passed\")\n self.set_result = self.file_obj\n\n else:\n if isinstance(fut, Future):\n self.parent = fut\n self.parent.add_done_callback(self.parent_callback)\n else:\n raise NotFutureError(\"DataFuture can be created only with a FunctionFuture on None\")\n\n logger.debug(\"Creating DataFuture with parent : %s\", parent)\n logger.debug(\"Filepath : %s\", self.filepath)\n\n @property\n def tid(self):\n \"\"\"Returns the task_id of the task that will resolve this DataFuture.\"\"\"\n return self._tid\n\n @property\n def filepath(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.file_obj.filepath\n\n @property\n def filename(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.filepath\n\n def result(self, timeout=None):\n \"\"\"A blocking call that returns either the result or raises an exception.\n\n Assumptions : A DataFuture always has a parent AppFuture. 
The AppFuture does callbacks when\n setup.\n\n Kwargs:\n - timeout (int): Timeout in seconds\n\n Returns:\n - If App completed successfully returns the filepath.\n\n Raises:\n - Exception raised by app if failed.\n\n \"\"\"\n if self.parent:\n if self.parent.done():\n # This explicit call to raise exceptions might be redundant.\n # the result() call *should* raise an exception if there's one\n e = self.parent._exception\n if e:\n raise e\n else:\n self.parent.result(timeout=timeout)\n else:\n self.parent.result(timeout=timeout)\n\n return self.file_obj\n\n def cancel(self):\n \"\"\"Cancel the task that this DataFuture is tracking.\n\n Note: This may not work\n \"\"\"\n if self.parent:\n return self.parent.cancel\n else:\n return False\n\n def cancelled(self):\n if self.parent:\n return self.parent.cancelled()\n else:\n return False\n\n def running(self):\n if self.parent:\n return self.parent.running()\n else:\n return False\n\n def done(self):\n if self.parent:\n return self.parent.done()\n else:\n return True\n\n def exception(self, timeout=None):\n if self.parent:\n return self.parent.exception(timeout=timeout)\n else:\n return True\n\n def add_done_callback(self, fn):\n if self.parent:\n return self.parent.add_done_callback(fn)\n else:\n return None\n\n def __repr__(self):\n\n # The DataFuture could be wrapping an AppFuture whose parent is a Future\n # check to find the top level parent\n if isinstance(self.parent, AppFuture):\n parent = self.parent.parent\n else:\n parent = self.parent\n\n if parent:\n with parent._condition:\n if parent._state == FINISHED:\n if parent._exception:\n return '<%s at %#x state=%s raised %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n parent._exception.__class__.__name__)\n else:\n return '<%s at %#x state=%s returned %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n self.filepath + '_file')\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state])\n\n else:\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[self._state])\n\n\ndef testing_nonfuture():\n fpath = '~/shuffled.txt'\n df = DataFuture(None, fpath)\n print(df)\n print(\"Result : \", df.filepath)\n assert df.filepath == os.path.abspath(os.path.expanduser(fpath))\n\n\nif __name__ == \"__main__\":\n # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)\n import sys\n import random\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logger.debug(\"Begin Testing\")\n\n with open('shuffled.txt', 'w') as testfile:\n nums = list(range(0, 10000))\n random.shuffle(nums)\n for item in nums:\n testfile.write(\"{0}\\n\".format(item))\n\n foo = Future()\n df = DataFuture(foo, './shuffled.txt')\n dx = DataFuture(foo, '~/shuffled.txt')\n\n print(foo.done())\n print(df.done())\n\n testing_nonfuture()\n", "path": "parsl/app/futures.py" } ]
[ { "content": "\"\"\"This module implements DataFutures.\n\nWe have two basic types of futures:\n 1. DataFutures which represent data objects\n 2. AppFutures which represent the futures on App/Leaf tasks.\n\"\"\"\nimport os\nimport logging\nfrom concurrent.futures import Future\n\nfrom parsl.dataflow.futures import AppFuture\nfrom parsl.app.errors import *\nfrom parsl.data_provider.files import File\n\nlogger = logging.getLogger(__name__)\n\n# Possible future states (for internal use by the futures package).\nPENDING = 'PENDING'\nRUNNING = 'RUNNING'\n# The future was cancelled by the user...\nCANCELLED = 'CANCELLED'\n# ...and _Waiter.add_cancelled() was called by a worker.\nCANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'\nFINISHED = 'FINISHED'\n\n_STATE_TO_DESCRIPTION_MAP = {\n PENDING: \"pending\",\n RUNNING: \"running\",\n CANCELLED: \"cancelled\",\n CANCELLED_AND_NOTIFIED: \"cancelled\",\n FINISHED: \"finished\"\n}\n\n\nclass DataFuture(Future):\n \"\"\"A datafuture points at an AppFuture.\n\n We are simply wrapping a AppFuture, and adding the specific case where, if\n the future is resolved i.e file exists, then the DataFuture is assumed to be\n resolved.\n \"\"\"\n\n def parent_callback(self, parent_fu):\n \"\"\"Callback from executor future to update the parent.\n\n Args:\n - parent_fu (Future): Future returned by the executor along with callback\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()\n \"\"\"\n if parent_fu.done() is True:\n e = parent_fu._exception\n if e:\n super().set_exception(e)\n else:\n super().set_result(parent_fu.result())\n return\n\n def __init__(self, fut, file_obj, parent=None, tid=None):\n \"\"\"Construct the DataFuture object.\n\n If the file_obj is a string convert to a File.\n\n Args:\n - fut (AppFuture) : AppFuture that this DataFuture will track\n - file_obj (string/File obj) : Something representing file(s)\n\n Kwargs:\n - parent ()\n - tid (task_id) : Task id that this DataFuture tracks\n \"\"\"\n super().__init__()\n self._tid = tid\n if isinstance(file_obj, str) and not isinstance(file_obj, File):\n self.file_obj = File(file_obj)\n else:\n self.file_obj = file_obj\n self.parent = parent\n self._exception = None\n\n if fut is None:\n logger.debug(\"Setting result to filepath since no future was passed\")\n self.set_result = self.file_obj\n\n else:\n if isinstance(fut, Future):\n self.parent = fut\n self.parent.add_done_callback(self.parent_callback)\n else:\n raise NotFutureError(\"DataFuture can be created only with a FunctionFuture on None\")\n\n logger.debug(\"Creating DataFuture with parent : %s\", parent)\n logger.debug(\"Filepath : %s\", self.filepath)\n\n @property\n def tid(self):\n \"\"\"Returns the task_id of the task that will resolve this DataFuture.\"\"\"\n return self._tid\n\n @property\n def filepath(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.file_obj.filepath\n\n @property\n def filename(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.filepath\n\n def result(self, timeout=None):\n \"\"\"A blocking call that returns either the result or raises an exception.\n\n Assumptions : A DataFuture always has a parent AppFuture. 
The AppFuture does callbacks when\n setup.\n\n Kwargs:\n - timeout (int): Timeout in seconds\n\n Returns:\n - If App completed successfully returns the filepath.\n\n Raises:\n - Exception raised by app if failed.\n\n \"\"\"\n if self.parent:\n if self.parent.done():\n # This explicit call to raise exceptions might be redundant.\n # the result() call *should* raise an exception if there's one\n e = self.parent._exception\n if e:\n raise e\n else:\n self.parent.result(timeout=timeout)\n else:\n self.parent.result(timeout=timeout)\n\n return self.file_obj\n\n def cancel(self):\n \"\"\"Cancel the task that this DataFuture is tracking.\n\n Note: This may not work\n \"\"\"\n if self.parent:\n return self.parent.cancel\n else:\n return False\n\n def cancelled(self):\n if self.parent:\n return self.parent.cancelled()\n else:\n return False\n\n def running(self):\n if self.parent:\n return self.parent.running()\n else:\n return False\n\n def done(self):\n if self.parent:\n return self.parent.done()\n else:\n return True\n\n def exception(self, timeout=None):\n if self.parent:\n return self.parent.exception(timeout=timeout)\n else:\n return True\n\n def add_done_callback(self, fn):\n if self.parent:\n return self.parent.add_done_callback(fn)\n else:\n return None\n\n def __repr__(self):\n\n # The DataFuture could be wrapping an AppFuture whose parent is a Future\n # check to find the top level parent\n if isinstance(self.parent, AppFuture):\n parent = self.parent.parent\n else:\n parent = self.parent\n\n if parent:\n with parent._condition:\n if parent._state == FINISHED:\n if parent._exception:\n return '<%s at %#x state=%s raised %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n parent._exception.__class__.__name__)\n else:\n return '<%s at %#x state=%s returned %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n self.filepath + '_file')\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state])\n\n else:\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[self._state])\n\n\ndef testing_nonfuture():\n fpath = '~/shuffled.txt'\n df = DataFuture(None, fpath)\n print(df)\n print(\"Result : \", df.filepath)\n assert df.filepath == os.path.abspath(os.path.expanduser(fpath))\n\n\nif __name__ == \"__main__\":\n # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)\n import sys\n import random\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logger.debug(\"Begin Testing\")\n\n with open('shuffled.txt', 'w') as testfile:\n nums = list(range(0, 10000))\n random.shuffle(nums)\n for item in nums:\n testfile.write(\"{0}\\n\".format(item))\n\n foo = Future()\n df = DataFuture(foo, './shuffled.txt')\n dx = DataFuture(foo, '~/shuffled.txt')\n\n print(foo.done())\n print(df.done())\n\n testing_nonfuture()\n", "path": "parsl/app/futures.py" } ]
diff --git a/parsl/app/futures.py b/parsl/app/futures.py index 41b0c7946b..9b13e13aec 100644 --- a/parsl/app/futures.py +++ b/parsl/app/futures.py @@ -74,7 +74,7 @@ def __init__(self, fut, file_obj, parent=None, tid=None): """ super().__init__() self._tid = tid - if isinstance(file_obj, str): + if isinstance(file_obj, str) and not isinstance(file_obj, File): self.file_obj = File(file_obj) else: self.file_obj = file_obj
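The pitfall reported in this record is easy to reproduce with a stand-in class (the real `parsl.data_provider.files.File` likewise derives from `str`); the second expression below is the check the patch switches to:

```python
# Stand-in for parsl.data_provider.files.File, which subclasses str.
class File(str):
    pass

file_obj = File("/tmp/out.txt")

# Old check: true for plain strings AND for File objects, so File instances
# were needlessly re-wrapped.
print(isinstance(file_obj, str))                                      # True

# Patched check: only true for plain strings that are not already File objects.
print(isinstance(file_obj, str) and not isinstance(file_obj, File))   # False
```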
microsoft__botbuilder-python-1190
No module named 'botbuilder.ai.qna.dialogs' - Python QnA Sample 49
## Version
botbuilder-ai - 4.9.1

## Describe the bug
I was trying out the QnA Maker Sample 49.qnamaker-all-features. I've configured my QnA KB and also the config.py with the necessary info. However, the module botbuilder.ai.qna.dialogs does not seem to exist. I've manually checked for the class QnAMakerDialog and it does not exist:

> from botbuilder.ai.qna.dialogs import QnAMakerDialog

## To Reproduce
Steps to reproduce the behavior:
1. Download the sample 49.qnamaker-all-features
2. Install the necessary requirements and configure QnAMaker.
3. Run python app.py in the folder

## Expected behavior
The sample should've run successfully.

[bug]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cognitiveservices-language-luis==0.2.0\",\n \"botbuilder-schema>=4.7.1\",\n \"botbuilder-core>=4.7.1\",\n \"aiohttp==3.6.2\",\n]\n\nTESTS_REQUIRES = [\"aiounittest>=1.1.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"ai\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.ai\",\n \"botbuilder.ai.qna\",\n \"botbuilder.ai.luis\",\n \"botbuilder.ai.qna.models\",\n \"botbuilder.ai.qna.utils\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-ai/setup.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cognitiveservices-language-luis==0.2.0\",\n \"botbuilder-schema>=4.7.1\",\n \"botbuilder-core>=4.7.1\",\n \"aiohttp==3.6.2\",\n]\n\nTESTS_REQUIRES = [\"aiounittest>=1.1.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"ai\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.ai\",\n \"botbuilder.ai.qna\",\n \"botbuilder.ai.luis\",\n \"botbuilder.ai.qna.models\",\n \"botbuilder.ai.qna.utils\",\n \"botbuilder.ai.qna.dialogs\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-ai/setup.py" } ]
diff --git a/libraries/botbuilder-ai/setup.py b/libraries/botbuilder-ai/setup.py index 72f112a5a..65b7a8d85 100644 --- a/libraries/botbuilder-ai/setup.py +++ b/libraries/botbuilder-ai/setup.py @@ -39,6 +39,7 @@ "botbuilder.ai.luis", "botbuilder.ai.qna.models", "botbuilder.ai.qna.utils", + "botbuilder.ai.qna.dialogs", ], install_requires=REQUIRES + TESTS_REQUIRES, tests_require=TESTS_REQUIRES,
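The patch above resolves the report by naming the missing subpackage in setup.py's explicit `packages` list; with an explicit list, any subpackage that is not named is simply left out of the built wheel/sdist even though the source tree contains it. A quick, illustrative check one can run against an installed build:

```python
# Illustrative check: does the installed botbuilder-ai build ship the dialogs subpackage?
import importlib.util

spec = importlib.util.find_spec("botbuilder.ai.qna.dialogs")
print("shipped" if spec is not None else "missing")   # 'missing' on the build described in the report
```

An alternative design, not what the recorded fix does, is to generate the list with `setuptools.find_packages()` so that new subpackages cannot be silently omitted.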
OpenNMT__OpenNMT-tf-953
An issue with SequenceRecordInputter ? I tried to create SequenceClassifier model which used SequenceRecordInputter as a part of ParallelInputter - it produced an error before ending first learning step. After isolating the problem, it seems that SequenceRecordInputter dataset generation is the source of it: Reproducible code: ```python3 import numpy as np from opennmt import encoders, inputters, models, Runner vectors = [] for i in range(1000): vectors.append(np.random.rand(np.random.randint(1, 9), 16)) inputters.create_sequence_records(vectors, "train.records") with open("train_labels.txt", "w") as f: f.write("\n".join(np.random.randint(0, 2, 1000).astype("str"))) with open("labels_vocab.txt", "w") as f: f.write("\n".join(["0", "1"])) model = models.SequenceClassifier( inputters.SequenceRecordInputter(16), encoders.SelfAttentionEncoder( num_layers=2, num_units=16, num_heads=4, ffn_inner_dim=64 ), ) config = { "model_dir": ".", "data": { "target_vocabulary": "labels_vocab.txt", "train_features_file": "train.records", "train_labels_file": "train_labels.txt", }, "params": {"optimizer": "Adam", "learning_rate": 0.001}, "train": {"batch_size": 1, "max_step": 2}, } runner = Runner(model, config, auto_config=False) runner.train() ``` Error text ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Input In [12], in <cell line: 1>() ----> 1 runner.train() File ~/.local/lib/python3.9/site-packages/opennmt/runner.py:281, in Runner.train(self, num_devices, with_eval, checkpoint_path, hvd, return_summary, fallback_to_cpu, continue_from_checkpoint) 278 else: 279 trainer = training_util.Trainer(model, optimizer, checkpoint=checkpoint) --> 281 summary = trainer( 282 dataset_fn, 283 max_step=train_config.get("max_step"), 284 accum_steps=accum_steps, 285 report_steps=train_config.get("save_summary_steps", 100), 286 save_steps=train_config.get("save_checkpoints_steps", 5000), 287 evaluator=evaluator, 288 eval_steps=eval_config.get("steps", 5000), 289 moving_average_decay=train_config.get("moving_average_decay"), 290 ) 292 average_last_checkpoints = train_config.get("average_last_checkpoints", 0) 293 if checkpoint is None: File ~/.local/lib/python3.9/site-packages/opennmt/training.py:109, in Trainer.__call__(self, dataset, max_step, accum_steps, report_steps, save_steps, evaluator, eval_steps, moving_average_decay) 107 step = None 108 moving_average = None --> 109 for i, loss in enumerate( 110 self._steps(dataset, accum_steps=accum_steps, report_steps=report_steps) 111 ): 112 if i == 0: 113 self._log_model_info() File ~/.local/lib/python3.9/site-packages/opennmt/training.py:221, in Trainer._steps(self, dataset, accum_steps, report_steps) 209 def _steps(self, dataset, accum_steps=1, report_steps=None): 210 """Returns a generator over training steps (i.e. parameters update). 211 212 Args: (...) 219 A generator that yields a loss value to report for this step. 220 """ --> 221 dataset = self._finalize_dataset(dataset) 222 iterator = iter(dataset) 224 # We define 2 separate functions to support gradient accumulation: 225 # * forward: compute and accumulate the gradients 226 # * step: apply the gradients 227 # When gradient accumulation is disabled, the forward function also applies the gradients. File ~/.local/lib/python3.9/site-packages/opennmt/training.py:206, in Trainer._finalize_dataset(self, dataset) 196 """Returns the final dataset instance to be used for training. 197 198 Args: (...) 203 A ``tf.data.Dataset``. 
204 """ 205 if callable(dataset): --> 206 dataset = dataset(tf.distribute.InputContext()) 207 return dataset File ~/.local/lib/python3.9/site-packages/opennmt/runner.py:220, in Runner.train.<locals>.<lambda>(input_context) 216 batch_type = train_config["batch_type"] 217 batch_size_multiple = 8 if mixed_precision and batch_type == "tokens" else 1 219 dataset_fn = ( --> 220 lambda input_context: model.examples_inputter.make_training_dataset( 221 data_config["train_features_file"], 222 data_config.get("train_labels_file"), 223 train_config["batch_size"], 224 batch_type=batch_type, 225 batch_size_multiple=batch_size_multiple, 226 shuffle_buffer_size=train_config["sample_buffer_size"], 227 length_bucket_width=train_config["length_bucket_width"], 228 maximum_features_length=train_config.get("maximum_features_length"), 229 maximum_labels_length=train_config.get("maximum_labels_length"), 230 single_pass=train_config.get("single_pass", False), 231 num_shards=input_context.num_input_pipelines, 232 shard_index=input_context.input_pipeline_id, 233 prefetch_buffer_size=train_config.get("prefetch_buffer_size"), 234 cardinality_multiple=input_context.num_replicas_in_sync, 235 weights=data_config.get("train_files_weights"), 236 batch_autotune_mode=train_config.get("batch_autotune_mode"), 237 ) 238 ) 240 checkpoint = None 241 evaluator = None File ~/.local/lib/python3.9/site-packages/opennmt/inputters/inputter.py:834, in ExampleInputterAdapter.make_training_dataset(self, features_file, labels_file, batch_size, batch_type, batch_multiplier, batch_size_multiple, shuffle_buffer_size, length_bucket_width, maximum_features_length, maximum_labels_length, single_pass, num_shards, shard_index, num_threads, prefetch_buffer_size, cardinality_multiple, weights, batch_autotune_mode) 832 if weights is not None: 833 dataset = (dataset, weights) --> 834 dataset = dataset_util.training_pipeline( 835 batch_size, 836 batch_type=batch_type, 837 batch_multiplier=batch_multiplier, 838 batch_size_multiple=batch_size_multiple, 839 transform_fns=transform_fns, 840 length_bucket_width=length_bucket_width, 841 features_length_fn=features_length_fn, 842 labels_length_fn=labels_length_fn, 843 single_pass=single_pass, 844 num_shards=num_shards, 845 shard_index=shard_index, 846 num_threads=num_threads, 847 dataset_size=self.get_dataset_size(data_files), 848 shuffle_buffer_size=shuffle_buffer_size, 849 prefetch_buffer_size=prefetch_buffer_size, 850 cardinality_multiple=cardinality_multiple, 851 )(dataset) 852 return dataset File ~/.local/lib/python3.9/site-packages/opennmt/data/dataset.py:637, in training_pipeline.<locals>._pipeline(dataset) 635 if labels_length_fn is not None: 636 length_fn.append(labels_length_fn) --> 637 dataset = dataset.apply( 638 batch_sequence_dataset( 639 batch_size, 640 batch_type=batch_type, 641 batch_multiplier=batch_multiplier, 642 batch_size_multiple=batch_size_multiple, 643 length_bucket_width=length_bucket_width, 644 length_fn=length_fn, 645 ) 646 ) 647 dataset = dataset.apply(filter_irregular_batches(batch_multiplier)) 648 if not single_pass: File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/dataset_ops.py:2270, in DatasetV2.apply(self, transformation_func) 2248 def apply(self, transformation_func): 2249 """Applies a transformation function to this dataset. 2250 2251 `apply` enables chaining of custom `Dataset` transformations, which are (...) 2268 dataset. 
2269 """ -> 2270 dataset = transformation_func(self) 2271 if not isinstance(dataset, DatasetV2): 2272 raise TypeError( 2273 f"`transformation_func` must return a `tf.data.Dataset` object. " 2274 f"Got {type(dataset)}.") File ~/.local/lib/python3.9/site-packages/opennmt/data/dataset.py:482, in batch_sequence_dataset.<locals>.<lambda>(dataset) 475 else: 476 raise ValueError( 477 "Invalid batch type: '{}'; should be 'examples' or 'tokens'".format( 478 batch_type 479 ) 480 ) --> 482 return lambda dataset: dataset.group_by_window(_key_func, _reduce_func, **kwargs) File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/dataset_ops.py:2823, in DatasetV2.group_by_window(self, key_func, reduce_func, window_size, window_size_func, name) 2819 window_size_func = constant_window_func 2821 assert window_size_func is not None -> 2823 return _GroupByWindowDataset( 2824 self, key_func, reduce_func, window_size_func, name=name) File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/dataset_ops.py:5683, in _GroupByWindowDataset.__init__(self, input_dataset, key_func, reduce_func, window_size_func, name) 5681 """See `group_by_window()` for details.""" 5682 self._input_dataset = input_dataset -> 5683 self._make_key_func(key_func, input_dataset) 5684 self._make_reduce_func(reduce_func, input_dataset) 5685 self._make_window_size_func(window_size_func) File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/dataset_ops.py:5721, in _GroupByWindowDataset._make_key_func(self, key_func, input_dataset) 5718 def key_func_wrapper(*args): 5719 return ops.convert_to_tensor(key_func(*args), dtype=dtypes.int64) -> 5721 self._key_func = structured_function.StructuredFunctionWrapper( 5722 key_func_wrapper, self._transformation_name(), dataset=input_dataset) 5723 if not self._key_func.output_structure.is_compatible_with( 5724 tensor_spec.TensorSpec([], dtypes.int64)): 5725 raise ValueError(f"Invalid `key_func`. `key_func` must return a single " 5726 f"`tf.int64` scalar tensor but its return type is " 5727 f"{self._key_func.output_structure}.") File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/structured_function.py:271, in StructuredFunctionWrapper.__init__(self, func, transformation_name, dataset, input_classes, input_shapes, input_types, input_structure, add_to_graph, use_legacy_function, defun_kwargs) 264 warnings.warn( 265 "Even though the `tf.config.experimental_run_functions_eagerly` " 266 "option is set, this option does not apply to tf.data functions. " 267 "To force eager execution of tf.data functions, please use " 268 "`tf.data.experimental.enable_debug_mode()`.") 269 fn_factory = trace_tf_function(defun_kwargs) --> 271 self._function = fn_factory() 272 # There is no graph to add in eager mode. 273 add_to_graph &= not context.executing_eagerly() File ~/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py:2567, in Function.get_concrete_function(self, *args, **kwargs) 2558 def get_concrete_function(self, *args, **kwargs): 2559 """Returns a `ConcreteFunction` specialized to inputs and execution context. 2560 2561 Args: (...) 2565 or `tf.Tensor` or `tf.TensorSpec`. 
2566 """ -> 2567 graph_function = self._get_concrete_function_garbage_collected( 2568 *args, **kwargs) 2569 graph_function._garbage_collector.release() # pylint: disable=protected-access 2570 return graph_function File ~/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py:2533, in Function._get_concrete_function_garbage_collected(self, *args, **kwargs) 2531 args, kwargs = None, None 2532 with self._lock: -> 2533 graph_function, _ = self._maybe_define_function(args, kwargs) 2534 seen_names = set() 2535 captured = object_identity.ObjectIdentitySet( 2536 graph_function.graph.internal_captures) File ~/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py:2711, in Function._maybe_define_function(self, args, kwargs) 2708 cache_key = self._function_cache.generalize(cache_key) 2709 (args, kwargs) = cache_key._placeholder_value() # pylint: disable=protected-access -> 2711 graph_function = self._create_graph_function(args, kwargs) 2712 self._function_cache.add(cache_key, cache_key_deletion_observer, 2713 graph_function) 2715 return graph_function, filtered_flat_args File ~/.local/lib/python3.9/site-packages/tensorflow/python/eager/function.py:2627, in Function._create_graph_function(self, args, kwargs) 2622 missing_arg_names = [ 2623 "%s_%d" % (arg, i) for i, arg in enumerate(missing_arg_names) 2624 ] 2625 arg_names = base_arg_names + missing_arg_names 2626 graph_function = ConcreteFunction( -> 2627 func_graph_module.func_graph_from_py_func( 2628 self._name, 2629 self._python_function, 2630 args, 2631 kwargs, 2632 self.input_signature, 2633 autograph=self._autograph, 2634 autograph_options=self._autograph_options, 2635 arg_names=arg_names, 2636 capture_by_value=self._capture_by_value), 2637 self._function_attributes, 2638 spec=self.function_spec, 2639 # Tell the ConcreteFunction to clean up its graph once it goes out of 2640 # scope. This is not the default behavior since it gets used in some 2641 # places (like Keras) where the FuncGraph lives longer than the 2642 # ConcreteFunction. 2643 shared_func_graph=False) 2644 return graph_function File ~/.local/lib/python3.9/site-packages/tensorflow/python/framework/func_graph.py:1141, in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, acd_record_initial_resource_uses) 1138 else: 1139 _, original_func = tf_decorator.unwrap(python_func) -> 1141 func_outputs = python_func(*func_args, **func_kwargs) 1143 # invariant: `func_outputs` contains only Tensors, CompositeTensors, 1144 # TensorArrays and `None`s. 
1145 func_outputs = nest.map_structure( 1146 convert, func_outputs, expand_composites=True) File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/structured_function.py:248, in StructuredFunctionWrapper.__init__.<locals>.trace_tf_function.<locals>.wrapped_fn(*args) 242 @eager_function.defun_with_attributes( 243 input_signature=structure.get_flat_tensor_specs( 244 self._input_structure), 245 autograph=False, 246 attributes=defun_kwargs) 247 def wrapped_fn(*args): # pylint: disable=missing-docstring --> 248 ret = wrapper_helper(*args) 249 ret = structure.to_tensor_list(self._output_structure, ret) 250 return [ops.convert_to_tensor(t) for t in ret] File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/structured_function.py:177, in StructuredFunctionWrapper.__init__.<locals>.wrapper_helper(*args) 175 if not _should_unpack(nested_args): 176 nested_args = (nested_args,) --> 177 ret = autograph.tf_convert(self._func, ag_ctx)(*nested_args) 178 if _should_pack(ret): 179 ret = tuple(ret) File ~/.local/lib/python3.9/site-packages/tensorflow/python/autograph/impl/api.py:689, in convert.<locals>.decorator.<locals>.wrapper(*args, **kwargs) 687 try: 688 with conversion_ctx: --> 689 return converted_call(f, args, kwargs, options=options) 690 except Exception as e: # pylint:disable=broad-except 691 if hasattr(e, 'ag_error_metadata'): File ~/.local/lib/python3.9/site-packages/tensorflow/python/autograph/impl/api.py:377, in converted_call(f, args, kwargs, caller_fn_scope, options) 374 return _call_unconverted(f, args, kwargs, options) 376 if not options.user_requested and conversion.is_allowlisted(f): --> 377 return _call_unconverted(f, args, kwargs, options) 379 # internal_convert_user_code is for example turned off when issuing a dynamic 380 # call conversion from generated code while in nonrecursive mode. In that 381 # case we evidently don't want to recurse, but we still have to convert 382 # things like builtins. 383 if not options.internal_convert_user_code: File ~/.local/lib/python3.9/site-packages/tensorflow/python/autograph/impl/api.py:458, in _call_unconverted(f, args, kwargs, options, update_cache) 455 return f.__self__.call(args, kwargs) 457 if kwargs is not None: --> 458 return f(*args, **kwargs) 459 return f(*args) File ~/.local/lib/python3.9/site-packages/tensorflow/python/data/ops/dataset_ops.py:5719, in _GroupByWindowDataset._make_key_func.<locals>.key_func_wrapper(*args) 5718 def key_func_wrapper(*args): -> 5719 return ops.convert_to_tensor(key_func(*args), dtype=dtypes.int64) File ~/.local/lib/python3.9/site-packages/opennmt/data/dataset.py:442, in batch_sequence_dataset.<locals>._key_func(*args) 437 raise ValueError( 438 "%d length functions were passed but this dataset contains " 439 "%d parallel elements" % (len(length_fns), len(args)) 440 ) 441 # Take the highest bucket id. 
--> 442 bucket_id = tf.reduce_max( 443 [ 444 _get_bucket_id(features, length_fn) 445 for features, length_fn in zip(args, length_fns) 446 ] 447 ) 448 return tf.cast(bucket_id, tf.int64) File ~/.local/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py:153, in filter_traceback.<locals>.error_handler(*args, **kwargs) 151 except Exception as e: 152 filtered_tb = _process_traceback_frames(e.__traceback__) --> 153 raise e.with_traceback(filtered_tb) from None 154 finally: 155 del filtered_tb File ~/.local/lib/python3.9/site-packages/tensorflow/python/ops/array_ops.py:1506, in _autopacking_helper(list_or_tuple, dtype, name) 1504 if isinstance(elem, core.Tensor): 1505 if dtype is not None and elem.dtype.base_dtype != dtype: -> 1506 raise TypeError(f"Cannot convert a list containing a tensor of dtype " 1507 f"{elem.dtype} to {dtype} (Tensor is: {elem!r})") 1508 converted_elems.append(elem) 1509 must_pack = True TypeError: Cannot convert a list containing a tensor of dtype <dtype: 'int32'> to <dtype: 'int64'> (Tensor is: <tf.Tensor 'Const_1:0' shape=() dtype=int32>) ```
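For readers skimming the traceback above: the failure is ultimately a dtype clash inside the length-bucketing key function. The sequence lengths produced for the record side come out as int64, the label side yields int32, and auto-packing the two into a single tensor for `tf.reduce_max` raises the `TypeError` shown at the bottom. A minimal, OpenNMT-free sketch of that clash (assuming TensorFlow 2.x; the variable names are illustrative only, not OpenNMT internals):

```python
# Standalone sketch of the dtype clash reported at the end of the traceback:
# packing an int64 and an int32 scalar into one list for tf.reduce_max fails,
# which mirrors one parallel input reporting its bucket id as int64 (lengths
# from tf.io.parse_sequence_example) while the other reports int32.
import tensorflow as tf

record_bucket_id = tf.constant(3, dtype=tf.int64)  # illustrative: record-side length
label_bucket_id = tf.constant(3, dtype=tf.int32)   # illustrative: label-side length

try:
    tf.reduce_max([record_bucket_id, label_bucket_id])
except TypeError as err:
    print(err)  # Cannot convert a list containing a tensor of dtype int32 to int64 ...

# Casting both ids to a common dtype is enough for the reduction to succeed:
print(tf.reduce_max([tf.cast(record_bucket_id, tf.int32), label_bucket_id]))
```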
[ { "content": "\"\"\"Define inputters reading from TFRecord files.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom opennmt.data import dataset as dataset_util\nfrom opennmt.inputters.inputter import Inputter\n\n\nclass SequenceRecordInputter(Inputter):\n \"\"\"Inputter that reads ``tf.train.SequenceExample``.\n\n See Also:\n :func:`opennmt.inputters.create_sequence_records` to generate a compatible\n dataset.\n \"\"\"\n\n def __init__(self, input_depth, **kwargs):\n \"\"\"Initializes the parameters of the record inputter.\n\n Args:\n input_depth: The depth dimension of the input vectors.\n **kwargs: Additional layer keyword arguments.\n \"\"\"\n super().__init__(**kwargs)\n self.input_depth = input_depth\n\n def make_dataset(self, data_file, training=None):\n return dataset_util.make_datasets(tf.data.TFRecordDataset, data_file)\n\n def input_signature(self):\n return {\n \"tensor\": tf.TensorSpec([None, None, self.input_depth], self.dtype),\n \"length\": tf.TensorSpec([None], tf.int32),\n }\n\n def make_features(self, element=None, features=None, training=None):\n if features is None:\n features = {}\n if \"tensor\" in features:\n return features\n _, feature_lists, lengths = tf.io.parse_sequence_example(\n element,\n sequence_features={\n \"values\": tf.io.FixedLenSequenceFeature(\n [self.input_depth], dtype=tf.float32\n )\n },\n )\n tensor = feature_lists[\"values\"]\n features[\"length\"] = lengths[\"values\"]\n features[\"tensor\"] = tf.cast(tensor, self.dtype)\n return features\n\n def call(self, features, training=None):\n return features[\"tensor\"]\n\n\ndef write_sequence_record(vector, writer):\n \"\"\"Writes a sequence vector as a TFRecord.\n\n Args:\n vector: A 2D Numpy float array of shape :math:`[T, D]`.\n writer: A ``tf.io.TFRecordWriter``.\n\n See Also:\n - :class:`opennmt.inputters.SequenceRecordInputter`\n - :func:`opennmt.inputters.create_sequence_records`\n \"\"\"\n feature_list = tf.train.FeatureList(\n feature=[\n tf.train.Feature(float_list=tf.train.FloatList(value=values))\n for values in vector.astype(np.float32)\n ]\n )\n feature_lists = tf.train.FeatureLists(feature_list={\"values\": feature_list})\n example = tf.train.SequenceExample(feature_lists=feature_lists)\n writer.write(example.SerializeToString())\n\n\ndef create_sequence_records(vectors, path, compression=None):\n \"\"\"Creates a TFRecord file of sequence vectors.\n\n Args:\n vectors: An iterable of 2D Numpy float arrays of shape :math:`[T, D]`.\n path: The output TFRecord file.\n compression: Optional compression type, can be \"GZIP\".\n\n Returns:\n Path to the TFRecord file. In most cases this is the same as :obj:`path` but\n if GZIP compression is enabled, the \".gz\" extension is added if not already\n present.\n\n Raises:\n ValueError: if :obj:`compression` is invalid.\n\n See Also:\n - :class:`opennmt.inputters.SequenceRecordInputter`\n - :func:`opennmt.inputters.write_sequence_record`\n \"\"\"\n if compression is not None:\n if compression not in (\"GZIP\",):\n raise ValueError(\"invalid compression type: %s\" % compression)\n if compression == \"GZIP\" and not path.endswith(\".gz\"):\n path = \"%s.gz\" % path\n writer = tf.io.TFRecordWriter(path, options=compression)\n for vector in vectors:\n write_sequence_record(vector, writer)\n writer.close()\n return path\n", "path": "opennmt/inputters/record_inputter.py" } ]
[ { "content": "\"\"\"Define inputters reading from TFRecord files.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom opennmt.data import dataset as dataset_util\nfrom opennmt.inputters.inputter import Inputter\n\n\nclass SequenceRecordInputter(Inputter):\n \"\"\"Inputter that reads ``tf.train.SequenceExample``.\n\n See Also:\n :func:`opennmt.inputters.create_sequence_records` to generate a compatible\n dataset.\n \"\"\"\n\n def __init__(self, input_depth, **kwargs):\n \"\"\"Initializes the parameters of the record inputter.\n\n Args:\n input_depth: The depth dimension of the input vectors.\n **kwargs: Additional layer keyword arguments.\n \"\"\"\n super().__init__(**kwargs)\n self.input_depth = input_depth\n\n def make_dataset(self, data_file, training=None):\n return dataset_util.make_datasets(tf.data.TFRecordDataset, data_file)\n\n def input_signature(self):\n return {\n \"tensor\": tf.TensorSpec([None, None, self.input_depth], self.dtype),\n \"length\": tf.TensorSpec([None], tf.int32),\n }\n\n def make_features(self, element=None, features=None, training=None):\n if features is None:\n features = {}\n if \"tensor\" in features:\n return features\n _, feature_lists, lengths = tf.io.parse_sequence_example(\n element,\n sequence_features={\n \"values\": tf.io.FixedLenSequenceFeature(\n [self.input_depth], dtype=tf.float32\n )\n },\n )\n tensor = feature_lists[\"values\"]\n features[\"length\"] = tf.cast(lengths[\"values\"], tf.int32)\n features[\"tensor\"] = tf.cast(tensor, self.dtype)\n return features\n\n def call(self, features, training=None):\n return features[\"tensor\"]\n\n\ndef write_sequence_record(vector, writer):\n \"\"\"Writes a sequence vector as a TFRecord.\n\n Args:\n vector: A 2D Numpy float array of shape :math:`[T, D]`.\n writer: A ``tf.io.TFRecordWriter``.\n\n See Also:\n - :class:`opennmt.inputters.SequenceRecordInputter`\n - :func:`opennmt.inputters.create_sequence_records`\n \"\"\"\n feature_list = tf.train.FeatureList(\n feature=[\n tf.train.Feature(float_list=tf.train.FloatList(value=values))\n for values in vector.astype(np.float32)\n ]\n )\n feature_lists = tf.train.FeatureLists(feature_list={\"values\": feature_list})\n example = tf.train.SequenceExample(feature_lists=feature_lists)\n writer.write(example.SerializeToString())\n\n\ndef create_sequence_records(vectors, path, compression=None):\n \"\"\"Creates a TFRecord file of sequence vectors.\n\n Args:\n vectors: An iterable of 2D Numpy float arrays of shape :math:`[T, D]`.\n path: The output TFRecord file.\n compression: Optional compression type, can be \"GZIP\".\n\n Returns:\n Path to the TFRecord file. In most cases this is the same as :obj:`path` but\n if GZIP compression is enabled, the \".gz\" extension is added if not already\n present.\n\n Raises:\n ValueError: if :obj:`compression` is invalid.\n\n See Also:\n - :class:`opennmt.inputters.SequenceRecordInputter`\n - :func:`opennmt.inputters.write_sequence_record`\n \"\"\"\n if compression is not None:\n if compression not in (\"GZIP\",):\n raise ValueError(\"invalid compression type: %s\" % compression)\n if compression == \"GZIP\" and not path.endswith(\".gz\"):\n path = \"%s.gz\" % path\n writer = tf.io.TFRecordWriter(path, options=compression)\n for vector in vectors:\n write_sequence_record(vector, writer)\n writer.close()\n return path\n", "path": "opennmt/inputters/record_inputter.py" } ]
diff --git a/opennmt/inputters/record_inputter.py b/opennmt/inputters/record_inputter.py index dc5d68909..26fbd5cdb 100644 --- a/opennmt/inputters/record_inputter.py +++ b/opennmt/inputters/record_inputter.py @@ -48,7 +48,7 @@ def make_features(self, element=None, features=None, training=None): }, ) tensor = feature_lists["values"] - features["length"] = lengths["values"] + features["length"] = tf.cast(lengths["values"], tf.int32) features["tensor"] = tf.cast(tensor, self.dtype) return features diff --git a/opennmt/tests/inputter_test.py b/opennmt/tests/inputter_test.py index 48036665c..f273d40c5 100644 --- a/opennmt/tests/inputter_test.py +++ b/opennmt/tests/inputter_test.py @@ -770,6 +770,7 @@ def testSequenceRecordBatch(self): features = next(iter(dataset)) lengths = features["length"] tensors = features["tensor"] + self.assertEqual(lengths.dtype, tf.int32) self.assertAllEqual(lengths, [3, 6, 1]) for length, tensor, expected_vector in zip(lengths, tensors, vectors): self.assertAllClose(tensor[:length], expected_vector)
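The patch above is a one-line cast in `make_features` (plus a dtype assertion in the test): `tf.io.parse_sequence_example` returns its sequence lengths as int64, while `SequenceRecordInputter.input_signature()` declares `"length"` as `tf.TensorSpec([None], tf.int32)`. A small pure-TensorFlow sketch of where the int64 comes from and what the cast restores; it mirrors `write_sequence_record` and `make_features` above and needs no OpenNMT install:

```python
# Sketch: serialize one SequenceExample the same way write_sequence_record does,
# parse it back the same way make_features does, and inspect the length dtype.
import numpy as np
import tensorflow as tf

vector = np.random.rand(4, 16).astype(np.float32)
feature_list = tf.train.FeatureList(
    feature=[tf.train.Feature(float_list=tf.train.FloatList(value=row)) for row in vector]
)
example = tf.train.SequenceExample(
    feature_lists=tf.train.FeatureLists(feature_list={"values": feature_list})
)

_, parsed_feature_lists, lengths = tf.io.parse_sequence_example(
    tf.constant([example.SerializeToString()]),
    sequence_features={"values": tf.io.FixedLenSequenceFeature([16], dtype=tf.float32)},
)
print(lengths["values"].dtype)                     # int64 -- what the inputter used to expose
print(tf.cast(lengths["values"], tf.int32).dtype)  # int32 -- matches the declared TensorSpec
```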
mitmproxy__mitmproxy-1801
Make Travis Great Again (Master Edition) Working on improving the speed and accuracy of Travis's testing.
[ { "content": "import os\nimport select\nimport socket\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport binascii\n\nfrom typing import Optional # noqa\n\nfrom mitmproxy.utils import strutils\n\nimport certifi\nfrom backports import ssl_match_hostname\nimport OpenSSL\nfrom OpenSSL import SSL\n\nfrom mitmproxy import certs\nfrom mitmproxy.utils import version_check\nfrom mitmproxy.types import serializable\nfrom mitmproxy import exceptions\nfrom mitmproxy.types import basethread\n\n# This is a rather hackish way to make sure that\n# the latest version of pyOpenSSL is actually installed.\nversion_check.check_pyopenssl_version()\n\nsocket_fileobject = socket.SocketIO\n\nEINTR = 4\nif os.environ.get(\"NO_ALPN\"):\n HAS_ALPN = False\nelse:\n HAS_ALPN = SSL._lib.Cryptography_HAS_ALPN\n\n# To enable all SSL methods use: SSLv23\n# then add options to disable certain methods\n# https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3\nSSL_BASIC_OPTIONS = (\n SSL.OP_CIPHER_SERVER_PREFERENCE\n)\nif hasattr(SSL, \"OP_NO_COMPRESSION\"):\n SSL_BASIC_OPTIONS |= SSL.OP_NO_COMPRESSION\n\nSSL_DEFAULT_METHOD = SSL.SSLv23_METHOD\nSSL_DEFAULT_OPTIONS = (\n SSL.OP_NO_SSLv2 |\n SSL.OP_NO_SSLv3 |\n SSL_BASIC_OPTIONS\n)\nif hasattr(SSL, \"OP_NO_COMPRESSION\"):\n SSL_DEFAULT_OPTIONS |= SSL.OP_NO_COMPRESSION\n\n\"\"\"\nMap a reasonable SSL version specification into the format OpenSSL expects.\nDon't ask...\nhttps://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3\n\"\"\"\nsslversion_choices = {\n \"all\": (SSL.SSLv23_METHOD, SSL_BASIC_OPTIONS),\n # SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+\n # TLSv1_METHOD would be TLS 1.0 only\n \"secure\": (SSL.SSLv23_METHOD, (SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL_BASIC_OPTIONS)),\n \"SSLv2\": (SSL.SSLv2_METHOD, SSL_BASIC_OPTIONS),\n \"SSLv3\": (SSL.SSLv3_METHOD, SSL_BASIC_OPTIONS),\n \"TLSv1\": (SSL.TLSv1_METHOD, SSL_BASIC_OPTIONS),\n \"TLSv1_1\": (SSL.TLSv1_1_METHOD, SSL_BASIC_OPTIONS),\n \"TLSv1_2\": (SSL.TLSv1_2_METHOD, SSL_BASIC_OPTIONS),\n}\n\nssl_method_names = {\n SSL.SSLv2_METHOD: \"SSLv2\",\n SSL.SSLv3_METHOD: \"SSLv3\",\n SSL.SSLv23_METHOD: \"SSLv23\",\n SSL.TLSv1_METHOD: \"TLSv1\",\n SSL.TLSv1_1_METHOD: \"TLSv1.1\",\n SSL.TLSv1_2_METHOD: \"TLSv1.2\",\n}\n\n\nclass SSLKeyLogger:\n\n def __init__(self, filename):\n self.filename = filename\n self.f = None\n self.lock = threading.Lock()\n\n # required for functools.wraps, which pyOpenSSL uses.\n __name__ = \"SSLKeyLogger\"\n\n def __call__(self, connection, where, ret):\n if where == SSL.SSL_CB_HANDSHAKE_DONE and ret == 1:\n with self.lock:\n if not self.f:\n d = os.path.dirname(self.filename)\n if not os.path.isdir(d):\n os.makedirs(d)\n self.f = open(self.filename, \"ab\")\n self.f.write(b\"\\r\\n\")\n client_random = binascii.hexlify(connection.client_random())\n masterkey = binascii.hexlify(connection.master_key())\n self.f.write(b\"CLIENT_RANDOM %s %s\\r\\n\" % (client_random, masterkey))\n self.f.flush()\n\n def close(self):\n with self.lock:\n if self.f:\n self.f.close()\n\n @staticmethod\n def create_logfun(filename):\n if filename:\n return SSLKeyLogger(filename)\n return False\n\n\nlog_ssl_key = SSLKeyLogger.create_logfun(\n os.getenv(\"MITMPROXY_SSLKEYLOGFILE\") or os.getenv(\"SSLKEYLOGFILE\"))\n\n\nclass _FileLike:\n BLOCKSIZE = 1024 * 32\n\n def __init__(self, o):\n self.o = o\n self._log = None\n self.first_byte_timestamp = None\n\n def set_descriptor(self, o):\n self.o = o\n\n def __getattr__(self, attr):\n return getattr(self.o, attr)\n\n def start_log(self):\n 
\"\"\"\n Starts or resets the log.\n\n This will store all bytes read or written.\n \"\"\"\n self._log = []\n\n def stop_log(self):\n \"\"\"\n Stops the log.\n \"\"\"\n self._log = None\n\n def is_logging(self):\n return self._log is not None\n\n def get_log(self):\n \"\"\"\n Returns the log as a string.\n \"\"\"\n if not self.is_logging():\n raise ValueError(\"Not logging!\")\n return b\"\".join(self._log)\n\n def add_log(self, v):\n if self.is_logging():\n self._log.append(v)\n\n def reset_timestamps(self):\n self.first_byte_timestamp = None\n\n\nclass Writer(_FileLike):\n\n def flush(self):\n \"\"\"\n May raise exceptions.TcpDisconnect\n \"\"\"\n if hasattr(self.o, \"flush\"):\n try:\n self.o.flush()\n except (socket.error, IOError) as v:\n raise exceptions.TcpDisconnect(str(v))\n\n def write(self, v):\n \"\"\"\n May raise exceptions.TcpDisconnect\n \"\"\"\n if v:\n self.first_byte_timestamp = self.first_byte_timestamp or time.time()\n try:\n if hasattr(self.o, \"sendall\"):\n self.add_log(v)\n return self.o.sendall(v)\n else:\n r = self.o.write(v)\n self.add_log(v[:r])\n return r\n except (SSL.Error, socket.error) as e:\n raise exceptions.TcpDisconnect(str(e))\n\n\nclass Reader(_FileLike):\n\n def read(self, length):\n \"\"\"\n If length is -1, we read until connection closes.\n \"\"\"\n result = b''\n start = time.time()\n while length == -1 or length > 0:\n if length == -1 or length > self.BLOCKSIZE:\n rlen = self.BLOCKSIZE\n else:\n rlen = length\n try:\n data = self.o.read(rlen)\n except SSL.ZeroReturnError:\n # TLS connection was shut down cleanly\n break\n except (SSL.WantWriteError, SSL.WantReadError):\n # From the OpenSSL docs:\n # If the underlying BIO is non-blocking, SSL_read() will also return when the\n # underlying BIO could not satisfy the needs of SSL_read() to continue the\n # operation. 
In this case a call to SSL_get_error with the return value of\n # SSL_read() will yield SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE.\n if (time.time() - start) < self.o.gettimeout():\n time.sleep(0.1)\n continue\n else:\n raise exceptions.TcpTimeout()\n except socket.timeout:\n raise exceptions.TcpTimeout()\n except socket.error as e:\n raise exceptions.TcpDisconnect(str(e))\n except SSL.SysCallError as e:\n if e.args == (-1, 'Unexpected EOF'):\n break\n raise exceptions.TlsException(str(e))\n except SSL.Error as e:\n raise exceptions.TlsException(str(e))\n self.first_byte_timestamp = self.first_byte_timestamp or time.time()\n if not data:\n break\n result += data\n if length != -1:\n length -= len(data)\n self.add_log(result)\n return result\n\n def readline(self, size=None):\n result = b''\n bytes_read = 0\n while True:\n if size is not None and bytes_read >= size:\n break\n ch = self.read(1)\n bytes_read += 1\n if not ch:\n break\n else:\n result += ch\n if ch == b'\\n':\n break\n return result\n\n def safe_read(self, length):\n \"\"\"\n Like .read, but is guaranteed to either return length bytes, or\n raise an exception.\n \"\"\"\n result = self.read(length)\n if length != -1 and len(result) != length:\n if not result:\n raise exceptions.TcpDisconnect()\n else:\n raise exceptions.TcpReadIncomplete(\n \"Expected %s bytes, got %s\" % (length, len(result))\n )\n return result\n\n def peek(self, length):\n \"\"\"\n Tries to peek into the underlying file object.\n\n Returns:\n Up to the next N bytes if peeking is successful.\n\n Raises:\n exceptions.TcpException if there was an error with the socket\n TlsException if there was an error with pyOpenSSL.\n NotImplementedError if the underlying file object is not a [pyOpenSSL] socket\n \"\"\"\n if isinstance(self.o, socket_fileobject):\n try:\n return self.o._sock.recv(length, socket.MSG_PEEK)\n except socket.error as e:\n raise exceptions.TcpException(repr(e))\n elif isinstance(self.o, SSL.Connection):\n try:\n return self.o.recv(length, socket.MSG_PEEK)\n except SSL.Error as e:\n raise exceptions.TlsException(str(e))\n else:\n raise NotImplementedError(\"Can only peek into (pyOpenSSL) sockets\")\n\n\nclass Address(serializable.Serializable):\n\n \"\"\"\n This class wraps an IPv4/IPv6 tuple to provide named attributes and\n ipv6 information.\n \"\"\"\n\n def __init__(self, address, use_ipv6=False):\n self.address = tuple(address)\n self.use_ipv6 = use_ipv6\n\n def get_state(self):\n return {\n \"address\": self.address,\n \"use_ipv6\": self.use_ipv6\n }\n\n def set_state(self, state):\n self.address = state[\"address\"]\n self.use_ipv6 = state[\"use_ipv6\"]\n\n @classmethod\n def from_state(cls, state):\n return Address(**state)\n\n @classmethod\n def wrap(cls, t):\n if isinstance(t, cls):\n return t\n else:\n return cls(t)\n\n def __call__(self):\n return self.address\n\n @property\n def host(self):\n return self.address[0]\n\n @property\n def port(self):\n return self.address[1]\n\n @property\n def use_ipv6(self):\n return self.family == socket.AF_INET6\n\n @use_ipv6.setter\n def use_ipv6(self, b):\n self.family = socket.AF_INET6 if b else socket.AF_INET\n\n def __repr__(self):\n return \"{}:{}\".format(self.host, self.port)\n\n def __eq__(self, other):\n if not other:\n return False\n other = Address.wrap(other)\n return (self.address, self.family) == (other.address, other.family)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.address) ^ 42 # different hash than the tuple 
alone.\n\n\ndef ssl_read_select(rlist, timeout):\n \"\"\"\n This is a wrapper around select.select() which also works for SSL.Connections\n by taking ssl_connection.pending() into account.\n\n Caveats:\n If .pending() > 0 for any of the connections in rlist, we avoid the select syscall\n and **will not include any other connections which may or may not be ready**.\n\n Args:\n rlist: wait until ready for reading\n\n Returns:\n subset of rlist which is ready for reading.\n \"\"\"\n return [\n conn for conn in rlist\n if isinstance(conn, SSL.Connection) and conn.pending() > 0\n ] or select.select(rlist, (), (), timeout)[0]\n\n\ndef close_socket(sock):\n \"\"\"\n Does a hard close of a socket, without emitting a RST.\n \"\"\"\n try:\n # We already indicate that we close our end.\n # may raise \"Transport endpoint is not connected\" on Linux\n sock.shutdown(socket.SHUT_WR)\n\n # Section 4.2.2.13 of RFC 1122 tells us that a close() with any pending\n # readable data could lead to an immediate RST being sent (which is the\n # case on Windows).\n # http://ia600609.us.archive.org/22/items/TheUltimateSo_lingerPageOrWhyIsMyTcpNotReliable/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable.html\n #\n # This in turn results in the following issue: If we send an error page\n # to the client and then close the socket, the RST may be received by\n # the client before the error page and the users sees a connection\n # error rather than the error page. Thus, we try to empty the read\n # buffer on Windows first. (see\n # https://github.com/mitmproxy/mitmproxy/issues/527#issuecomment-93782988)\n #\n\n if os.name == \"nt\": # pragma: no cover\n # We cannot rely on the shutdown()-followed-by-read()-eof technique\n # proposed by the page above: Some remote machines just don't send\n # a TCP FIN, which would leave us in the unfortunate situation that\n # recv() would block infinitely. 
As a workaround, we set a timeout\n # here even if we are in blocking mode.\n sock.settimeout(sock.gettimeout() or 20)\n\n # limit at a megabyte so that we don't read infinitely\n for _ in range(1024 ** 3 // 4096):\n # may raise a timeout/disconnect exception.\n if not sock.recv(4096):\n break\n\n # Now we can close the other half as well.\n sock.shutdown(socket.SHUT_RD)\n\n except socket.error:\n pass\n\n sock.close()\n\n\nclass _Connection:\n\n rbufsize = -1\n wbufsize = -1\n\n def _makefile(self):\n \"\"\"\n Set up .rfile and .wfile attributes from .connection\n \"\"\"\n # Ideally, we would use the Buffered IO in Python 3 by default.\n # Unfortunately, the implementation of .peek() is broken for n>1 bytes,\n # as it may just return what's left in the buffer and not all the bytes we want.\n # As a workaround, we just use unbuffered sockets directly.\n # https://mail.python.org/pipermail/python-dev/2009-June/089986.html\n self.rfile = Reader(socket.SocketIO(self.connection, \"rb\"))\n self.wfile = Writer(socket.SocketIO(self.connection, \"wb\"))\n\n def __init__(self, connection):\n if connection:\n self.connection = connection\n self.ip_address = Address(connection.getpeername())\n self._makefile()\n else:\n self.connection = None\n self.ip_address = None\n self.rfile = None\n self.wfile = None\n\n self.ssl_established = False\n self.finished = False\n\n def get_current_cipher(self):\n if not self.ssl_established:\n return None\n\n name = self.connection.get_cipher_name()\n bits = self.connection.get_cipher_bits()\n version = self.connection.get_cipher_version()\n return name, bits, version\n\n def finish(self):\n self.finished = True\n # If we have an SSL connection, wfile.close == connection.close\n # (We call _FileLike.set_descriptor(conn))\n # Closing the socket is not our task, therefore we don't call close\n # then.\n if not isinstance(self.connection, SSL.Connection):\n if not getattr(self.wfile, \"closed\", False):\n try:\n self.wfile.flush()\n self.wfile.close()\n except exceptions.TcpDisconnect:\n pass\n\n self.rfile.close()\n else:\n try:\n self.connection.shutdown()\n except SSL.Error:\n pass\n\n def _create_ssl_context(self,\n method=SSL_DEFAULT_METHOD,\n options=SSL_DEFAULT_OPTIONS,\n verify_options=SSL.VERIFY_NONE,\n ca_path=None,\n ca_pemfile=None,\n cipher_list=None,\n alpn_protos=None,\n alpn_select=None,\n alpn_select_callback=None,\n sni=None,\n ):\n \"\"\"\n Creates an SSL Context.\n\n :param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD\n :param options: A bit field consisting of OpenSSL.SSL.OP_* values\n :param verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values\n :param ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool\n :param ca_pemfile: Path to a PEM formatted trusted CA certificate\n :param cipher_list: A textual OpenSSL cipher list, see https://www.openssl.org/docs/apps/ciphers.html\n :rtype : SSL.Context\n \"\"\"\n try:\n context = SSL.Context(method)\n except ValueError as e:\n method_name = ssl_method_names.get(method, \"unknown\")\n raise exceptions.TlsException(\n \"SSL method \\\"%s\\\" is most likely not supported \"\n \"or disabled (for security reasons) in your libssl. 
\"\n \"Please refer to https://github.com/mitmproxy/mitmproxy/issues/1101 \"\n \"for more details.\" % method_name\n )\n\n # Options (NO_SSLv2/3)\n if options is not None:\n context.set_options(options)\n\n # Verify Options (NONE/PEER and trusted CAs)\n if verify_options is not None:\n def verify_cert(conn, x509, errno, err_depth, is_cert_verified):\n if not is_cert_verified:\n self.ssl_verification_error = exceptions.InvalidCertificateException(\n \"Certificate Verification Error for {}: {} (errno: {}, depth: {})\".format(\n sni,\n strutils.native(SSL._ffi.string(SSL._lib.X509_verify_cert_error_string(errno)), \"utf8\"),\n errno,\n err_depth\n )\n )\n return is_cert_verified\n\n context.set_verify(verify_options, verify_cert)\n if ca_path is None and ca_pemfile is None:\n ca_pemfile = certifi.where()\n try:\n context.load_verify_locations(ca_pemfile, ca_path)\n except SSL.Error:\n raise exceptions.TlsException(\n \"Cannot load trusted certificates ({}, {}).\".format(\n ca_pemfile, ca_path\n )\n )\n\n # Workaround for\n # https://github.com/pyca/pyopenssl/issues/190\n # https://github.com/mitmproxy/mitmproxy/issues/472\n # Options already set before are not cleared.\n context.set_mode(SSL._lib.SSL_MODE_AUTO_RETRY)\n\n # Cipher List\n if cipher_list:\n try:\n context.set_cipher_list(cipher_list)\n\n # TODO: maybe change this to with newer pyOpenSSL APIs\n context.set_tmp_ecdh(OpenSSL.crypto.get_elliptic_curve('prime256v1'))\n except SSL.Error as v:\n raise exceptions.TlsException(\"SSL cipher specification error: %s\" % str(v))\n\n # SSLKEYLOGFILE\n if log_ssl_key:\n context.set_info_callback(log_ssl_key)\n\n if HAS_ALPN:\n if alpn_protos is not None:\n # advertise application layer protocols\n context.set_alpn_protos(alpn_protos)\n elif alpn_select is not None and alpn_select_callback is None:\n # select application layer protocol\n def alpn_select_callback(conn_, options):\n if alpn_select in options:\n return bytes(alpn_select)\n else: # pragma no cover\n return options[0]\n context.set_alpn_select_callback(alpn_select_callback)\n elif alpn_select_callback is not None and alpn_select is None:\n context.set_alpn_select_callback(alpn_select_callback)\n elif alpn_select_callback is not None and alpn_select is not None:\n raise exceptions.TlsException(\"ALPN error: only define alpn_select (string) OR alpn_select_callback (method).\")\n\n return context\n\n\nclass ConnectionCloser:\n def __init__(self, conn):\n self.conn = conn\n self._canceled = False\n\n def pop(self):\n \"\"\"\n Cancel the current closer, and return a fresh one.\n \"\"\"\n self._canceled = True\n return ConnectionCloser(self.conn)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n if not self._canceled:\n self.conn.close()\n\n\nclass TCPClient(_Connection):\n\n def __init__(self, address, source_address=None, spoof_source_address=None):\n super().__init__(None)\n self.address = address\n self.source_address = source_address\n self.cert = None\n self.server_certs = []\n self.ssl_verification_error = None # type: Optional[exceptions.InvalidCertificateException]\n self.sni = None\n self.spoof_source_address = spoof_source_address\n\n @property\n def address(self):\n return self.__address\n\n @address.setter\n def address(self, address):\n if address:\n self.__address = Address.wrap(address)\n else:\n self.__address = None\n\n @property\n def source_address(self):\n return self.__source_address\n\n @source_address.setter\n def source_address(self, source_address):\n if source_address:\n 
self.__source_address = Address.wrap(source_address)\n else:\n self.__source_address = None\n\n def close(self):\n # Make sure to close the real socket, not the SSL proxy.\n # OpenSSL is really good at screwing up, i.e. when trying to recv from a failed connection,\n # it tries to renegotiate...\n if isinstance(self.connection, SSL.Connection):\n close_socket(self.connection._socket)\n else:\n close_socket(self.connection)\n\n def create_ssl_context(self, cert=None, alpn_protos=None, **sslctx_kwargs):\n context = self._create_ssl_context(\n alpn_protos=alpn_protos,\n **sslctx_kwargs)\n # Client Certs\n if cert:\n try:\n context.use_privatekey_file(cert)\n context.use_certificate_file(cert)\n except SSL.Error as v:\n raise exceptions.TlsException(\"SSL client certificate error: %s\" % str(v))\n return context\n\n def convert_to_ssl(self, sni=None, alpn_protos=None, **sslctx_kwargs):\n \"\"\"\n cert: Path to a file containing both client cert and private key.\n\n options: A bit field consisting of OpenSSL.SSL.OP_* values\n verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values\n ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool\n ca_pemfile: Path to a PEM formatted trusted CA certificate\n \"\"\"\n verification_mode = sslctx_kwargs.get('verify_options', None)\n if verification_mode == SSL.VERIFY_PEER and not sni:\n raise exceptions.TlsException(\"Cannot validate certificate hostname without SNI\")\n\n context = self.create_ssl_context(\n alpn_protos=alpn_protos,\n sni=sni,\n **sslctx_kwargs\n )\n self.connection = SSL.Connection(context, self.connection)\n if sni:\n self.sni = sni\n self.connection.set_tlsext_host_name(sni.encode(\"idna\"))\n self.connection.set_connect_state()\n try:\n self.connection.do_handshake()\n except SSL.Error as v:\n if self.ssl_verification_error:\n raise self.ssl_verification_error\n else:\n raise exceptions.TlsException(\"SSL handshake error: %s\" % repr(v))\n else:\n # Fix for pre v1.0 OpenSSL, which doesn't throw an exception on\n # certificate validation failure\n if verification_mode == SSL.VERIFY_PEER and self.ssl_verification_error:\n raise self.ssl_verification_error\n\n self.cert = certs.SSLCert(self.connection.get_peer_certificate())\n\n # Keep all server certificates in a list\n for i in self.connection.get_peer_cert_chain():\n self.server_certs.append(certs.SSLCert(i))\n\n # Validate TLS Hostname\n try:\n crt = dict(\n subjectAltName=[(\"DNS\", x.decode(\"ascii\", \"strict\")) for x in self.cert.altnames]\n )\n if self.cert.cn:\n crt[\"subject\"] = [[[\"commonName\", self.cert.cn.decode(\"ascii\", \"strict\")]]]\n if sni:\n hostname = sni\n else:\n hostname = \"no-hostname\"\n ssl_match_hostname.match_hostname(crt, hostname)\n except (ValueError, ssl_match_hostname.CertificateError) as e:\n self.ssl_verification_error = exceptions.InvalidCertificateException(\n \"Certificate Verification Error for {}: {}\".format(\n sni or repr(self.address),\n str(e)\n )\n )\n if verification_mode == SSL.VERIFY_PEER:\n raise self.ssl_verification_error\n\n self.ssl_established = True\n self.rfile.set_descriptor(self.connection)\n self.wfile.set_descriptor(self.connection)\n\n def makesocket(self):\n # some parties (cuckoo sandbox) need to hook this\n return socket.socket(self.address.family, socket.SOCK_STREAM)\n\n def connect(self):\n try:\n connection = self.makesocket()\n\n if self.spoof_source_address:\n try:\n # 19 is `IP_TRANSPARENT`, which is only available on Python 3.3+ on some OSes\n if not 
connection.getsockopt(socket.SOL_IP, 19):\n connection.setsockopt(socket.SOL_IP, 19, 1)\n except socket.error as e:\n raise exceptions.TcpException(\n \"Failed to spoof the source address: \" + e.strerror\n )\n if self.source_address:\n connection.bind(self.source_address())\n connection.connect(self.address())\n self.source_address = Address(connection.getsockname())\n except (socket.error, IOError) as err:\n raise exceptions.TcpException(\n 'Error connecting to \"%s\": %s' %\n (self.address.host, err)\n )\n self.connection = connection\n self.ip_address = Address(connection.getpeername())\n self._makefile()\n return ConnectionCloser(self)\n\n def settimeout(self, n):\n self.connection.settimeout(n)\n\n def gettimeout(self):\n return self.connection.gettimeout()\n\n def get_alpn_proto_negotiated(self):\n if HAS_ALPN and self.ssl_established:\n return self.connection.get_alpn_proto_negotiated()\n else:\n return b\"\"\n\n\nclass BaseHandler(_Connection):\n\n \"\"\"\n The instantiator is expected to call the handle() and finish() methods.\n \"\"\"\n\n def __init__(self, connection, address, server):\n super().__init__(connection)\n self.address = Address.wrap(address)\n self.server = server\n self.clientcert = None\n\n def create_ssl_context(self,\n cert, key,\n handle_sni=None,\n request_client_cert=None,\n chain_file=None,\n dhparams=None,\n extra_chain_certs=None,\n **sslctx_kwargs):\n \"\"\"\n cert: A certs.SSLCert object or the path to a certificate\n chain file.\n\n handle_sni: SNI handler, should take a connection object. Server\n name can be retrieved like this:\n\n connection.get_servername()\n\n And you can specify the connection keys as follows:\n\n new_context = Context(TLSv1_METHOD)\n new_context.use_privatekey(key)\n new_context.use_certificate(cert)\n connection.set_context(new_context)\n\n The request_client_cert argument requires some explanation. We're\n supposed to be able to do this with no negative effects - if the\n client has no cert to present, we're notified and proceed as usual.\n Unfortunately, Android seems to have a bug (tested on 4.2.2) - when\n an Android client is asked to present a certificate it does not\n have, it hangs up, which is frankly bogus. 
Some time down the track\n we may be able to make the proper behaviour the default again, but\n until then we're conservative.\n \"\"\"\n\n context = self._create_ssl_context(ca_pemfile=chain_file, **sslctx_kwargs)\n\n context.use_privatekey(key)\n if isinstance(cert, certs.SSLCert):\n context.use_certificate(cert.x509)\n else:\n context.use_certificate_chain_file(cert)\n\n if extra_chain_certs:\n for i in extra_chain_certs:\n context.add_extra_chain_cert(i.x509)\n\n if handle_sni:\n # SNI callback happens during do_handshake()\n context.set_tlsext_servername_callback(handle_sni)\n\n if request_client_cert:\n def save_cert(conn_, cert, errno_, depth_, preverify_ok_):\n self.clientcert = certs.SSLCert(cert)\n # Return true to prevent cert verification error\n return True\n context.set_verify(SSL.VERIFY_PEER, save_cert)\n\n if dhparams:\n SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams)\n\n return context\n\n def convert_to_ssl(self, cert, key, **sslctx_kwargs):\n \"\"\"\n Convert connection to SSL.\n For a list of parameters, see BaseHandler._create_ssl_context(...)\n \"\"\"\n\n context = self.create_ssl_context(\n cert,\n key,\n **sslctx_kwargs)\n self.connection = SSL.Connection(context, self.connection)\n self.connection.set_accept_state()\n try:\n self.connection.do_handshake()\n except SSL.Error as v:\n raise exceptions.TlsException(\"SSL handshake error: %s\" % repr(v))\n self.ssl_established = True\n self.rfile.set_descriptor(self.connection)\n self.wfile.set_descriptor(self.connection)\n\n def handle(self): # pragma: no cover\n raise NotImplementedError\n\n def settimeout(self, n):\n self.connection.settimeout(n)\n\n def get_alpn_proto_negotiated(self):\n if HAS_ALPN and self.ssl_established:\n return self.connection.get_alpn_proto_negotiated()\n else:\n return b\"\"\n\n\nclass Counter:\n def __init__(self):\n self._count = 0\n self._lock = threading.Lock()\n\n @property\n def count(self):\n with self._lock:\n return self._count\n\n def __enter__(self):\n with self._lock:\n self._count += 1\n\n def __exit__(self, *args):\n with self._lock:\n self._count -= 1\n\n\nclass TCPServer:\n request_queue_size = 20\n\n def __init__(self, address):\n self.address = Address.wrap(address)\n self.__is_shut_down = threading.Event()\n self.__shutdown_request = False\n self.socket = socket.socket(self.address.family, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind(self.address())\n self.address = Address.wrap(self.socket.getsockname())\n self.socket.listen(self.request_queue_size)\n self.handler_counter = Counter()\n\n def connection_thread(self, connection, client_address):\n with self.handler_counter:\n client_address = Address(client_address)\n try:\n self.handle_client_connection(connection, client_address)\n except:\n self.handle_error(connection, client_address)\n finally:\n close_socket(connection)\n\n def serve_forever(self, poll_interval=0.1):\n self.__is_shut_down.clear()\n try:\n while not self.__shutdown_request:\n try:\n r, w_, e_ = select.select(\n [self.socket], [], [], poll_interval)\n except select.error as ex: # pragma: no cover\n if ex[0] == EINTR:\n continue\n else:\n raise\n if self.socket in r:\n connection, client_address = self.socket.accept()\n t = basethread.BaseThread(\n \"TCPConnectionHandler (%s: %s:%s -> %s:%s)\" % (\n self.__class__.__name__,\n client_address[0],\n client_address[1],\n self.address.host,\n self.address.port\n ),\n target=self.connection_thread,\n args=(connection, client_address),\n )\n 
t.setDaemon(1)\n try:\n t.start()\n except threading.ThreadError:\n self.handle_error(connection, Address(client_address))\n connection.close()\n finally:\n self.__shutdown_request = False\n self.__is_shut_down.set()\n\n def shutdown(self):\n self.__shutdown_request = True\n self.__is_shut_down.wait()\n self.socket.close()\n self.handle_shutdown()\n\n def handle_error(self, connection_, client_address, fp=sys.stderr):\n \"\"\"\n Called when handle_client_connection raises an exception.\n \"\"\"\n # If a thread has persisted after interpreter exit, the module might be\n # none.\n if traceback:\n exc = str(traceback.format_exc())\n print(u'-' * 40, file=fp)\n print(\n u\"Error in processing of request from %s\" % repr(client_address), file=fp)\n print(exc, file=fp)\n print(u'-' * 40, file=fp)\n\n def handle_client_connection(self, conn, client_address): # pragma: no cover\n \"\"\"\n Called after client connection.\n \"\"\"\n raise NotImplementedError\n\n def handle_shutdown(self):\n \"\"\"\n Called after server shutdown.\n \"\"\"\n\n def wait_for_silence(self, timeout=5):\n start = time.time()\n while 1:\n if time.time() - start >= timeout:\n raise exceptions.Timeout(\n \"%s service threads still alive\" %\n self.handler_counter.count\n )\n if self.handler_counter.count == 0:\n return\n", "path": "mitmproxy/net/tcp.py" } ]
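The most visible change between this `before_files` listing and the `after_files` listing that follows is at module import time: the `NO_ALPN` environment-variable override is dropped, and `HAS_ALPN` is taken straight from pyOpenSSL's binding flag. A hedged sketch of the two variants side by side (it relies on `SSL._lib.Cryptography_HAS_ALPN`, a private attribute that the pyOpenSSL version used in this file exposes; newer releases may not):

```python
# Before: the flag could be forced off via the NO_ALPN environment variable.
# After: it simply mirrors whether the linked OpenSSL build supports ALPN.
import os
from OpenSSL import SSL

# old behaviour (mitmproxy/net/tcp.py before the change)
if os.environ.get("NO_ALPN"):
    HAS_ALPN_OLD = False
else:
    HAS_ALPN_OLD = SSL._lib.Cryptography_HAS_ALPN

# new behaviour (after the change)
HAS_ALPN_NEW = SSL._lib.Cryptography_HAS_ALPN

print(HAS_ALPN_OLD, HAS_ALPN_NEW)
```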
[ { "content": "import os\nimport select\nimport socket\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport binascii\n\nfrom typing import Optional # noqa\n\nfrom mitmproxy.utils import strutils\n\nimport certifi\nfrom backports import ssl_match_hostname\nimport OpenSSL\nfrom OpenSSL import SSL\n\nfrom mitmproxy import certs\nfrom mitmproxy.utils import version_check\nfrom mitmproxy.types import serializable\nfrom mitmproxy import exceptions\nfrom mitmproxy.types import basethread\n\n# This is a rather hackish way to make sure that\n# the latest version of pyOpenSSL is actually installed.\nversion_check.check_pyopenssl_version()\n\nsocket_fileobject = socket.SocketIO\n\nEINTR = 4\nHAS_ALPN = SSL._lib.Cryptography_HAS_ALPN\n\n# To enable all SSL methods use: SSLv23\n# then add options to disable certain methods\n# https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3\nSSL_BASIC_OPTIONS = (\n SSL.OP_CIPHER_SERVER_PREFERENCE\n)\nif hasattr(SSL, \"OP_NO_COMPRESSION\"):\n SSL_BASIC_OPTIONS |= SSL.OP_NO_COMPRESSION\n\nSSL_DEFAULT_METHOD = SSL.SSLv23_METHOD\nSSL_DEFAULT_OPTIONS = (\n SSL.OP_NO_SSLv2 |\n SSL.OP_NO_SSLv3 |\n SSL_BASIC_OPTIONS\n)\nif hasattr(SSL, \"OP_NO_COMPRESSION\"):\n SSL_DEFAULT_OPTIONS |= SSL.OP_NO_COMPRESSION\n\n\"\"\"\nMap a reasonable SSL version specification into the format OpenSSL expects.\nDon't ask...\nhttps://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3\n\"\"\"\nsslversion_choices = {\n \"all\": (SSL.SSLv23_METHOD, SSL_BASIC_OPTIONS),\n # SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+\n # TLSv1_METHOD would be TLS 1.0 only\n \"secure\": (SSL.SSLv23_METHOD, (SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL_BASIC_OPTIONS)),\n \"SSLv2\": (SSL.SSLv2_METHOD, SSL_BASIC_OPTIONS),\n \"SSLv3\": (SSL.SSLv3_METHOD, SSL_BASIC_OPTIONS),\n \"TLSv1\": (SSL.TLSv1_METHOD, SSL_BASIC_OPTIONS),\n \"TLSv1_1\": (SSL.TLSv1_1_METHOD, SSL_BASIC_OPTIONS),\n \"TLSv1_2\": (SSL.TLSv1_2_METHOD, SSL_BASIC_OPTIONS),\n}\n\nssl_method_names = {\n SSL.SSLv2_METHOD: \"SSLv2\",\n SSL.SSLv3_METHOD: \"SSLv3\",\n SSL.SSLv23_METHOD: \"SSLv23\",\n SSL.TLSv1_METHOD: \"TLSv1\",\n SSL.TLSv1_1_METHOD: \"TLSv1.1\",\n SSL.TLSv1_2_METHOD: \"TLSv1.2\",\n}\n\n\nclass SSLKeyLogger:\n\n def __init__(self, filename):\n self.filename = filename\n self.f = None\n self.lock = threading.Lock()\n\n # required for functools.wraps, which pyOpenSSL uses.\n __name__ = \"SSLKeyLogger\"\n\n def __call__(self, connection, where, ret):\n if where == SSL.SSL_CB_HANDSHAKE_DONE and ret == 1:\n with self.lock:\n if not self.f:\n d = os.path.dirname(self.filename)\n if not os.path.isdir(d):\n os.makedirs(d)\n self.f = open(self.filename, \"ab\")\n self.f.write(b\"\\r\\n\")\n client_random = binascii.hexlify(connection.client_random())\n masterkey = binascii.hexlify(connection.master_key())\n self.f.write(b\"CLIENT_RANDOM %s %s\\r\\n\" % (client_random, masterkey))\n self.f.flush()\n\n def close(self):\n with self.lock:\n if self.f:\n self.f.close()\n\n @staticmethod\n def create_logfun(filename):\n if filename:\n return SSLKeyLogger(filename)\n return False\n\n\nlog_ssl_key = SSLKeyLogger.create_logfun(\n os.getenv(\"MITMPROXY_SSLKEYLOGFILE\") or os.getenv(\"SSLKEYLOGFILE\"))\n\n\nclass _FileLike:\n BLOCKSIZE = 1024 * 32\n\n def __init__(self, o):\n self.o = o\n self._log = None\n self.first_byte_timestamp = None\n\n def set_descriptor(self, o):\n self.o = o\n\n def __getattr__(self, attr):\n return getattr(self.o, attr)\n\n def start_log(self):\n \"\"\"\n Starts or resets the log.\n\n This will store all bytes 
read or written.\n \"\"\"\n self._log = []\n\n def stop_log(self):\n \"\"\"\n Stops the log.\n \"\"\"\n self._log = None\n\n def is_logging(self):\n return self._log is not None\n\n def get_log(self):\n \"\"\"\n Returns the log as a string.\n \"\"\"\n if not self.is_logging():\n raise ValueError(\"Not logging!\")\n return b\"\".join(self._log)\n\n def add_log(self, v):\n if self.is_logging():\n self._log.append(v)\n\n def reset_timestamps(self):\n self.first_byte_timestamp = None\n\n\nclass Writer(_FileLike):\n\n def flush(self):\n \"\"\"\n May raise exceptions.TcpDisconnect\n \"\"\"\n if hasattr(self.o, \"flush\"):\n try:\n self.o.flush()\n except (socket.error, IOError) as v:\n raise exceptions.TcpDisconnect(str(v))\n\n def write(self, v):\n \"\"\"\n May raise exceptions.TcpDisconnect\n \"\"\"\n if v:\n self.first_byte_timestamp = self.first_byte_timestamp or time.time()\n try:\n if hasattr(self.o, \"sendall\"):\n self.add_log(v)\n return self.o.sendall(v)\n else:\n r = self.o.write(v)\n self.add_log(v[:r])\n return r\n except (SSL.Error, socket.error) as e:\n raise exceptions.TcpDisconnect(str(e))\n\n\nclass Reader(_FileLike):\n\n def read(self, length):\n \"\"\"\n If length is -1, we read until connection closes.\n \"\"\"\n result = b''\n start = time.time()\n while length == -1 or length > 0:\n if length == -1 or length > self.BLOCKSIZE:\n rlen = self.BLOCKSIZE\n else:\n rlen = length\n try:\n data = self.o.read(rlen)\n except SSL.ZeroReturnError:\n # TLS connection was shut down cleanly\n break\n except (SSL.WantWriteError, SSL.WantReadError):\n # From the OpenSSL docs:\n # If the underlying BIO is non-blocking, SSL_read() will also return when the\n # underlying BIO could not satisfy the needs of SSL_read() to continue the\n # operation. In this case a call to SSL_get_error with the return value of\n # SSL_read() will yield SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE.\n if (time.time() - start) < self.o.gettimeout():\n time.sleep(0.1)\n continue\n else:\n raise exceptions.TcpTimeout()\n except socket.timeout:\n raise exceptions.TcpTimeout()\n except socket.error as e:\n raise exceptions.TcpDisconnect(str(e))\n except SSL.SysCallError as e:\n if e.args == (-1, 'Unexpected EOF'):\n break\n raise exceptions.TlsException(str(e))\n except SSL.Error as e:\n raise exceptions.TlsException(str(e))\n self.first_byte_timestamp = self.first_byte_timestamp or time.time()\n if not data:\n break\n result += data\n if length != -1:\n length -= len(data)\n self.add_log(result)\n return result\n\n def readline(self, size=None):\n result = b''\n bytes_read = 0\n while True:\n if size is not None and bytes_read >= size:\n break\n ch = self.read(1)\n bytes_read += 1\n if not ch:\n break\n else:\n result += ch\n if ch == b'\\n':\n break\n return result\n\n def safe_read(self, length):\n \"\"\"\n Like .read, but is guaranteed to either return length bytes, or\n raise an exception.\n \"\"\"\n result = self.read(length)\n if length != -1 and len(result) != length:\n if not result:\n raise exceptions.TcpDisconnect()\n else:\n raise exceptions.TcpReadIncomplete(\n \"Expected %s bytes, got %s\" % (length, len(result))\n )\n return result\n\n def peek(self, length):\n \"\"\"\n Tries to peek into the underlying file object.\n\n Returns:\n Up to the next N bytes if peeking is successful.\n\n Raises:\n exceptions.TcpException if there was an error with the socket\n TlsException if there was an error with pyOpenSSL.\n NotImplementedError if the underlying file object is not a [pyOpenSSL] socket\n \"\"\"\n if 
isinstance(self.o, socket_fileobject):\n try:\n return self.o._sock.recv(length, socket.MSG_PEEK)\n except socket.error as e:\n raise exceptions.TcpException(repr(e))\n elif isinstance(self.o, SSL.Connection):\n try:\n return self.o.recv(length, socket.MSG_PEEK)\n except SSL.Error as e:\n raise exceptions.TlsException(str(e))\n else:\n raise NotImplementedError(\"Can only peek into (pyOpenSSL) sockets\")\n\n\nclass Address(serializable.Serializable):\n\n \"\"\"\n This class wraps an IPv4/IPv6 tuple to provide named attributes and\n ipv6 information.\n \"\"\"\n\n def __init__(self, address, use_ipv6=False):\n self.address = tuple(address)\n self.use_ipv6 = use_ipv6\n\n def get_state(self):\n return {\n \"address\": self.address,\n \"use_ipv6\": self.use_ipv6\n }\n\n def set_state(self, state):\n self.address = state[\"address\"]\n self.use_ipv6 = state[\"use_ipv6\"]\n\n @classmethod\n def from_state(cls, state):\n return Address(**state)\n\n @classmethod\n def wrap(cls, t):\n if isinstance(t, cls):\n return t\n else:\n return cls(t)\n\n def __call__(self):\n return self.address\n\n @property\n def host(self):\n return self.address[0]\n\n @property\n def port(self):\n return self.address[1]\n\n @property\n def use_ipv6(self):\n return self.family == socket.AF_INET6\n\n @use_ipv6.setter\n def use_ipv6(self, b):\n self.family = socket.AF_INET6 if b else socket.AF_INET\n\n def __repr__(self):\n return \"{}:{}\".format(self.host, self.port)\n\n def __eq__(self, other):\n if not other:\n return False\n other = Address.wrap(other)\n return (self.address, self.family) == (other.address, other.family)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.address) ^ 42 # different hash than the tuple alone.\n\n\ndef ssl_read_select(rlist, timeout):\n \"\"\"\n This is a wrapper around select.select() which also works for SSL.Connections\n by taking ssl_connection.pending() into account.\n\n Caveats:\n If .pending() > 0 for any of the connections in rlist, we avoid the select syscall\n and **will not include any other connections which may or may not be ready**.\n\n Args:\n rlist: wait until ready for reading\n\n Returns:\n subset of rlist which is ready for reading.\n \"\"\"\n return [\n conn for conn in rlist\n if isinstance(conn, SSL.Connection) and conn.pending() > 0\n ] or select.select(rlist, (), (), timeout)[0]\n\n\ndef close_socket(sock):\n \"\"\"\n Does a hard close of a socket, without emitting a RST.\n \"\"\"\n try:\n # We already indicate that we close our end.\n # may raise \"Transport endpoint is not connected\" on Linux\n sock.shutdown(socket.SHUT_WR)\n\n # Section 4.2.2.13 of RFC 1122 tells us that a close() with any pending\n # readable data could lead to an immediate RST being sent (which is the\n # case on Windows).\n # http://ia600609.us.archive.org/22/items/TheUltimateSo_lingerPageOrWhyIsMyTcpNotReliable/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable.html\n #\n # This in turn results in the following issue: If we send an error page\n # to the client and then close the socket, the RST may be received by\n # the client before the error page and the users sees a connection\n # error rather than the error page. Thus, we try to empty the read\n # buffer on Windows first. 
(see\n # https://github.com/mitmproxy/mitmproxy/issues/527#issuecomment-93782988)\n #\n\n if os.name == \"nt\": # pragma: no cover\n # We cannot rely on the shutdown()-followed-by-read()-eof technique\n # proposed by the page above: Some remote machines just don't send\n # a TCP FIN, which would leave us in the unfortunate situation that\n # recv() would block infinitely. As a workaround, we set a timeout\n # here even if we are in blocking mode.\n sock.settimeout(sock.gettimeout() or 20)\n\n # limit at a megabyte so that we don't read infinitely\n for _ in range(1024 ** 3 // 4096):\n # may raise a timeout/disconnect exception.\n if not sock.recv(4096):\n break\n\n # Now we can close the other half as well.\n sock.shutdown(socket.SHUT_RD)\n\n except socket.error:\n pass\n\n sock.close()\n\n\nclass _Connection:\n\n rbufsize = -1\n wbufsize = -1\n\n def _makefile(self):\n \"\"\"\n Set up .rfile and .wfile attributes from .connection\n \"\"\"\n # Ideally, we would use the Buffered IO in Python 3 by default.\n # Unfortunately, the implementation of .peek() is broken for n>1 bytes,\n # as it may just return what's left in the buffer and not all the bytes we want.\n # As a workaround, we just use unbuffered sockets directly.\n # https://mail.python.org/pipermail/python-dev/2009-June/089986.html\n self.rfile = Reader(socket.SocketIO(self.connection, \"rb\"))\n self.wfile = Writer(socket.SocketIO(self.connection, \"wb\"))\n\n def __init__(self, connection):\n if connection:\n self.connection = connection\n self.ip_address = Address(connection.getpeername())\n self._makefile()\n else:\n self.connection = None\n self.ip_address = None\n self.rfile = None\n self.wfile = None\n\n self.ssl_established = False\n self.finished = False\n\n def get_current_cipher(self):\n if not self.ssl_established:\n return None\n\n name = self.connection.get_cipher_name()\n bits = self.connection.get_cipher_bits()\n version = self.connection.get_cipher_version()\n return name, bits, version\n\n def finish(self):\n self.finished = True\n # If we have an SSL connection, wfile.close == connection.close\n # (We call _FileLike.set_descriptor(conn))\n # Closing the socket is not our task, therefore we don't call close\n # then.\n if not isinstance(self.connection, SSL.Connection):\n if not getattr(self.wfile, \"closed\", False):\n try:\n self.wfile.flush()\n self.wfile.close()\n except exceptions.TcpDisconnect:\n pass\n\n self.rfile.close()\n else:\n try:\n self.connection.shutdown()\n except SSL.Error:\n pass\n\n def _create_ssl_context(self,\n method=SSL_DEFAULT_METHOD,\n options=SSL_DEFAULT_OPTIONS,\n verify_options=SSL.VERIFY_NONE,\n ca_path=None,\n ca_pemfile=None,\n cipher_list=None,\n alpn_protos=None,\n alpn_select=None,\n alpn_select_callback=None,\n sni=None,\n ):\n \"\"\"\n Creates an SSL Context.\n\n :param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD\n :param options: A bit field consisting of OpenSSL.SSL.OP_* values\n :param verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values\n :param ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool\n :param ca_pemfile: Path to a PEM formatted trusted CA certificate\n :param cipher_list: A textual OpenSSL cipher list, see https://www.openssl.org/docs/apps/ciphers.html\n :rtype : SSL.Context\n \"\"\"\n try:\n context = SSL.Context(method)\n except ValueError as e:\n method_name = ssl_method_names.get(method, \"unknown\")\n raise exceptions.TlsException(\n \"SSL 
method \\\"%s\\\" is most likely not supported \"\n \"or disabled (for security reasons) in your libssl. \"\n \"Please refer to https://github.com/mitmproxy/mitmproxy/issues/1101 \"\n \"for more details.\" % method_name\n )\n\n # Options (NO_SSLv2/3)\n if options is not None:\n context.set_options(options)\n\n # Verify Options (NONE/PEER and trusted CAs)\n if verify_options is not None:\n def verify_cert(conn, x509, errno, err_depth, is_cert_verified):\n if not is_cert_verified:\n self.ssl_verification_error = exceptions.InvalidCertificateException(\n \"Certificate Verification Error for {}: {} (errno: {}, depth: {})\".format(\n sni,\n strutils.native(SSL._ffi.string(SSL._lib.X509_verify_cert_error_string(errno)), \"utf8\"),\n errno,\n err_depth\n )\n )\n return is_cert_verified\n\n context.set_verify(verify_options, verify_cert)\n if ca_path is None and ca_pemfile is None:\n ca_pemfile = certifi.where()\n try:\n context.load_verify_locations(ca_pemfile, ca_path)\n except SSL.Error:\n raise exceptions.TlsException(\n \"Cannot load trusted certificates ({}, {}).\".format(\n ca_pemfile, ca_path\n )\n )\n\n # Workaround for\n # https://github.com/pyca/pyopenssl/issues/190\n # https://github.com/mitmproxy/mitmproxy/issues/472\n # Options already set before are not cleared.\n context.set_mode(SSL._lib.SSL_MODE_AUTO_RETRY)\n\n # Cipher List\n if cipher_list:\n try:\n context.set_cipher_list(cipher_list)\n\n # TODO: maybe change this to with newer pyOpenSSL APIs\n context.set_tmp_ecdh(OpenSSL.crypto.get_elliptic_curve('prime256v1'))\n except SSL.Error as v:\n raise exceptions.TlsException(\"SSL cipher specification error: %s\" % str(v))\n\n # SSLKEYLOGFILE\n if log_ssl_key:\n context.set_info_callback(log_ssl_key)\n\n if HAS_ALPN:\n if alpn_protos is not None:\n # advertise application layer protocols\n context.set_alpn_protos(alpn_protos)\n elif alpn_select is not None and alpn_select_callback is None:\n # select application layer protocol\n def alpn_select_callback(conn_, options):\n if alpn_select in options:\n return bytes(alpn_select)\n else: # pragma no cover\n return options[0]\n context.set_alpn_select_callback(alpn_select_callback)\n elif alpn_select_callback is not None and alpn_select is None:\n context.set_alpn_select_callback(alpn_select_callback)\n elif alpn_select_callback is not None and alpn_select is not None:\n raise exceptions.TlsException(\"ALPN error: only define alpn_select (string) OR alpn_select_callback (method).\")\n\n return context\n\n\nclass ConnectionCloser:\n def __init__(self, conn):\n self.conn = conn\n self._canceled = False\n\n def pop(self):\n \"\"\"\n Cancel the current closer, and return a fresh one.\n \"\"\"\n self._canceled = True\n return ConnectionCloser(self.conn)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n if not self._canceled:\n self.conn.close()\n\n\nclass TCPClient(_Connection):\n\n def __init__(self, address, source_address=None, spoof_source_address=None):\n super().__init__(None)\n self.address = address\n self.source_address = source_address\n self.cert = None\n self.server_certs = []\n self.ssl_verification_error = None # type: Optional[exceptions.InvalidCertificateException]\n self.sni = None\n self.spoof_source_address = spoof_source_address\n\n @property\n def address(self):\n return self.__address\n\n @address.setter\n def address(self, address):\n if address:\n self.__address = Address.wrap(address)\n else:\n self.__address = None\n\n @property\n def source_address(self):\n return self.__source_address\n\n 
@source_address.setter\n def source_address(self, source_address):\n if source_address:\n self.__source_address = Address.wrap(source_address)\n else:\n self.__source_address = None\n\n def close(self):\n # Make sure to close the real socket, not the SSL proxy.\n # OpenSSL is really good at screwing up, i.e. when trying to recv from a failed connection,\n # it tries to renegotiate...\n if isinstance(self.connection, SSL.Connection):\n close_socket(self.connection._socket)\n else:\n close_socket(self.connection)\n\n def create_ssl_context(self, cert=None, alpn_protos=None, **sslctx_kwargs):\n context = self._create_ssl_context(\n alpn_protos=alpn_protos,\n **sslctx_kwargs)\n # Client Certs\n if cert:\n try:\n context.use_privatekey_file(cert)\n context.use_certificate_file(cert)\n except SSL.Error as v:\n raise exceptions.TlsException(\"SSL client certificate error: %s\" % str(v))\n return context\n\n def convert_to_ssl(self, sni=None, alpn_protos=None, **sslctx_kwargs):\n \"\"\"\n cert: Path to a file containing both client cert and private key.\n\n options: A bit field consisting of OpenSSL.SSL.OP_* values\n verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values\n ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool\n ca_pemfile: Path to a PEM formatted trusted CA certificate\n \"\"\"\n verification_mode = sslctx_kwargs.get('verify_options', None)\n if verification_mode == SSL.VERIFY_PEER and not sni:\n raise exceptions.TlsException(\"Cannot validate certificate hostname without SNI\")\n\n context = self.create_ssl_context(\n alpn_protos=alpn_protos,\n sni=sni,\n **sslctx_kwargs\n )\n self.connection = SSL.Connection(context, self.connection)\n if sni:\n self.sni = sni\n self.connection.set_tlsext_host_name(sni.encode(\"idna\"))\n self.connection.set_connect_state()\n try:\n self.connection.do_handshake()\n except SSL.Error as v:\n if self.ssl_verification_error:\n raise self.ssl_verification_error\n else:\n raise exceptions.TlsException(\"SSL handshake error: %s\" % repr(v))\n else:\n # Fix for pre v1.0 OpenSSL, which doesn't throw an exception on\n # certificate validation failure\n if verification_mode == SSL.VERIFY_PEER and self.ssl_verification_error:\n raise self.ssl_verification_error\n\n self.cert = certs.SSLCert(self.connection.get_peer_certificate())\n\n # Keep all server certificates in a list\n for i in self.connection.get_peer_cert_chain():\n self.server_certs.append(certs.SSLCert(i))\n\n # Validate TLS Hostname\n try:\n crt = dict(\n subjectAltName=[(\"DNS\", x.decode(\"ascii\", \"strict\")) for x in self.cert.altnames]\n )\n if self.cert.cn:\n crt[\"subject\"] = [[[\"commonName\", self.cert.cn.decode(\"ascii\", \"strict\")]]]\n if sni:\n hostname = sni\n else:\n hostname = \"no-hostname\"\n ssl_match_hostname.match_hostname(crt, hostname)\n except (ValueError, ssl_match_hostname.CertificateError) as e:\n self.ssl_verification_error = exceptions.InvalidCertificateException(\n \"Certificate Verification Error for {}: {}\".format(\n sni or repr(self.address),\n str(e)\n )\n )\n if verification_mode == SSL.VERIFY_PEER:\n raise self.ssl_verification_error\n\n self.ssl_established = True\n self.rfile.set_descriptor(self.connection)\n self.wfile.set_descriptor(self.connection)\n\n def makesocket(self):\n # some parties (cuckoo sandbox) need to hook this\n return socket.socket(self.address.family, socket.SOCK_STREAM)\n\n def connect(self):\n try:\n connection = self.makesocket()\n\n if self.spoof_source_address:\n try:\n # 19 is 
`IP_TRANSPARENT`, which is only available on Python 3.3+ on some OSes\n if not connection.getsockopt(socket.SOL_IP, 19):\n connection.setsockopt(socket.SOL_IP, 19, 1)\n except socket.error as e:\n raise exceptions.TcpException(\n \"Failed to spoof the source address: \" + e.strerror\n )\n if self.source_address:\n connection.bind(self.source_address())\n connection.connect(self.address())\n self.source_address = Address(connection.getsockname())\n except (socket.error, IOError) as err:\n raise exceptions.TcpException(\n 'Error connecting to \"%s\": %s' %\n (self.address.host, err)\n )\n self.connection = connection\n self.ip_address = Address(connection.getpeername())\n self._makefile()\n return ConnectionCloser(self)\n\n def settimeout(self, n):\n self.connection.settimeout(n)\n\n def gettimeout(self):\n return self.connection.gettimeout()\n\n def get_alpn_proto_negotiated(self):\n if HAS_ALPN and self.ssl_established:\n return self.connection.get_alpn_proto_negotiated()\n else:\n return b\"\"\n\n\nclass BaseHandler(_Connection):\n\n \"\"\"\n The instantiator is expected to call the handle() and finish() methods.\n \"\"\"\n\n def __init__(self, connection, address, server):\n super().__init__(connection)\n self.address = Address.wrap(address)\n self.server = server\n self.clientcert = None\n\n def create_ssl_context(self,\n cert, key,\n handle_sni=None,\n request_client_cert=None,\n chain_file=None,\n dhparams=None,\n extra_chain_certs=None,\n **sslctx_kwargs):\n \"\"\"\n cert: A certs.SSLCert object or the path to a certificate\n chain file.\n\n handle_sni: SNI handler, should take a connection object. Server\n name can be retrieved like this:\n\n connection.get_servername()\n\n And you can specify the connection keys as follows:\n\n new_context = Context(TLSv1_METHOD)\n new_context.use_privatekey(key)\n new_context.use_certificate(cert)\n connection.set_context(new_context)\n\n The request_client_cert argument requires some explanation. We're\n supposed to be able to do this with no negative effects - if the\n client has no cert to present, we're notified and proceed as usual.\n Unfortunately, Android seems to have a bug (tested on 4.2.2) - when\n an Android client is asked to present a certificate it does not\n have, it hangs up, which is frankly bogus. 
Some time down the track\n we may be able to make the proper behaviour the default again, but\n until then we're conservative.\n \"\"\"\n\n context = self._create_ssl_context(ca_pemfile=chain_file, **sslctx_kwargs)\n\n context.use_privatekey(key)\n if isinstance(cert, certs.SSLCert):\n context.use_certificate(cert.x509)\n else:\n context.use_certificate_chain_file(cert)\n\n if extra_chain_certs:\n for i in extra_chain_certs:\n context.add_extra_chain_cert(i.x509)\n\n if handle_sni:\n # SNI callback happens during do_handshake()\n context.set_tlsext_servername_callback(handle_sni)\n\n if request_client_cert:\n def save_cert(conn_, cert, errno_, depth_, preverify_ok_):\n self.clientcert = certs.SSLCert(cert)\n # Return true to prevent cert verification error\n return True\n context.set_verify(SSL.VERIFY_PEER, save_cert)\n\n if dhparams:\n SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams)\n\n return context\n\n def convert_to_ssl(self, cert, key, **sslctx_kwargs):\n \"\"\"\n Convert connection to SSL.\n For a list of parameters, see BaseHandler._create_ssl_context(...)\n \"\"\"\n\n context = self.create_ssl_context(\n cert,\n key,\n **sslctx_kwargs)\n self.connection = SSL.Connection(context, self.connection)\n self.connection.set_accept_state()\n try:\n self.connection.do_handshake()\n except SSL.Error as v:\n raise exceptions.TlsException(\"SSL handshake error: %s\" % repr(v))\n self.ssl_established = True\n self.rfile.set_descriptor(self.connection)\n self.wfile.set_descriptor(self.connection)\n\n def handle(self): # pragma: no cover\n raise NotImplementedError\n\n def settimeout(self, n):\n self.connection.settimeout(n)\n\n def get_alpn_proto_negotiated(self):\n if HAS_ALPN and self.ssl_established:\n return self.connection.get_alpn_proto_negotiated()\n else:\n return b\"\"\n\n\nclass Counter:\n def __init__(self):\n self._count = 0\n self._lock = threading.Lock()\n\n @property\n def count(self):\n with self._lock:\n return self._count\n\n def __enter__(self):\n with self._lock:\n self._count += 1\n\n def __exit__(self, *args):\n with self._lock:\n self._count -= 1\n\n\nclass TCPServer:\n request_queue_size = 20\n\n def __init__(self, address):\n self.address = Address.wrap(address)\n self.__is_shut_down = threading.Event()\n self.__shutdown_request = False\n self.socket = socket.socket(self.address.family, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind(self.address())\n self.address = Address.wrap(self.socket.getsockname())\n self.socket.listen(self.request_queue_size)\n self.handler_counter = Counter()\n\n def connection_thread(self, connection, client_address):\n with self.handler_counter:\n client_address = Address(client_address)\n try:\n self.handle_client_connection(connection, client_address)\n except:\n self.handle_error(connection, client_address)\n finally:\n close_socket(connection)\n\n def serve_forever(self, poll_interval=0.1):\n self.__is_shut_down.clear()\n try:\n while not self.__shutdown_request:\n try:\n r, w_, e_ = select.select(\n [self.socket], [], [], poll_interval)\n except select.error as ex: # pragma: no cover\n if ex[0] == EINTR:\n continue\n else:\n raise\n if self.socket in r:\n connection, client_address = self.socket.accept()\n t = basethread.BaseThread(\n \"TCPConnectionHandler (%s: %s:%s -> %s:%s)\" % (\n self.__class__.__name__,\n client_address[0],\n client_address[1],\n self.address.host,\n self.address.port\n ),\n target=self.connection_thread,\n args=(connection, client_address),\n )\n 
t.setDaemon(1)\n try:\n t.start()\n except threading.ThreadError:\n self.handle_error(connection, Address(client_address))\n connection.close()\n finally:\n self.__shutdown_request = False\n self.__is_shut_down.set()\n\n def shutdown(self):\n self.__shutdown_request = True\n self.__is_shut_down.wait()\n self.socket.close()\n self.handle_shutdown()\n\n def handle_error(self, connection_, client_address, fp=sys.stderr):\n \"\"\"\n Called when handle_client_connection raises an exception.\n \"\"\"\n # If a thread has persisted after interpreter exit, the module might be\n # none.\n if traceback:\n exc = str(traceback.format_exc())\n print(u'-' * 40, file=fp)\n print(\n u\"Error in processing of request from %s\" % repr(client_address), file=fp)\n print(exc, file=fp)\n print(u'-' * 40, file=fp)\n\n def handle_client_connection(self, conn, client_address): # pragma: no cover\n \"\"\"\n Called after client connection.\n \"\"\"\n raise NotImplementedError\n\n def handle_shutdown(self):\n \"\"\"\n Called after server shutdown.\n \"\"\"\n\n def wait_for_silence(self, timeout=5):\n start = time.time()\n while 1:\n if time.time() - start >= timeout:\n raise exceptions.Timeout(\n \"%s service threads still alive\" %\n self.handler_counter.count\n )\n if self.handler_counter.count == 0:\n return\n", "path": "mitmproxy/net/tcp.py" } ]
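For orientation, here is a minimal client-side usage sketch of the `TCPClient` API shown in the file above. It is an illustrative sketch, not part of the repository: the host name and port are placeholders, and it assumes this version of `mitmproxy.net.tcp` is importable, network access is available, and the peer speaks TLS.

```python
from mitmproxy.net import tcp

c = tcp.TCPClient(("example.com", 443))
with c.connect():  # connect() returns a ConnectionCloser, usable as a context manager
    c.convert_to_ssl(sni="example.com", alpn_protos=[b"h2", b"http/1.1"])
    # Returns b"" when the local OpenSSL build lacks ALPN support (HAS_ALPN is False)
    print(c.get_alpn_proto_negotiated())
```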
diff --git a/.travis.yml b/.travis.yml index 0df3289967..c078e30ac0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,15 +1,6 @@ sudo: false language: python -addons: - apt: - sources: - # Debian sid currently holds OpenSSL 1.0.2 - # change this with future releases! - - debian-sid - packages: - - libssl-dev - env: global: - CI_DEPS=codecov>=2.0.5 @@ -25,9 +16,21 @@ matrix: language: generic env: TOXENV=py35 BDIST=1 - python: 3.5 - env: TOXENV=py35 BDIST=1 + env: TOXENV=py35 OPENSSL_OLD + addons: + apt: + packages: + - libssl-dev - python: 3.5 - env: TOXENV=py35 NO_ALPN=1 + env: TOXENV=py35 BDIST=1 OPENSSL_ALPN + addons: + apt: + sources: + # Debian sid currently holds OpenSSL 1.1.0 + # change this with future releases! + - debian-sid + packages: + - libssl-dev - python: 3.5 env: TOXENV=docs git: @@ -39,10 +42,8 @@ install: - | if [[ $TRAVIS_OS_NAME == "osx" ]] then - brew update || brew update # try again if it fails - brew upgrade - brew reinstall openssl - brew reinstall pyenv + brew update || brew update + brew outdated pyenv || brew upgrade pyenv eval "$(pyenv init -)" env PYTHON_CONFIGURE_OPTS="--enable-framework" pyenv install --skip-existing 3.5.2 pyenv global 3.5.2 @@ -52,8 +53,8 @@ install: - pip install tox script: + - tox -- --cov mitmproxy --cov pathod -v - | - tox -- --cov mitmproxy --cov pathod -v if [[ $BDIST == "1" ]] then git fetch --unshallow --tags @@ -80,3 +81,4 @@ cache: directories: - $HOME/.pyenv - $HOME/.cache/pip + # - $HOME/build/mitmproxy/mitmproxy/.tox diff --git a/mitmproxy/net/tcp.py b/mitmproxy/net/tcp.py index ac78e70d40..11cabf07e1 100644 --- a/mitmproxy/net/tcp.py +++ b/mitmproxy/net/tcp.py @@ -30,10 +30,7 @@ socket_fileobject = socket.SocketIO EINTR = 4 -if os.environ.get("NO_ALPN"): - HAS_ALPN = False -else: - HAS_ALPN = SSL._lib.Cryptography_HAS_ALPN +HAS_ALPN = SSL._lib.Cryptography_HAS_ALPN # To enable all SSL methods use: SSLv23 # then add options to disable certain methods diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 0000000000..3d129ecf70 --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,14 @@ +import pytest +import OpenSSL +import mitmproxy.net.tcp + + +requires_alpn = pytest.mark.skipif( + not mitmproxy.net.tcp.HAS_ALPN, + reason='requires OpenSSL with ALPN support') + + [email protected]() +def disable_alpn(monkeypatch): + monkeypatch.setattr(mitmproxy.net.tcp, 'HAS_ALPN', False) + monkeypatch.setattr(OpenSSL.SSL._lib, 'Cryptography_HAS_ALPN', False) diff --git a/test/mitmproxy/net/test_tcp.py b/test/mitmproxy/net/test_tcp.py index cf3d30f7c8..fe44973bdc 100644 --- a/test/mitmproxy/net/test_tcp.py +++ b/test/mitmproxy/net/test_tcp.py @@ -6,7 +6,7 @@ import os import threading import mock - +import pytest from OpenSSL import SSL from mitmproxy import certs @@ -15,6 +15,7 @@ from mitmproxy import exceptions from . 
import tservers +from ...conftest import requires_alpn class EchoHandler(tcp.BaseHandler): @@ -526,40 +527,47 @@ def test_timeout(self): tutils.raises(exceptions.TcpTimeout, c.rfile.read, 10) +class TestCryptographyALPN: + + def test_has_alpn(self): + if 'OPENSSL_ALPN' in os.environ: + assert tcp.HAS_ALPN + assert SSL._lib.Cryptography_HAS_ALPN + elif 'OPENSSL_OLD' in os.environ: + assert not tcp.HAS_ALPN + assert not SSL._lib.Cryptography_HAS_ALPN + + class TestALPNClient(tservers.ServerTestBase): handler = ALPNHandler ssl = dict( alpn_select=b"bar" ) - if tcp.HAS_ALPN: - def test_alpn(self): - c = tcp.TCPClient(("127.0.0.1", self.port)) - with c.connect(): - c.convert_to_ssl(alpn_protos=[b"foo", b"bar", b"fasel"]) - assert c.get_alpn_proto_negotiated() == b"bar" - assert c.rfile.readline().strip() == b"bar" - - def test_no_alpn(self): - c = tcp.TCPClient(("127.0.0.1", self.port)) - with c.connect(): - c.convert_to_ssl() - assert c.get_alpn_proto_negotiated() == b"" - assert c.rfile.readline().strip() == b"NONE" + @requires_alpn + @pytest.mark.parametrize('has_alpn,alpn_protos, expected_negotiated, expected_response', [ + (True, [b"foo", b"bar", b"fasel"], b'bar', b'bar'), + (True, [], b'', b'NONE'), + (True, None, b'', b'NONE'), + (False, [b"foo", b"bar", b"fasel"], b'', b'NONE'), + (False, [], b'', b'NONE'), + (False, None, b'', b'NONE'), + ]) + def test_alpn(self, monkeypatch, has_alpn, alpn_protos, expected_negotiated, expected_response): + monkeypatch.setattr(tcp, 'HAS_ALPN', has_alpn) + monkeypatch.setattr(SSL._lib, 'Cryptography_HAS_ALPN', has_alpn) - else: - def test_none_alpn(self): - c = tcp.TCPClient(("127.0.0.1", self.port)) - with c.connect(): - c.convert_to_ssl(alpn_protos=[b"foo", b"bar", b"fasel"]) - assert c.get_alpn_proto_negotiated() == b"" - assert c.rfile.readline() == b"NONE" + c = tcp.TCPClient(("127.0.0.1", self.port)) + with c.connect(): + c.convert_to_ssl(alpn_protos=alpn_protos) + assert c.get_alpn_proto_negotiated() == expected_negotiated + assert c.rfile.readline().strip() == expected_response class TestNoSSLNoALPNClient(tservers.ServerTestBase): handler = ALPNHandler - def test_no_ssl_no_alpn(self): + def test_no_ssl_no_alpn(self, disable_alpn): c = tcp.TCPClient(("127.0.0.1", self.port)) with c.connect(): assert c.get_alpn_proto_negotiated() == b"" diff --git a/test/mitmproxy/protocol/test_http2.py b/test/mitmproxy/protocol/test_http2.py index d135cf0870..8e8ba6448a 100644 --- a/test/mitmproxy/protocol/test_http2.py +++ b/test/mitmproxy/protocol/test_http2.py @@ -1,7 +1,6 @@ # coding=utf-8 -import pytest import os import tempfile import traceback @@ -17,6 +16,7 @@ from mitmproxy.net.http import http1, http2 from .. 
import tservers +from ...conftest import requires_alpn import logging logging.getLogger("hyper.packages.hpack.hpack").setLevel(logging.WARNING) @@ -27,11 +27,6 @@ logging.getLogger("PIL.PngImagePlugin").setLevel(logging.WARNING) -requires_alpn = pytest.mark.skipif( - not mitmproxy.net.tcp.HAS_ALPN, - reason='requires OpenSSL with ALPN support') - - # inspect the log: # for msg in self.proxy.tmaster.tlog: # print(msg) diff --git a/test/mitmproxy/test_dump.py b/test/mitmproxy/test_dump.py index e331637d9d..c6b15c845c 100644 --- a/test/mitmproxy/test_dump.py +++ b/test/mitmproxy/test_dump.py @@ -51,14 +51,14 @@ def test_error(self): assert "error" in o.tfile.getvalue() def test_replay(self): - o = dump.Options(server_replay=["nonexistent"], replay_kill_extra=True) + o = dump.Options(http2=False, server_replay=["nonexistent"], replay_kill_extra=True) tutils.raises(exceptions.OptionsError, dump.DumpMaster, o, proxy.DummyServer()) with tutils.tmpdir() as t: p = os.path.join(t, "rep") self.flowfile(p) - o = dump.Options(server_replay=[p], replay_kill_extra=True) + o = dump.Options(http2=False, server_replay=[p], replay_kill_extra=True) o.verbosity = 0 o.flow_detail = 0 m = dump.DumpMaster(o, proxy.DummyServer()) @@ -66,13 +66,13 @@ def test_replay(self): self.cycle(m, b"content") self.cycle(m, b"content") - o = dump.Options(server_replay=[p], replay_kill_extra=False) + o = dump.Options(http2=False, server_replay=[p], replay_kill_extra=False) o.verbosity = 0 o.flow_detail = 0 m = dump.DumpMaster(o, proxy.DummyServer()) self.cycle(m, b"nonexistent") - o = dump.Options(client_replay=[p], replay_kill_extra=False) + o = dump.Options(http2=False, client_replay=[p], replay_kill_extra=False) o.verbosity = 0 o.flow_detail = 0 m = dump.DumpMaster(o, proxy.DummyServer()) diff --git a/test/pathod/test_pathoc.py b/test/pathod/test_pathoc.py index 69baae545e..274e2be7f0 100644 --- a/test/pathod/test_pathoc.py +++ b/test/pathod/test_pathoc.py @@ -1,8 +1,8 @@ import io from mock import Mock +import pytest from mitmproxy.net import http -from mitmproxy.net import tcp from mitmproxy.net.http import http1 from mitmproxy import exceptions @@ -11,6 +11,7 @@ from mitmproxy.test import tutils from . 
import tservers +from ..conftest import requires_alpn def test_response(): @@ -211,45 +212,57 @@ class TestDaemonHTTP2(PathocTestDaemon): ssl = True explain = False - if tcp.HAS_ALPN: - - def test_http2(self): - c = pathoc.Pathoc( - ("127.0.0.1", self.d.port), - fp=None, - ssl=True, - use_http2=True, - ) - assert isinstance(c.protocol, HTTP2StateProtocol) - - c = pathoc.Pathoc( - ("127.0.0.1", self.d.port), - ) - assert c.protocol == http1 - - def test_http2_alpn(self): - c = pathoc.Pathoc( - ("127.0.0.1", self.d.port), - fp=None, - ssl=True, - use_http2=True, - http2_skip_connection_preface=True, - ) - - tmp_convert_to_ssl = c.convert_to_ssl - c.convert_to_ssl = Mock() - c.convert_to_ssl.side_effect = tmp_convert_to_ssl - with c.connect(): - _, kwargs = c.convert_to_ssl.call_args - assert set(kwargs['alpn_protos']) == set([b'http/1.1', b'h2']) - - def test_request(self): - c = pathoc.Pathoc( - ("127.0.0.1", self.d.port), - fp=None, - ssl=True, - use_http2=True, - ) + @requires_alpn + def test_http2(self): + c = pathoc.Pathoc( + ("127.0.0.1", self.d.port), + fp=None, + ssl=True, + use_http2=True, + ) + assert isinstance(c.protocol, HTTP2StateProtocol) + + c = pathoc.Pathoc( + ("127.0.0.1", self.d.port), + ) + assert c.protocol == http1 + + @requires_alpn + def test_http2_alpn(self): + c = pathoc.Pathoc( + ("127.0.0.1", self.d.port), + fp=None, + ssl=True, + use_http2=True, + http2_skip_connection_preface=True, + ) + + tmp_convert_to_ssl = c.convert_to_ssl + c.convert_to_ssl = Mock() + c.convert_to_ssl.side_effect = tmp_convert_to_ssl + with c.connect(): + _, kwargs = c.convert_to_ssl.call_args + assert set(kwargs['alpn_protos']) == set([b'http/1.1', b'h2']) + + @requires_alpn + def test_request(self): + c = pathoc.Pathoc( + ("127.0.0.1", self.d.port), + fp=None, + ssl=True, + use_http2=True, + ) + with c.connect(): + resp = c.request("get:/p/200") + assert resp.status_code == 200 + + def test_failing_request(self, disable_alpn): + c = pathoc.Pathoc( + ("127.0.0.1", self.d.port), + fp=None, + ssl=True, + use_http2=True, + ) + with pytest.raises(NotImplementedError): with c.connect(): - resp = c.request("get:/p/200") - assert resp.status_code == 200 + c.request("get:/p/200") diff --git a/test/pathod/test_pathod.py b/test/pathod/test_pathod.py index 6a4e1c6239..1e34af23d9 100644 --- a/test/pathod/test_pathod.py +++ b/test/pathod/test_pathod.py @@ -1,11 +1,14 @@ import io +import pytest + from pathod import pathod from mitmproxy.net import tcp from mitmproxy import exceptions from mitmproxy.test import tutils from . 
import tservers +from ..conftest import requires_alpn class TestPathod: @@ -257,8 +260,11 @@ class TestHTTP2(tservers.DaemonTests): ssl = True nohang = True - if tcp.HAS_ALPN: + @requires_alpn + def test_http2(self): + r, _ = self.pathoc(["GET:/"], ssl=True, use_http2=True) + assert r[0].status_code == 800 - def test_http2(self): + def test_no_http2(self, disable_alpn): + with pytest.raises(NotImplementedError): r, _ = self.pathoc(["GET:/"], ssl=True, use_http2=True) - assert r[0].status_code == 800 diff --git a/test/pathod/test_protocols_http2.py b/test/pathod/test_protocols_http2.py index d77702a3a8..8531887b18 100644 --- a/test/pathod/test_protocols_http2.py +++ b/test/pathod/test_protocols_http2.py @@ -11,6 +11,8 @@ from pathod.protocols.http2 import HTTP2StateProtocol, TCPHandler +from ..conftest import requires_alpn + class TestTCPHandlerWrapper: def test_wrapped(self): @@ -66,37 +68,35 @@ def test_perform_connection_preface_server(self, mock_client_method, mock_server assert mock_server_method.called +@requires_alpn class TestCheckALPNMatch(net_tservers.ServerTestBase): handler = EchoHandler ssl = dict( alpn_select=b'h2', ) - if tcp.HAS_ALPN: - - def test_check_alpn(self): - c = tcp.TCPClient(("127.0.0.1", self.port)) - with c.connect(): - c.convert_to_ssl(alpn_protos=[b'h2']) - protocol = HTTP2StateProtocol(c) - assert protocol.check_alpn() + def test_check_alpn(self): + c = tcp.TCPClient(("127.0.0.1", self.port)) + with c.connect(): + c.convert_to_ssl(alpn_protos=[b'h2']) + protocol = HTTP2StateProtocol(c) + assert protocol.check_alpn() +@requires_alpn class TestCheckALPNMismatch(net_tservers.ServerTestBase): handler = EchoHandler ssl = dict( alpn_select=None, ) - if tcp.HAS_ALPN: - - def test_check_alpn(self): - c = tcp.TCPClient(("127.0.0.1", self.port)) - with c.connect(): - c.convert_to_ssl(alpn_protos=[b'h2']) - protocol = HTTP2StateProtocol(c) - with raises(NotImplementedError): - protocol.check_alpn() + def test_check_alpn(self): + c = tcp.TCPClient(("127.0.0.1", self.port)) + with c.connect(): + c.convert_to_ssl(alpn_protos=[b'h2']) + protocol = HTTP2StateProtocol(c) + with raises(NotImplementedError): + protocol.check_alpn() class TestPerformServerConnectionPreface(net_tservers.ServerTestBase): diff --git a/tox.ini b/tox.ini index 3f8040d736..dc76cb704e 100644 --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,7 @@ basepython = python3.5 deps = {env:CI_DEPS:} -rrequirements.txt -passenv = CODECOV_TOKEN CI CI_* TRAVIS TRAVIS_* APPVEYOR APPVEYOR_* SNAPSHOT_* +passenv = CODECOV_TOKEN CI CI_* TRAVIS TRAVIS_* APPVEYOR APPVEYOR_* SNAPSHOT_* OPENSSL_* setenv = HOME = {envtmpdir} commands = mitmdump --sysinfo
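The `test/conftest.py` added in the diff above pairs a `skipif` marker with a monkeypatching fixture so that ALPN-dependent tests are skipped or forced onto the fallback path. Below is a self-contained sketch of that general pattern; the `fakemod` namespace and `HAS_FEATURE` flag are stand-ins for `mitmproxy.net.tcp` and `HAS_ALPN`, not real names from the project.

```python
import pytest
from types import SimpleNamespace

# Stand-in for the module whose capability flag the real conftest patches.
fakemod = SimpleNamespace(HAS_FEATURE=True)

requires_feature = pytest.mark.skipif(
    not fakemod.HAS_FEATURE,
    reason="requires optional feature support",
)


@pytest.fixture()
def disable_feature(monkeypatch):
    # Force the capability flag off so a test can exercise the fallback path.
    monkeypatch.setattr(fakemod, "HAS_FEATURE", False)


@requires_feature
def test_runs_only_when_feature_available():
    assert fakemod.HAS_FEATURE


def test_fallback_path(disable_feature):
    assert not fakemod.HAS_FEATURE
```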
ansible-collections__community.aws-1206
ec2_customer_gateway: bgp_asn is not required

### Summary

The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter. It says the ASN must be passed when state=present, but the code defaults to 65000 if the parameter is absent. See the ensure_cgw_present() method:

```
    def ensure_cgw_present(self, bgp_asn, ip_address):
        if not bgp_asn:
            bgp_asn = 65000
        response = self.ec2.create_customer_gateway(
            DryRun=False,
            Type='ipsec.1',
            PublicIp=ip_address,
            BgpAsn=bgp_asn,
        )
        return response
```

### Issue Type

Documentation Report

### Component Name

ec2_customer_gateway

### Ansible Version

```console (paste below)
$ ansible --version
ansible [core 2.12.4]
  config file = None
  configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible
  ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections
  executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible
  python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]
  jinja version = 3.1.1
  libyaml = True
```

### Collection Versions

```console (paste below)
$ ansible-galaxy collection list
```

### Configuration

```console (paste below)
$ ansible-config dump --only-changed
```

### OS / Environment

main branch, as of 2022-04-18.

### Additional Information

Suggested rewording:

```
options:
  bgp_asn:
    description:
      - Border Gateway Protocol (BGP) Autonomous System Number (ASN),
        defaults to 65000.
    type: int
```

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
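To make the defaulting behaviour concrete, here is a minimal, hypothetical sketch of the fallback described in the issue; `resolve_bgp_asn` is an illustrative name and not part of the module.

```python
# Sketch of the fallback performed by ensure_cgw_present() (illustrative only).
def resolve_bgp_asn(bgp_asn=None):
    # The module substitutes 65000 when no ASN is supplied.
    return bgp_asn if bgp_asn else 65000


assert resolve_bgp_asn() == 65000       # parameter omitted -> module default
assert resolve_bgp_asn(12345) == 12345  # explicit value passed through unchanged
```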
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = module.client('ec2')\n except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py" } ]
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN).\n - Defaults to C(65000) if not specified when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = 
module.client('ec2')\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py" } ]
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py index 9c00783a58a..f07e92f4f7c 100644 --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description:
kedro-org__kedro-1977
pickle.PickleDataSet docstring examples are incorrect

## Description

Kind of a small issue, but the "advanced" example in the [pickle.PickleDataSet API docs](https://kedro.readthedocs.io/en/stable/kedro.extras.datasets.pickle.PickleDataSet.html) is wrong. `compression` is not a valid [`joblib.dump`](https://joblib.readthedocs.io/en/latest/generated/joblib.dump.html) parameter (it should simply be `compress`), and [`joblib.load`](https://joblib.readthedocs.io/en/latest/generated/joblib.load.html) does not require a `compression` kwarg at all, since it can automagically discover the correct compression algorithm used.

## Context

Even though it's a trivial issue, I stumbled upon it and hope to fix it so that future users will not have to go to the joblib docs to find the problem.

## Possible Alternatives

I'm working on a trivial fix and will open a PR as soon as possible.
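For reference, a minimal sketch of the joblib keyword usage the issue describes; it assumes joblib is installed, and the file name and data are arbitrary placeholders.

```python
import joblib

data = {"weights": [0.1, 0.2, 0.3]}

# joblib.dump takes `compress`, not `compression`; a (method, level) tuple selects the codec.
joblib.dump(data, "model.pkl.gz", compress=("gzip", 3))

# joblib.load detects the compression from the file itself, so no extra keyword is needed.
reloaded = joblib.load("model.pkl.gz")
assert reloaded == data
```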
[ { "content": "\"\"\"``PickleDataSet`` loads/saves data from/to a Pickle file using an underlying\nfilesystem (e.g.: local, S3, GCS). The underlying functionality is supported by\nthe specified backend library passed in (defaults to the ``pickle`` library), so it\nsupports all allowed options for loading and saving pickle files.\n\"\"\"\nimport importlib\nfrom copy import deepcopy\nfrom pathlib import PurePosixPath\nfrom typing import Any, Dict\n\nimport fsspec\n\nfrom kedro.io.core import (\n AbstractVersionedDataSet,\n DataSetError,\n Version,\n get_filepath_str,\n get_protocol_and_path,\n)\n\n\nclass PickleDataSet(AbstractVersionedDataSet[Any, Any]):\n \"\"\"``PickleDataSet`` loads/saves data from/to a Pickle file using an underlying\n filesystem (e.g.: local, S3, GCS). The underlying functionality is supported by\n the specified backend library passed in (defaults to the ``pickle`` library), so it\n supports all allowed options for loading and saving pickle files.\n\n Example adding a catalog entry with\n `YAML API <https://kedro.readthedocs.io/en/stable/data/\\\n data_catalog.html#use-the-data-catalog-with-the-yaml-api>`_:\n\n .. code-block:: yaml\n\n >>> test_model: # simple example without compression\n >>> type: pickle.PickleDataSet\n >>> filepath: data/07_model_output/test_model.pkl\n >>> backend: pickle\n >>>\n >>> final_model: # example with load and save args\n >>> type: pickle.PickleDataSet\n >>> filepath: s3://your_bucket/final_model.pkl.lz4\n >>> backend: joblib\n >>> credentials: s3_credentials\n >>> save_args:\n >>> compression: lz4\n >>> load_args:\n >>> compression: lz4\n\n Example using Python API:\n ::\n\n >>> from kedro.extras.datasets.pickle import PickleDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],\n >>> 'col3': [5, 6]})\n >>>\n >>> # data_set = PickleDataSet(filepath=\"gcs://bucket/test.pkl\")\n >>> data_set = PickleDataSet(filepath=\"test.pkl\", backend=\"pickle\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data.equals(reloaded)\n >>>\n >>> # Add \"compress_pickle[lz4]\" to requirements.txt\n >>> data_set = PickleDataSet(filepath=\"test.pickle.lz4\",\n >>> backend=\"compress_pickle\",\n >>> load_args={\"compression\":\"lz4\"},\n >>> save_args={\"compression\":\"lz4\"})\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data.equals(reloaded)\n \"\"\"\n\n DEFAULT_LOAD_ARGS = {} # type: Dict[str, Any]\n DEFAULT_SAVE_ARGS = {} # type: Dict[str, Any]\n\n # pylint: disable=too-many-arguments,too-many-locals\n def __init__(\n self,\n filepath: str,\n backend: str = \"pickle\",\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n credentials: Dict[str, Any] = None,\n fs_args: Dict[str, Any] = None,\n ) -> None:\n \"\"\"Creates a new instance of ``PickleDataSet`` pointing to a concrete Pickle\n file on a specific filesystem. ``PickleDataSet`` supports custom backends to\n serialise/deserialise objects.\n\n Example backends that are compatible (non-exhaustive):\n * `pickle`\n * `joblib`\n * `dill`\n * `compress_pickle`\n\n Example backends that are incompatible:\n * `torch`\n\n Args:\n filepath: Filepath in POSIX format to a Pickle file prefixed with a protocol like\n `s3://`. 
If prefix is not provided, `file` protocol (local filesystem) will be used.\n The prefix should be any protocol supported by ``fsspec``.\n Note: `http(s)` doesn't support versioning.\n backend: Backend to use, must be an import path to a module which satisfies the\n ``pickle`` interface. That is, contains a `load` and `dump` function.\n Defaults to 'pickle'.\n load_args: Pickle options for loading pickle files.\n You can pass in arguments that the backend load function specified accepts, e.g:\n pickle.load: https://docs.python.org/3/library/pickle.html#pickle.load\n joblib.load: https://joblib.readthedocs.io/en/latest/generated/joblib.load.html\n dill.load: https://dill.readthedocs.io/en/latest/dill.html#dill._dill.load\n compress_pickle.load:\n https://lucianopaz.github.io/compress_pickle/html/api/compress_pickle.html#compress_pickle.compress_pickle.load\n All defaults are preserved.\n save_args: Pickle options for saving pickle files.\n You can pass in arguments that the backend dump function specified accepts, e.g:\n pickle.dump: https://docs.python.org/3/library/pickle.html#pickle.dump\n joblib.dump: https://joblib.readthedocs.io/en/latest/generated/joblib.dump.html\n dill.dump: https://dill.readthedocs.io/en/latest/dill.html#dill._dill.dump\n compress_pickle.dump:\n https://lucianopaz.github.io/compress_pickle/html/api/compress_pickle.html#compress_pickle.compress_pickle.dump\n All defaults are preserved.\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n credentials: Credentials required to get access to the underlying filesystem.\n E.g. for ``GCSFileSystem`` it should look like `{\"token\": None}`.\n fs_args: Extra arguments to pass into underlying filesystem class constructor\n (e.g. `{\"project\": \"my-project\"}` for ``GCSFileSystem``), as well as\n to pass to the filesystem's `open` method through nested keys\n `open_args_load` and `open_args_save`.\n Here you can find all available arguments for `open`:\n https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.open\n All defaults are preserved, except `mode`, which is set to `wb` when saving.\n\n Raises:\n ValueError: If ``backend`` does not satisfy the `pickle` interface.\n ImportError: If the ``backend`` module could not be imported.\n \"\"\"\n # We do not store `imported_backend` as an attribute to be used in `load`/`save`\n # as this would mean the dataset cannot be deepcopied (module objects cannot be\n # pickled). The import here is purely to raise any errors as early as possible.\n # Repeated imports in the `load` and `save` methods should not be a significant\n # performance hit as Python caches imports.\n try:\n imported_backend = importlib.import_module(backend)\n except ImportError as exc:\n raise ImportError(\n f\"Selected backend '{backend}' could not be imported. \"\n \"Make sure it is installed and importable.\"\n ) from exc\n\n if not (\n hasattr(imported_backend, \"load\") and hasattr(imported_backend, \"dump\")\n ):\n raise ValueError(\n f\"Selected backend '{backend}' should satisfy the pickle interface. 
\"\n \"Missing one of 'load' and 'dump' on the backend.\"\n )\n\n _fs_args = deepcopy(fs_args) or {}\n _fs_open_args_load = _fs_args.pop(\"open_args_load\", {})\n _fs_open_args_save = _fs_args.pop(\"open_args_save\", {})\n _credentials = deepcopy(credentials) or {}\n\n protocol, path = get_protocol_and_path(filepath, version)\n if protocol == \"file\":\n _fs_args.setdefault(\"auto_mkdir\", True)\n\n self._protocol = protocol\n self._fs = fsspec.filesystem(self._protocol, **_credentials, **_fs_args)\n\n super().__init__(\n filepath=PurePosixPath(path),\n version=version,\n exists_function=self._fs.exists,\n glob_function=self._fs.glob,\n )\n\n self._backend = backend\n\n # Handle default load and save arguments\n self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS)\n if load_args is not None:\n self._load_args.update(load_args)\n self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS)\n if save_args is not None:\n self._save_args.update(save_args)\n\n _fs_open_args_save.setdefault(\"mode\", \"wb\")\n self._fs_open_args_load = _fs_open_args_load\n self._fs_open_args_save = _fs_open_args_save\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n backend=self._backend,\n protocol=self._protocol,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def _load(self) -> Any:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n\n with self._fs.open(load_path, **self._fs_open_args_load) as fs_file:\n imported_backend = importlib.import_module(self._backend)\n return imported_backend.load(fs_file, **self._load_args) # type: ignore\n\n def _save(self, data: Any) -> None:\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n with self._fs.open(save_path, **self._fs_open_args_save) as fs_file:\n try:\n imported_backend = importlib.import_module(self._backend)\n imported_backend.dump(data, fs_file, **self._save_args) # type: ignore\n except Exception as exc:\n raise DataSetError(\n f\"{data.__class__} was not serialised due to: {exc}\"\n ) from exc\n\n self._invalidate_cache()\n\n def _exists(self) -> bool:\n try:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n except DataSetError:\n return False\n\n return self._fs.exists(load_path)\n\n def _release(self) -> None:\n super()._release()\n self._invalidate_cache()\n\n def _invalidate_cache(self) -> None:\n \"\"\"Invalidate underlying filesystem caches.\"\"\"\n filepath = get_filepath_str(self._filepath, self._protocol)\n self._fs.invalidate_cache(filepath)\n", "path": "kedro/extras/datasets/pickle/pickle_dataset.py" } ]
[ { "content": "\"\"\"``PickleDataSet`` loads/saves data from/to a Pickle file using an underlying\nfilesystem (e.g.: local, S3, GCS). The underlying functionality is supported by\nthe specified backend library passed in (defaults to the ``pickle`` library), so it\nsupports all allowed options for loading and saving pickle files.\n\"\"\"\nimport importlib\nfrom copy import deepcopy\nfrom pathlib import PurePosixPath\nfrom typing import Any, Dict\n\nimport fsspec\n\nfrom kedro.io.core import (\n AbstractVersionedDataSet,\n DataSetError,\n Version,\n get_filepath_str,\n get_protocol_and_path,\n)\n\n\nclass PickleDataSet(AbstractVersionedDataSet[Any, Any]):\n \"\"\"``PickleDataSet`` loads/saves data from/to a Pickle file using an underlying\n filesystem (e.g.: local, S3, GCS). The underlying functionality is supported by\n the specified backend library passed in (defaults to the ``pickle`` library), so it\n supports all allowed options for loading and saving pickle files.\n\n Example adding a catalog entry with\n `YAML API <https://kedro.readthedocs.io/en/stable/data/\\\n data_catalog.html#use-the-data-catalog-with-the-yaml-api>`_:\n\n .. code-block:: yaml\n\n >>> test_model: # simple example without compression\n >>> type: pickle.PickleDataSet\n >>> filepath: data/07_model_output/test_model.pkl\n >>> backend: pickle\n >>>\n >>> final_model: # example with load and save args\n >>> type: pickle.PickleDataSet\n >>> filepath: s3://your_bucket/final_model.pkl.lz4\n >>> backend: joblib\n >>> credentials: s3_credentials\n >>> save_args:\n >>> compress: lz4\n\n Example using Python API:\n ::\n\n >>> from kedro.extras.datasets.pickle import PickleDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],\n >>> 'col3': [5, 6]})\n >>>\n >>> # data_set = PickleDataSet(filepath=\"gcs://bucket/test.pkl\")\n >>> data_set = PickleDataSet(filepath=\"test.pkl\", backend=\"pickle\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data.equals(reloaded)\n >>>\n >>> # Add \"compress_pickle[lz4]\" to requirements.txt\n >>> data_set = PickleDataSet(filepath=\"test.pickle.lz4\",\n >>> backend=\"compress_pickle\",\n >>> load_args={\"compression\":\"lz4\"},\n >>> save_args={\"compression\":\"lz4\"})\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data.equals(reloaded)\n \"\"\"\n\n DEFAULT_LOAD_ARGS = {} # type: Dict[str, Any]\n DEFAULT_SAVE_ARGS = {} # type: Dict[str, Any]\n\n # pylint: disable=too-many-arguments,too-many-locals\n def __init__(\n self,\n filepath: str,\n backend: str = \"pickle\",\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n credentials: Dict[str, Any] = None,\n fs_args: Dict[str, Any] = None,\n ) -> None:\n \"\"\"Creates a new instance of ``PickleDataSet`` pointing to a concrete Pickle\n file on a specific filesystem. ``PickleDataSet`` supports custom backends to\n serialise/deserialise objects.\n\n Example backends that are compatible (non-exhaustive):\n * `pickle`\n * `joblib`\n * `dill`\n * `compress_pickle`\n\n Example backends that are incompatible:\n * `torch`\n\n Args:\n filepath: Filepath in POSIX format to a Pickle file prefixed with a protocol like\n `s3://`. If prefix is not provided, `file` protocol (local filesystem) will be used.\n The prefix should be any protocol supported by ``fsspec``.\n Note: `http(s)` doesn't support versioning.\n backend: Backend to use, must be an import path to a module which satisfies the\n ``pickle`` interface. 
That is, contains a `load` and `dump` function.\n Defaults to 'pickle'.\n load_args: Pickle options for loading pickle files.\n You can pass in arguments that the backend load function specified accepts, e.g:\n pickle.load: https://docs.python.org/3/library/pickle.html#pickle.load\n joblib.load: https://joblib.readthedocs.io/en/latest/generated/joblib.load.html\n dill.load: https://dill.readthedocs.io/en/latest/dill.html#dill._dill.load\n compress_pickle.load:\n https://lucianopaz.github.io/compress_pickle/html/api/compress_pickle.html#compress_pickle.compress_pickle.load\n All defaults are preserved.\n save_args: Pickle options for saving pickle files.\n You can pass in arguments that the backend dump function specified accepts, e.g:\n pickle.dump: https://docs.python.org/3/library/pickle.html#pickle.dump\n joblib.dump: https://joblib.readthedocs.io/en/latest/generated/joblib.dump.html\n dill.dump: https://dill.readthedocs.io/en/latest/dill.html#dill._dill.dump\n compress_pickle.dump:\n https://lucianopaz.github.io/compress_pickle/html/api/compress_pickle.html#compress_pickle.compress_pickle.dump\n All defaults are preserved.\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n credentials: Credentials required to get access to the underlying filesystem.\n E.g. for ``GCSFileSystem`` it should look like `{\"token\": None}`.\n fs_args: Extra arguments to pass into underlying filesystem class constructor\n (e.g. `{\"project\": \"my-project\"}` for ``GCSFileSystem``), as well as\n to pass to the filesystem's `open` method through nested keys\n `open_args_load` and `open_args_save`.\n Here you can find all available arguments for `open`:\n https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.open\n All defaults are preserved, except `mode`, which is set to `wb` when saving.\n\n Raises:\n ValueError: If ``backend`` does not satisfy the `pickle` interface.\n ImportError: If the ``backend`` module could not be imported.\n \"\"\"\n # We do not store `imported_backend` as an attribute to be used in `load`/`save`\n # as this would mean the dataset cannot be deepcopied (module objects cannot be\n # pickled). The import here is purely to raise any errors as early as possible.\n # Repeated imports in the `load` and `save` methods should not be a significant\n # performance hit as Python caches imports.\n try:\n imported_backend = importlib.import_module(backend)\n except ImportError as exc:\n raise ImportError(\n f\"Selected backend '{backend}' could not be imported. \"\n \"Make sure it is installed and importable.\"\n ) from exc\n\n if not (\n hasattr(imported_backend, \"load\") and hasattr(imported_backend, \"dump\")\n ):\n raise ValueError(\n f\"Selected backend '{backend}' should satisfy the pickle interface. 
\"\n \"Missing one of 'load' and 'dump' on the backend.\"\n )\n\n _fs_args = deepcopy(fs_args) or {}\n _fs_open_args_load = _fs_args.pop(\"open_args_load\", {})\n _fs_open_args_save = _fs_args.pop(\"open_args_save\", {})\n _credentials = deepcopy(credentials) or {}\n\n protocol, path = get_protocol_and_path(filepath, version)\n if protocol == \"file\":\n _fs_args.setdefault(\"auto_mkdir\", True)\n\n self._protocol = protocol\n self._fs = fsspec.filesystem(self._protocol, **_credentials, **_fs_args)\n\n super().__init__(\n filepath=PurePosixPath(path),\n version=version,\n exists_function=self._fs.exists,\n glob_function=self._fs.glob,\n )\n\n self._backend = backend\n\n # Handle default load and save arguments\n self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS)\n if load_args is not None:\n self._load_args.update(load_args)\n self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS)\n if save_args is not None:\n self._save_args.update(save_args)\n\n _fs_open_args_save.setdefault(\"mode\", \"wb\")\n self._fs_open_args_load = _fs_open_args_load\n self._fs_open_args_save = _fs_open_args_save\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n backend=self._backend,\n protocol=self._protocol,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def _load(self) -> Any:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n\n with self._fs.open(load_path, **self._fs_open_args_load) as fs_file:\n imported_backend = importlib.import_module(self._backend)\n return imported_backend.load(fs_file, **self._load_args) # type: ignore\n\n def _save(self, data: Any) -> None:\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n with self._fs.open(save_path, **self._fs_open_args_save) as fs_file:\n try:\n imported_backend = importlib.import_module(self._backend)\n imported_backend.dump(data, fs_file, **self._save_args) # type: ignore\n except Exception as exc:\n raise DataSetError(\n f\"{data.__class__} was not serialised due to: {exc}\"\n ) from exc\n\n self._invalidate_cache()\n\n def _exists(self) -> bool:\n try:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n except DataSetError:\n return False\n\n return self._fs.exists(load_path)\n\n def _release(self) -> None:\n super()._release()\n self._invalidate_cache()\n\n def _invalidate_cache(self) -> None:\n \"\"\"Invalidate underlying filesystem caches.\"\"\"\n filepath = get_filepath_str(self._filepath, self._protocol)\n self._fs.invalidate_cache(filepath)\n", "path": "kedro/extras/datasets/pickle/pickle_dataset.py" } ]
diff --git a/docs/source/deployment/aws_sagemaker.md b/docs/source/deployment/aws_sagemaker.md index bf4904d5a0..c062252580 100644 --- a/docs/source/deployment/aws_sagemaker.md +++ b/docs/source/deployment/aws_sagemaker.md @@ -111,9 +111,9 @@ s3: ### Update the project settings -Now you need to tell Kedro to use the [`TemplatedConfigLoader`](/kedro.config.TemplatedConfigLoader) instead of the default `ConfigLoader` class by setting the `CONFIG_LOADER_CLASS` accordingly. +Now you need to tell Kedro to use the [`TemplatedConfigLoader`](/kedro.config.TemplatedConfigLoader) instead of the default `ConfigLoader` class by setting the `CONFIG_LOADER_CLASS` accordingly. -You also need to point Kedro to your `globals.yml` file. +You also need to point Kedro to your `globals.yml` file. To make both changes, open the `src/kedro_tutorial/settings.py` file and set the `CONFIG_LOADER_CLASS` and `CONFIG_LOADER_ARGS` variables: diff --git a/kedro/extras/datasets/pickle/pickle_dataset.py b/kedro/extras/datasets/pickle/pickle_dataset.py index f565edd37d..b52ee9ced3 100644 --- a/kedro/extras/datasets/pickle/pickle_dataset.py +++ b/kedro/extras/datasets/pickle/pickle_dataset.py @@ -42,9 +42,7 @@ class PickleDataSet(AbstractVersionedDataSet[Any, Any]): >>> backend: joblib >>> credentials: s3_credentials >>> save_args: - >>> compression: lz4 - >>> load_args: - >>> compression: lz4 + >>> compress: lz4 Example using Python API: ::
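Translated into Python API usage, the corrected catalog example in the diff above corresponds to something like the following. This is only a sketch: it assumes `joblib` (and an LZ4 backend) is installed, and the filepath and saved object are illustrative, not taken from the Kedro docs.

```python
from kedro.extras.datasets.pickle import PickleDataSet

# joblib.dump() takes `compress` (not `compression`), which is what the
# docstring fix in the diff above reflects.
data_set = PickleDataSet(
    filepath="data/07_model_output/final_model.pkl.lz4",  # illustrative path
    backend="joblib",
    save_args={"compress": "lz4"},
)
data_set.save({"weights": [1, 2, 3]})
reloaded = data_set.load()
assert reloaded == {"weights": [1, 2, 3]}
```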
googleapis__python-bigquery-426
_from_api_repr_scalar fails if parameter value is None

Tested on the latest (2.6.0) version, using Python 3.8 on Linux.

If an `ArrayQueryParameter` containing at least one `None` value is added to `query_parameters`, then accessing `job.query_parameters` (the result after submitting the job) raises `'NoneType' object has no attribute 'mode'`.

This is because:

- `_from_api_repr_scalar` is called on `ArrayQueryParameter`
- `_QUERY_PARAMS_FROM_JSON[array_type](value, None) for value in values` is evaluated with `value` set to `None`
- this corresponds to `_int_from_json(None, None)`
- `_not_null(None, None)` is called
- `return value is not None or field.mode != "NULLABLE"` raises the exception, because `field` is `None`

Stack trace:
```
AttributeError: 'NoneType' object has no attribute 'mode'
(snip)
File "(snip)/site-packages/google/cloud/bigquery/job/query.py", line 632, in query_parameters
    return self._configuration.query_parameters
File "(snip)/site-packages/google/cloud/bigquery/job/query.py", line 314, in query_parameters
    return _from_api_repr_query_parameters(prop)
File "(snip)/site-packages/google/cloud/bigquery/job/query.py", line 79, in _from_api_repr_query_parameters
    return [_query_param_from_api_repr(mapping) for mapping in resource]
File "(snip)/site-packages/google/cloud/bigquery/job/query.py", line 79, in <listcomp>
    return [_query_param_from_api_repr(mapping) for mapping in resource]
File "(snip)/site-packages/google/cloud/bigquery/query.py", line 632, in _query_param_from_api_repr
    return klass.from_api_repr(resource)
File "(snip)/site-packages/google/cloud/bigquery/query.py", line 257, in from_api_repr
    return cls._from_api_repr_scalar(resource)
File "(snip)/site-packages/google/cloud/bigquery/query.py", line 239, in _from_api_repr_scalar
    converted = [
File "(snip)/site-packages/google/cloud/bigquery/query.py", line 240, in <listcomp>
    _QUERY_PARAMS_FROM_JSON[array_type](value, None) for value in values
File "(snip)/site-packages/google/cloud/bigquery/_helpers.py", line 48, in _int_from_json
    if _not_null(value, field):
File "(snip)/site-packages/google/cloud/bigquery/_helpers.py", line 43, in _not_null
    return value is not None or field.mode != "NULLABLE"
```
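For clarity, here is a minimal reproduction sketch of the failure path described above. The query text, the parameter name `ids`, and the use of application-default credentials are illustrative assumptions; the essential ingredients are an `ArrayQueryParameter` with a `None` element and the later access to `job.query_parameters`.

```python
from google.cloud import bigquery

client = bigquery.Client()  # assumes default credentials and project
job_config = bigquery.QueryJobConfig(
    query_parameters=[
        # An array parameter that includes a None element.
        bigquery.ArrayQueryParameter("ids", "INT64", [1, None]),
    ]
)
job = client.query("SELECT id FROM UNNEST(@ids) AS id", job_config=job_config)
job.result()

# Reading the parameters back from the job resource goes through
# ArrayQueryParameter._from_api_repr_scalar(), which ends up calling
# _int_from_json(None, None) -> _not_null(None, None) -> None.mode,
# raising AttributeError on versions up to 2.6.0.
print(job.query_parameters)
```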
[ { "content": "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shared helper functions for BigQuery API classes.\"\"\"\n\nimport base64\nimport datetime\nimport decimal\nimport re\nimport six\n\nfrom google.cloud._helpers import UTC\nfrom google.cloud._helpers import _date_from_iso8601_date\nfrom google.cloud._helpers import _datetime_from_microseconds\nfrom google.cloud._helpers import _RFC3339_MICROS\nfrom google.cloud._helpers import _RFC3339_NO_FRACTION\nfrom google.cloud._helpers import _to_bytes\n\n_RFC3339_MICROS_NO_ZULU = \"%Y-%m-%dT%H:%M:%S.%f\"\n_TIMEONLY_WO_MICROS = \"%H:%M:%S\"\n_TIMEONLY_W_MICROS = \"%H:%M:%S.%f\"\n_PROJECT_PREFIX_PATTERN = re.compile(\n r\"\"\"\n (?P<project_id>\\S+\\:[^.]+)\\.(?P<dataset_id>[^.]+)(?:$|\\.(?P<custom_id>[^.]+)$)\n\"\"\",\n re.VERBOSE,\n)\n\n\ndef _not_null(value, field):\n \"\"\"Check whether 'value' should be coerced to 'field' type.\"\"\"\n return value is not None or field.mode != \"NULLABLE\"\n\n\ndef _int_from_json(value, field):\n \"\"\"Coerce 'value' to an int, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return int(value)\n\n\ndef _float_from_json(value, field):\n \"\"\"Coerce 'value' to a float, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return float(value)\n\n\ndef _decimal_from_json(value, field):\n \"\"\"Coerce 'value' to a Decimal, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return decimal.Decimal(value)\n\n\ndef _bool_from_json(value, field):\n \"\"\"Coerce 'value' to a bool, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return value.lower() in [\"t\", \"true\", \"1\"]\n\n\ndef _string_from_json(value, _):\n \"\"\"NOOP string -> string coercion\"\"\"\n return value\n\n\ndef _bytes_from_json(value, field):\n \"\"\"Base64-decode value\"\"\"\n if _not_null(value, field):\n return base64.standard_b64decode(_to_bytes(value))\n\n\ndef _timestamp_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime, if set or not nullable.\"\"\"\n if _not_null(value, field):\n # value will be a integer in seconds, to microsecond precision, in UTC.\n return _datetime_from_microseconds(int(value))\n\n\ndef _timestamp_query_param_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime, if set or not nullable.\n\n Args:\n value (str): The timestamp.\n\n field (google.cloud.bigquery.schema.SchemaField):\n The field corresponding to the value.\n\n Returns:\n Optional[datetime.datetime]:\n The parsed datetime object from\n ``value`` if the ``field`` is not null (otherwise it is\n :data:`None`).\n \"\"\"\n if _not_null(value, field):\n # Canonical formats for timestamps in BigQuery are flexible. 
See:\n # g.co/cloud/bigquery/docs/reference/standard-sql/data-types#timestamp-type\n # The separator between the date and time can be 'T' or ' '.\n value = value.replace(\" \", \"T\", 1)\n # The UTC timezone may be formatted as Z or +00:00.\n value = value.replace(\"Z\", \"\")\n value = value.replace(\"+00:00\", \"\")\n\n if \".\" in value:\n # YYYY-MM-DDTHH:MM:SS.ffffff\n return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU).replace(\n tzinfo=UTC\n )\n else:\n # YYYY-MM-DDTHH:MM:SS\n return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION).replace(\n tzinfo=UTC\n )\n else:\n return None\n\n\ndef _datetime_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime, if set or not nullable.\n\n Args:\n value (str): The timestamp.\n field (google.cloud.bigquery.schema.SchemaField):\n The field corresponding to the value.\n\n Returns:\n Optional[datetime.datetime]:\n The parsed datetime object from\n ``value`` if the ``field`` is not null (otherwise it is\n :data:`None`).\n \"\"\"\n if _not_null(value, field):\n if \".\" in value:\n # YYYY-MM-DDTHH:MM:SS.ffffff\n return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU)\n else:\n # YYYY-MM-DDTHH:MM:SS\n return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION)\n else:\n return None\n\n\ndef _date_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime date, if set or not nullable\"\"\"\n if _not_null(value, field):\n # value will be a string, in YYYY-MM-DD form.\n return _date_from_iso8601_date(value)\n\n\ndef _time_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime date, if set or not nullable\"\"\"\n if _not_null(value, field):\n if len(value) == 8: # HH:MM:SS\n fmt = _TIMEONLY_WO_MICROS\n elif len(value) == 15: # HH:MM:SS.micros\n fmt = _TIMEONLY_W_MICROS\n else:\n raise ValueError(\"Unknown time format: {}\".format(value))\n return datetime.datetime.strptime(value, fmt).time()\n\n\ndef _record_from_json(value, field):\n \"\"\"Coerce 'value' to a mapping, if set or not nullable.\"\"\"\n if _not_null(value, field):\n record = {}\n record_iter = zip(field.fields, value[\"f\"])\n for subfield, cell in record_iter:\n converter = _CELLDATA_FROM_JSON[subfield.field_type]\n if subfield.mode == \"REPEATED\":\n value = [converter(item[\"v\"], subfield) for item in cell[\"v\"]]\n else:\n value = converter(cell[\"v\"], subfield)\n record[subfield.name] = value\n return record\n\n\n_CELLDATA_FROM_JSON = {\n \"INTEGER\": _int_from_json,\n \"INT64\": _int_from_json,\n \"FLOAT\": _float_from_json,\n \"FLOAT64\": _float_from_json,\n \"NUMERIC\": _decimal_from_json,\n \"BIGNUMERIC\": _decimal_from_json,\n \"BOOLEAN\": _bool_from_json,\n \"BOOL\": _bool_from_json,\n \"STRING\": _string_from_json,\n \"GEOGRAPHY\": _string_from_json,\n \"BYTES\": _bytes_from_json,\n \"TIMESTAMP\": _timestamp_from_json,\n \"DATETIME\": _datetime_from_json,\n \"DATE\": _date_from_json,\n \"TIME\": _time_from_json,\n \"RECORD\": _record_from_json,\n}\n\n_QUERY_PARAMS_FROM_JSON = dict(_CELLDATA_FROM_JSON)\n_QUERY_PARAMS_FROM_JSON[\"TIMESTAMP\"] = _timestamp_query_param_from_json\n\n\ndef _field_to_index_mapping(schema):\n \"\"\"Create a mapping from schema field name to index of field.\"\"\"\n return {f.name: i for i, f in enumerate(schema)}\n\n\ndef _field_from_json(resource, field):\n converter = _CELLDATA_FROM_JSON.get(field.field_type, lambda value, _: value)\n if field.mode == \"REPEATED\":\n return [converter(item[\"v\"], field) for item in resource]\n else:\n return converter(resource, field)\n\n\ndef 
_row_tuple_from_json(row, schema):\n \"\"\"Convert JSON row data to row with appropriate types.\n\n Note: ``row['f']`` and ``schema`` are presumed to be of the same length.\n\n Args:\n row (Dict): A JSON response row to be converted.\n schema (Sequence[Union[ \\\n :class:`~google.cloud.bigquery.schema.SchemaField`, \\\n Mapping[str, Any] \\\n ]]): Specification of the field types in ``row``.\n\n Returns:\n Tuple: A tuple of data converted to native types.\n \"\"\"\n from google.cloud.bigquery.schema import _to_schema_fields\n\n schema = _to_schema_fields(schema)\n\n row_data = []\n for field, cell in zip(schema, row[\"f\"]):\n row_data.append(_field_from_json(cell[\"v\"], field))\n return tuple(row_data)\n\n\ndef _rows_from_json(values, schema):\n \"\"\"Convert JSON row data to rows with appropriate types.\n\n Args:\n values (Sequence[Dict]): The list of responses (JSON rows) to convert.\n schema (Sequence[Union[ \\\n :class:`~google.cloud.bigquery.schema.SchemaField`, \\\n Mapping[str, Any] \\\n ]]):\n The table's schema. If any item is a mapping, its content must be\n compatible with\n :meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.\n\n Returns:\n List[:class:`~google.cloud.bigquery.Row`]\n \"\"\"\n from google.cloud.bigquery import Row\n from google.cloud.bigquery.schema import _to_schema_fields\n\n schema = _to_schema_fields(schema)\n field_to_index = _field_to_index_mapping(schema)\n return [Row(_row_tuple_from_json(r, schema), field_to_index) for r in values]\n\n\ndef _int_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, int):\n value = str(value)\n return value\n\n\ndef _float_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n return value\n\n\ndef _decimal_to_json(value):\n \"\"\"Coerce 'value' to a JSON-compatible representation.\"\"\"\n if isinstance(value, decimal.Decimal):\n value = str(value)\n return value\n\n\ndef _bool_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, bool):\n value = \"true\" if value else \"false\"\n return value\n\n\ndef _bytes_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, bytes):\n value = base64.standard_b64encode(value).decode(\"ascii\")\n return value\n\n\ndef _timestamp_to_json_parameter(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\n\n This version returns the string representation used in query parameters.\n \"\"\"\n if isinstance(value, datetime.datetime):\n if value.tzinfo not in (None, UTC):\n # Convert to UTC and remove the time zone info.\n value = value.replace(tzinfo=None) - value.utcoffset()\n value = \"%s %s+00:00\" % (value.date().isoformat(), value.time().isoformat())\n return value\n\n\ndef _timestamp_to_json_row(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, datetime.datetime):\n value = value.strftime(_RFC3339_MICROS)\n return value\n\n\ndef _datetime_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, datetime.datetime):\n value = value.strftime(_RFC3339_MICROS_NO_ZULU)\n return value\n\n\ndef _date_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, datetime.date):\n value = value.isoformat()\n return value\n\n\ndef _time_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, 
datetime.time):\n value = value.isoformat()\n return value\n\n\n# Converters used for scalar values marshalled as row data.\n_SCALAR_VALUE_TO_JSON_ROW = {\n \"INTEGER\": _int_to_json,\n \"INT64\": _int_to_json,\n \"FLOAT\": _float_to_json,\n \"FLOAT64\": _float_to_json,\n \"NUMERIC\": _decimal_to_json,\n \"BIGNUMERIC\": _decimal_to_json,\n \"BOOLEAN\": _bool_to_json,\n \"BOOL\": _bool_to_json,\n \"BYTES\": _bytes_to_json,\n \"TIMESTAMP\": _timestamp_to_json_row,\n \"DATETIME\": _datetime_to_json,\n \"DATE\": _date_to_json,\n \"TIME\": _time_to_json,\n}\n\n\n# Converters used for scalar values marshalled as query parameters.\n_SCALAR_VALUE_TO_JSON_PARAM = _SCALAR_VALUE_TO_JSON_ROW.copy()\n_SCALAR_VALUE_TO_JSON_PARAM[\"TIMESTAMP\"] = _timestamp_to_json_parameter\n\n\ndef _scalar_field_to_json(field, row_value):\n \"\"\"Maps a field and value to a JSON-safe value.\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name.\n row_value (Any):\n Value to be converted, based on the field's type.\n\n Returns:\n Any: A JSON-serializable object.\n \"\"\"\n converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)\n if converter is None: # STRING doesn't need converting\n return row_value\n return converter(row_value)\n\n\ndef _repeated_field_to_json(field, row_value):\n \"\"\"Convert a repeated/array field to its JSON representation.\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name. The\n field mode must equal ``REPEATED``.\n row_value (Sequence[Any]):\n A sequence of values to convert to JSON-serializable values.\n\n Returns:\n List[Any]: A list of JSON-serializable objects.\n \"\"\"\n values = []\n for item in row_value:\n values.append(_single_field_to_json(field, item))\n return values\n\n\ndef _record_field_to_json(fields, row_value):\n \"\"\"Convert a record/struct field to its JSON representation.\n\n Args:\n fields (Sequence[google.cloud.bigquery.schema.SchemaField]):\n The :class:`~google.cloud.bigquery.schema.SchemaField`s of the\n record's subfields to use for type conversion and field names.\n row_value (Union[Tuple[Any], Mapping[str, Any]):\n A tuple or dictionary to convert to JSON-serializable values.\n\n Returns:\n Mapping[str, Any]: A JSON-serializable dictionary.\n \"\"\"\n isdict = isinstance(row_value, dict)\n\n # If row is passed as a tuple, make the length sanity check to avoid either\n # uninformative index errors a few lines below or silently omitting some of\n # the values from the result (we cannot know exactly which fields are missing\n # or redundant, since we don't have their names).\n if not isdict and len(row_value) != len(fields):\n msg = \"The number of row fields ({}) does not match schema length ({}).\".format(\n len(row_value), len(fields)\n )\n raise ValueError(msg)\n\n record = {}\n\n if isdict:\n processed_fields = set()\n\n for subindex, subfield in enumerate(fields):\n subname = subfield.name\n subvalue = row_value.get(subname) if isdict else row_value[subindex]\n\n # None values are unconditionally omitted\n if subvalue is not None:\n record[subname] = _field_to_json(subfield, subvalue)\n\n if isdict:\n processed_fields.add(subname)\n\n # Unknown fields should not be silently dropped, include them. 
Since there\n # is no schema information available for them, include them as strings\n # to make them JSON-serializable.\n if isdict:\n not_processed = set(row_value.keys()) - processed_fields\n\n for field_name in not_processed:\n value = row_value[field_name]\n if value is not None:\n record[field_name] = six.text_type(value)\n\n return record\n\n\ndef _single_field_to_json(field, row_value):\n \"\"\"Convert a single field into JSON-serializable values.\n\n Ignores mode so that this can function for ARRAY / REPEATING fields\n without requiring a deepcopy of the field. See:\n https://github.com/googleapis/python-bigquery/issues/6\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name.\n\n row_value (Any):\n Scalar or Struct to be inserted. The type\n is inferred from the SchemaField's field_type.\n\n Returns:\n Any: A JSON-serializable object.\n \"\"\"\n if row_value is None:\n return None\n\n if field.field_type == \"RECORD\":\n return _record_field_to_json(field.fields, row_value)\n\n return _scalar_field_to_json(field, row_value)\n\n\ndef _field_to_json(field, row_value):\n \"\"\"Convert a field into JSON-serializable values.\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name.\n\n row_value (Union[Sequence[List], Any]):\n Row data to be inserted. If the SchemaField's mode is\n REPEATED, assume this is a list. If not, the type\n is inferred from the SchemaField's field_type.\n\n Returns:\n Any: A JSON-serializable object.\n \"\"\"\n if row_value is None:\n return None\n\n if field.mode == \"REPEATED\":\n return _repeated_field_to_json(field, row_value)\n\n return _single_field_to_json(field, row_value)\n\n\ndef _snake_to_camel_case(value):\n \"\"\"Convert snake case string to camel case.\"\"\"\n words = value.split(\"_\")\n return words[0] + \"\".join(map(str.capitalize, words[1:]))\n\n\ndef _get_sub_prop(container, keys, default=None):\n \"\"\"Get a nested value from a dictionary.\n\n This method works like ``dict.get(key)``, but for nested values.\n\n Args:\n container (Dict):\n A dictionary which may contain other dictionaries as values.\n keys (Iterable):\n A sequence of keys to attempt to get the value for. Each item in\n the sequence represents a deeper nesting. The first key is for\n the top level. If there is a dictionary there, the second key\n attempts to get the value within that, and so on.\n default (Optional[object]):\n Value to returned if any of the keys are not found.\n Defaults to ``None``.\n\n Examples:\n Get a top-level value (equivalent to ``container.get('key')``).\n\n >>> _get_sub_prop({'key': 'value'}, ['key'])\n 'value'\n\n Get a top-level value, providing a default (equivalent to\n ``container.get('key', default='default')``).\n\n >>> _get_sub_prop({'nothere': 123}, ['key'], default='not found')\n 'not found'\n\n Get a nested value.\n\n >>> _get_sub_prop({'key': {'subkey': 'value'}}, ['key', 'subkey'])\n 'value'\n\n Returns:\n object: The value if present or the default.\n \"\"\"\n sub_val = container\n for key in keys:\n if key not in sub_val:\n return default\n sub_val = sub_val[key]\n return sub_val\n\n\ndef _set_sub_prop(container, keys, value):\n \"\"\"Set a nested value in a dictionary.\n\n Args:\n container (Dict):\n A dictionary which may contain other dictionaries as values.\n keys (Iterable):\n A sequence of keys to attempt to set the value for. Each item in\n the sequence represents a deeper nesting. 
The first key is for\n the top level. If there is a dictionary there, the second key\n attempts to get the value within that, and so on.\n value (object): Value to set within the container.\n\n Examples:\n Set a top-level value (equivalent to ``container['key'] = 'value'``).\n\n >>> container = {}\n >>> _set_sub_prop(container, ['key'], 'value')\n >>> container\n {'key': 'value'}\n\n Set a nested value.\n\n >>> container = {}\n >>> _set_sub_prop(container, ['key', 'subkey'], 'value')\n >>> container\n {'key': {'subkey': 'value'}}\n\n Replace a nested value.\n\n >>> container = {'key': {'subkey': 'prev'}}\n >>> _set_sub_prop(container, ['key', 'subkey'], 'new')\n >>> container\n {'key': {'subkey': 'new'}}\n \"\"\"\n sub_val = container\n for key in keys[:-1]:\n if key not in sub_val:\n sub_val[key] = {}\n sub_val = sub_val[key]\n sub_val[keys[-1]] = value\n\n\ndef _del_sub_prop(container, keys):\n \"\"\"Remove a nested key fro a dictionary.\n\n Args:\n container (Dict):\n A dictionary which may contain other dictionaries as values.\n keys (Iterable):\n A sequence of keys to attempt to clear the value for. Each item in\n the sequence represents a deeper nesting. The first key is for\n the top level. If there is a dictionary there, the second key\n attempts to get the value within that, and so on.\n\n Examples:\n Remove a top-level value (equivalent to ``del container['key']``).\n\n >>> container = {'key': 'value'}\n >>> _del_sub_prop(container, ['key'])\n >>> container\n {}\n\n Remove a nested value.\n\n >>> container = {'key': {'subkey': 'value'}}\n >>> _del_sub_prop(container, ['key', 'subkey'])\n >>> container\n {'key': {}}\n \"\"\"\n sub_val = container\n for key in keys[:-1]:\n if key not in sub_val:\n sub_val[key] = {}\n sub_val = sub_val[key]\n if keys[-1] in sub_val:\n del sub_val[keys[-1]]\n\n\ndef _int_or_none(value):\n \"\"\"Helper: deserialize int value from JSON string.\"\"\"\n if isinstance(value, int):\n return value\n if value is not None:\n return int(value)\n\n\ndef _str_or_none(value):\n \"\"\"Helper: serialize value to JSON string.\"\"\"\n if value is not None:\n return str(value)\n\n\ndef _split_id(full_id):\n \"\"\"Helper: split full_id into composite parts.\n\n Args:\n full_id (str): Fully-qualified ID in standard SQL format.\n\n Returns:\n List[str]: ID's parts separated into components.\n \"\"\"\n with_prefix = _PROJECT_PREFIX_PATTERN.match(full_id)\n if with_prefix is None:\n parts = full_id.split(\".\")\n else:\n parts = with_prefix.groups()\n parts = [part for part in parts if part]\n return parts\n\n\ndef _parse_3_part_id(full_id, default_project=None, property_name=\"table_id\"):\n output_project_id = default_project\n output_dataset_id = None\n output_resource_id = None\n parts = _split_id(full_id)\n\n if len(parts) != 2 and len(parts) != 3:\n raise ValueError(\n \"{property_name} must be a fully-qualified ID in \"\n 'standard SQL format, e.g., \"project.dataset.{property_name}\", '\n \"got {}\".format(full_id, property_name=property_name)\n )\n\n if len(parts) == 2 and not default_project:\n raise ValueError(\n \"When default_project is not set, {property_name} must be a \"\n \"fully-qualified ID in standard SQL format, \"\n 'e.g., \"project.dataset_id.{property_name}\", got {}'.format(\n full_id, property_name=property_name\n )\n )\n\n if len(parts) == 2:\n output_dataset_id, output_resource_id = parts\n else:\n output_project_id, output_dataset_id, output_resource_id = parts\n\n return output_project_id, output_dataset_id, output_resource_id\n\n\ndef 
_build_resource_from_properties(obj, filter_fields):\n \"\"\"Build a resource based on a ``_properties`` dictionary, filtered by\n ``filter_fields``, which follow the name of the Python object.\n \"\"\"\n partial = {}\n for filter_field in filter_fields:\n api_field = obj._PROPERTY_TO_API_FIELD.get(filter_field)\n if api_field is None and filter_field not in obj._properties:\n raise ValueError(\"No property %s\" % filter_field)\n elif api_field is not None:\n partial[api_field] = obj._properties.get(api_field)\n else:\n # allows properties that are not defined in the library\n # and properties that have the same name as API resource key\n partial[filter_field] = obj._properties[filter_field]\n\n return partial\n\n\ndef _verify_job_config_type(job_config, expected_type, param_name=\"job_config\"):\n if not isinstance(job_config, expected_type):\n msg = (\n \"Expected an instance of {expected_type} class for the {param_name} parameter, \"\n \"but received {param_name} = {job_config}\"\n )\n raise TypeError(\n msg.format(\n expected_type=expected_type.__name__,\n param_name=param_name,\n job_config=job_config,\n )\n )\n", "path": "google/cloud/bigquery/_helpers.py" } ]
[ { "content": "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shared helper functions for BigQuery API classes.\"\"\"\n\nimport base64\nimport datetime\nimport decimal\nimport re\nimport six\n\nfrom google.cloud._helpers import UTC\nfrom google.cloud._helpers import _date_from_iso8601_date\nfrom google.cloud._helpers import _datetime_from_microseconds\nfrom google.cloud._helpers import _RFC3339_MICROS\nfrom google.cloud._helpers import _RFC3339_NO_FRACTION\nfrom google.cloud._helpers import _to_bytes\n\n_RFC3339_MICROS_NO_ZULU = \"%Y-%m-%dT%H:%M:%S.%f\"\n_TIMEONLY_WO_MICROS = \"%H:%M:%S\"\n_TIMEONLY_W_MICROS = \"%H:%M:%S.%f\"\n_PROJECT_PREFIX_PATTERN = re.compile(\n r\"\"\"\n (?P<project_id>\\S+\\:[^.]+)\\.(?P<dataset_id>[^.]+)(?:$|\\.(?P<custom_id>[^.]+)$)\n\"\"\",\n re.VERBOSE,\n)\n\n\ndef _not_null(value, field):\n \"\"\"Check whether 'value' should be coerced to 'field' type.\"\"\"\n return value is not None or (field is not None and field.mode != \"NULLABLE\")\n\n\ndef _int_from_json(value, field):\n \"\"\"Coerce 'value' to an int, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return int(value)\n\n\ndef _float_from_json(value, field):\n \"\"\"Coerce 'value' to a float, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return float(value)\n\n\ndef _decimal_from_json(value, field):\n \"\"\"Coerce 'value' to a Decimal, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return decimal.Decimal(value)\n\n\ndef _bool_from_json(value, field):\n \"\"\"Coerce 'value' to a bool, if set or not nullable.\"\"\"\n if _not_null(value, field):\n return value.lower() in [\"t\", \"true\", \"1\"]\n\n\ndef _string_from_json(value, _):\n \"\"\"NOOP string -> string coercion\"\"\"\n return value\n\n\ndef _bytes_from_json(value, field):\n \"\"\"Base64-decode value\"\"\"\n if _not_null(value, field):\n return base64.standard_b64decode(_to_bytes(value))\n\n\ndef _timestamp_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime, if set or not nullable.\"\"\"\n if _not_null(value, field):\n # value will be a integer in seconds, to microsecond precision, in UTC.\n return _datetime_from_microseconds(int(value))\n\n\ndef _timestamp_query_param_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime, if set or not nullable.\n\n Args:\n value (str): The timestamp.\n\n field (google.cloud.bigquery.schema.SchemaField):\n The field corresponding to the value.\n\n Returns:\n Optional[datetime.datetime]:\n The parsed datetime object from\n ``value`` if the ``field`` is not null (otherwise it is\n :data:`None`).\n \"\"\"\n if _not_null(value, field):\n # Canonical formats for timestamps in BigQuery are flexible. 
See:\n # g.co/cloud/bigquery/docs/reference/standard-sql/data-types#timestamp-type\n # The separator between the date and time can be 'T' or ' '.\n value = value.replace(\" \", \"T\", 1)\n # The UTC timezone may be formatted as Z or +00:00.\n value = value.replace(\"Z\", \"\")\n value = value.replace(\"+00:00\", \"\")\n\n if \".\" in value:\n # YYYY-MM-DDTHH:MM:SS.ffffff\n return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU).replace(\n tzinfo=UTC\n )\n else:\n # YYYY-MM-DDTHH:MM:SS\n return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION).replace(\n tzinfo=UTC\n )\n else:\n return None\n\n\ndef _datetime_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime, if set or not nullable.\n\n Args:\n value (str): The timestamp.\n field (google.cloud.bigquery.schema.SchemaField):\n The field corresponding to the value.\n\n Returns:\n Optional[datetime.datetime]:\n The parsed datetime object from\n ``value`` if the ``field`` is not null (otherwise it is\n :data:`None`).\n \"\"\"\n if _not_null(value, field):\n if \".\" in value:\n # YYYY-MM-DDTHH:MM:SS.ffffff\n return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU)\n else:\n # YYYY-MM-DDTHH:MM:SS\n return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION)\n else:\n return None\n\n\ndef _date_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime date, if set or not nullable\"\"\"\n if _not_null(value, field):\n # value will be a string, in YYYY-MM-DD form.\n return _date_from_iso8601_date(value)\n\n\ndef _time_from_json(value, field):\n \"\"\"Coerce 'value' to a datetime date, if set or not nullable\"\"\"\n if _not_null(value, field):\n if len(value) == 8: # HH:MM:SS\n fmt = _TIMEONLY_WO_MICROS\n elif len(value) == 15: # HH:MM:SS.micros\n fmt = _TIMEONLY_W_MICROS\n else:\n raise ValueError(\"Unknown time format: {}\".format(value))\n return datetime.datetime.strptime(value, fmt).time()\n\n\ndef _record_from_json(value, field):\n \"\"\"Coerce 'value' to a mapping, if set or not nullable.\"\"\"\n if _not_null(value, field):\n record = {}\n record_iter = zip(field.fields, value[\"f\"])\n for subfield, cell in record_iter:\n converter = _CELLDATA_FROM_JSON[subfield.field_type]\n if subfield.mode == \"REPEATED\":\n value = [converter(item[\"v\"], subfield) for item in cell[\"v\"]]\n else:\n value = converter(cell[\"v\"], subfield)\n record[subfield.name] = value\n return record\n\n\n_CELLDATA_FROM_JSON = {\n \"INTEGER\": _int_from_json,\n \"INT64\": _int_from_json,\n \"FLOAT\": _float_from_json,\n \"FLOAT64\": _float_from_json,\n \"NUMERIC\": _decimal_from_json,\n \"BIGNUMERIC\": _decimal_from_json,\n \"BOOLEAN\": _bool_from_json,\n \"BOOL\": _bool_from_json,\n \"STRING\": _string_from_json,\n \"GEOGRAPHY\": _string_from_json,\n \"BYTES\": _bytes_from_json,\n \"TIMESTAMP\": _timestamp_from_json,\n \"DATETIME\": _datetime_from_json,\n \"DATE\": _date_from_json,\n \"TIME\": _time_from_json,\n \"RECORD\": _record_from_json,\n}\n\n_QUERY_PARAMS_FROM_JSON = dict(_CELLDATA_FROM_JSON)\n_QUERY_PARAMS_FROM_JSON[\"TIMESTAMP\"] = _timestamp_query_param_from_json\n\n\ndef _field_to_index_mapping(schema):\n \"\"\"Create a mapping from schema field name to index of field.\"\"\"\n return {f.name: i for i, f in enumerate(schema)}\n\n\ndef _field_from_json(resource, field):\n converter = _CELLDATA_FROM_JSON.get(field.field_type, lambda value, _: value)\n if field.mode == \"REPEATED\":\n return [converter(item[\"v\"], field) for item in resource]\n else:\n return converter(resource, field)\n\n\ndef 
_row_tuple_from_json(row, schema):\n \"\"\"Convert JSON row data to row with appropriate types.\n\n Note: ``row['f']`` and ``schema`` are presumed to be of the same length.\n\n Args:\n row (Dict): A JSON response row to be converted.\n schema (Sequence[Union[ \\\n :class:`~google.cloud.bigquery.schema.SchemaField`, \\\n Mapping[str, Any] \\\n ]]): Specification of the field types in ``row``.\n\n Returns:\n Tuple: A tuple of data converted to native types.\n \"\"\"\n from google.cloud.bigquery.schema import _to_schema_fields\n\n schema = _to_schema_fields(schema)\n\n row_data = []\n for field, cell in zip(schema, row[\"f\"]):\n row_data.append(_field_from_json(cell[\"v\"], field))\n return tuple(row_data)\n\n\ndef _rows_from_json(values, schema):\n \"\"\"Convert JSON row data to rows with appropriate types.\n\n Args:\n values (Sequence[Dict]): The list of responses (JSON rows) to convert.\n schema (Sequence[Union[ \\\n :class:`~google.cloud.bigquery.schema.SchemaField`, \\\n Mapping[str, Any] \\\n ]]):\n The table's schema. If any item is a mapping, its content must be\n compatible with\n :meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.\n\n Returns:\n List[:class:`~google.cloud.bigquery.Row`]\n \"\"\"\n from google.cloud.bigquery import Row\n from google.cloud.bigquery.schema import _to_schema_fields\n\n schema = _to_schema_fields(schema)\n field_to_index = _field_to_index_mapping(schema)\n return [Row(_row_tuple_from_json(r, schema), field_to_index) for r in values]\n\n\ndef _int_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, int):\n value = str(value)\n return value\n\n\ndef _float_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n return value\n\n\ndef _decimal_to_json(value):\n \"\"\"Coerce 'value' to a JSON-compatible representation.\"\"\"\n if isinstance(value, decimal.Decimal):\n value = str(value)\n return value\n\n\ndef _bool_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, bool):\n value = \"true\" if value else \"false\"\n return value\n\n\ndef _bytes_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, bytes):\n value = base64.standard_b64encode(value).decode(\"ascii\")\n return value\n\n\ndef _timestamp_to_json_parameter(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\n\n This version returns the string representation used in query parameters.\n \"\"\"\n if isinstance(value, datetime.datetime):\n if value.tzinfo not in (None, UTC):\n # Convert to UTC and remove the time zone info.\n value = value.replace(tzinfo=None) - value.utcoffset()\n value = \"%s %s+00:00\" % (value.date().isoformat(), value.time().isoformat())\n return value\n\n\ndef _timestamp_to_json_row(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, datetime.datetime):\n value = value.strftime(_RFC3339_MICROS)\n return value\n\n\ndef _datetime_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, datetime.datetime):\n value = value.strftime(_RFC3339_MICROS_NO_ZULU)\n return value\n\n\ndef _date_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, datetime.date):\n value = value.isoformat()\n return value\n\n\ndef _time_to_json(value):\n \"\"\"Coerce 'value' to an JSON-compatible representation.\"\"\"\n if isinstance(value, 
datetime.time):\n value = value.isoformat()\n return value\n\n\n# Converters used for scalar values marshalled as row data.\n_SCALAR_VALUE_TO_JSON_ROW = {\n \"INTEGER\": _int_to_json,\n \"INT64\": _int_to_json,\n \"FLOAT\": _float_to_json,\n \"FLOAT64\": _float_to_json,\n \"NUMERIC\": _decimal_to_json,\n \"BIGNUMERIC\": _decimal_to_json,\n \"BOOLEAN\": _bool_to_json,\n \"BOOL\": _bool_to_json,\n \"BYTES\": _bytes_to_json,\n \"TIMESTAMP\": _timestamp_to_json_row,\n \"DATETIME\": _datetime_to_json,\n \"DATE\": _date_to_json,\n \"TIME\": _time_to_json,\n}\n\n\n# Converters used for scalar values marshalled as query parameters.\n_SCALAR_VALUE_TO_JSON_PARAM = _SCALAR_VALUE_TO_JSON_ROW.copy()\n_SCALAR_VALUE_TO_JSON_PARAM[\"TIMESTAMP\"] = _timestamp_to_json_parameter\n\n\ndef _scalar_field_to_json(field, row_value):\n \"\"\"Maps a field and value to a JSON-safe value.\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name.\n row_value (Any):\n Value to be converted, based on the field's type.\n\n Returns:\n Any: A JSON-serializable object.\n \"\"\"\n converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)\n if converter is None: # STRING doesn't need converting\n return row_value\n return converter(row_value)\n\n\ndef _repeated_field_to_json(field, row_value):\n \"\"\"Convert a repeated/array field to its JSON representation.\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name. The\n field mode must equal ``REPEATED``.\n row_value (Sequence[Any]):\n A sequence of values to convert to JSON-serializable values.\n\n Returns:\n List[Any]: A list of JSON-serializable objects.\n \"\"\"\n values = []\n for item in row_value:\n values.append(_single_field_to_json(field, item))\n return values\n\n\ndef _record_field_to_json(fields, row_value):\n \"\"\"Convert a record/struct field to its JSON representation.\n\n Args:\n fields (Sequence[google.cloud.bigquery.schema.SchemaField]):\n The :class:`~google.cloud.bigquery.schema.SchemaField`s of the\n record's subfields to use for type conversion and field names.\n row_value (Union[Tuple[Any], Mapping[str, Any]):\n A tuple or dictionary to convert to JSON-serializable values.\n\n Returns:\n Mapping[str, Any]: A JSON-serializable dictionary.\n \"\"\"\n isdict = isinstance(row_value, dict)\n\n # If row is passed as a tuple, make the length sanity check to avoid either\n # uninformative index errors a few lines below or silently omitting some of\n # the values from the result (we cannot know exactly which fields are missing\n # or redundant, since we don't have their names).\n if not isdict and len(row_value) != len(fields):\n msg = \"The number of row fields ({}) does not match schema length ({}).\".format(\n len(row_value), len(fields)\n )\n raise ValueError(msg)\n\n record = {}\n\n if isdict:\n processed_fields = set()\n\n for subindex, subfield in enumerate(fields):\n subname = subfield.name\n subvalue = row_value.get(subname) if isdict else row_value[subindex]\n\n # None values are unconditionally omitted\n if subvalue is not None:\n record[subname] = _field_to_json(subfield, subvalue)\n\n if isdict:\n processed_fields.add(subname)\n\n # Unknown fields should not be silently dropped, include them. 
Since there\n # is no schema information available for them, include them as strings\n # to make them JSON-serializable.\n if isdict:\n not_processed = set(row_value.keys()) - processed_fields\n\n for field_name in not_processed:\n value = row_value[field_name]\n if value is not None:\n record[field_name] = six.text_type(value)\n\n return record\n\n\ndef _single_field_to_json(field, row_value):\n \"\"\"Convert a single field into JSON-serializable values.\n\n Ignores mode so that this can function for ARRAY / REPEATING fields\n without requiring a deepcopy of the field. See:\n https://github.com/googleapis/python-bigquery/issues/6\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name.\n\n row_value (Any):\n Scalar or Struct to be inserted. The type\n is inferred from the SchemaField's field_type.\n\n Returns:\n Any: A JSON-serializable object.\n \"\"\"\n if row_value is None:\n return None\n\n if field.field_type == \"RECORD\":\n return _record_field_to_json(field.fields, row_value)\n\n return _scalar_field_to_json(field, row_value)\n\n\ndef _field_to_json(field, row_value):\n \"\"\"Convert a field into JSON-serializable values.\n\n Args:\n field (google.cloud.bigquery.schema.SchemaField):\n The SchemaField to use for type conversion and field name.\n\n row_value (Union[Sequence[List], Any]):\n Row data to be inserted. If the SchemaField's mode is\n REPEATED, assume this is a list. If not, the type\n is inferred from the SchemaField's field_type.\n\n Returns:\n Any: A JSON-serializable object.\n \"\"\"\n if row_value is None:\n return None\n\n if field.mode == \"REPEATED\":\n return _repeated_field_to_json(field, row_value)\n\n return _single_field_to_json(field, row_value)\n\n\ndef _snake_to_camel_case(value):\n \"\"\"Convert snake case string to camel case.\"\"\"\n words = value.split(\"_\")\n return words[0] + \"\".join(map(str.capitalize, words[1:]))\n\n\ndef _get_sub_prop(container, keys, default=None):\n \"\"\"Get a nested value from a dictionary.\n\n This method works like ``dict.get(key)``, but for nested values.\n\n Args:\n container (Dict):\n A dictionary which may contain other dictionaries as values.\n keys (Iterable):\n A sequence of keys to attempt to get the value for. Each item in\n the sequence represents a deeper nesting. The first key is for\n the top level. If there is a dictionary there, the second key\n attempts to get the value within that, and so on.\n default (Optional[object]):\n Value to returned if any of the keys are not found.\n Defaults to ``None``.\n\n Examples:\n Get a top-level value (equivalent to ``container.get('key')``).\n\n >>> _get_sub_prop({'key': 'value'}, ['key'])\n 'value'\n\n Get a top-level value, providing a default (equivalent to\n ``container.get('key', default='default')``).\n\n >>> _get_sub_prop({'nothere': 123}, ['key'], default='not found')\n 'not found'\n\n Get a nested value.\n\n >>> _get_sub_prop({'key': {'subkey': 'value'}}, ['key', 'subkey'])\n 'value'\n\n Returns:\n object: The value if present or the default.\n \"\"\"\n sub_val = container\n for key in keys:\n if key not in sub_val:\n return default\n sub_val = sub_val[key]\n return sub_val\n\n\ndef _set_sub_prop(container, keys, value):\n \"\"\"Set a nested value in a dictionary.\n\n Args:\n container (Dict):\n A dictionary which may contain other dictionaries as values.\n keys (Iterable):\n A sequence of keys to attempt to set the value for. Each item in\n the sequence represents a deeper nesting. 
The first key is for\n the top level. If there is a dictionary there, the second key\n attempts to get the value within that, and so on.\n value (object): Value to set within the container.\n\n Examples:\n Set a top-level value (equivalent to ``container['key'] = 'value'``).\n\n >>> container = {}\n >>> _set_sub_prop(container, ['key'], 'value')\n >>> container\n {'key': 'value'}\n\n Set a nested value.\n\n >>> container = {}\n >>> _set_sub_prop(container, ['key', 'subkey'], 'value')\n >>> container\n {'key': {'subkey': 'value'}}\n\n Replace a nested value.\n\n >>> container = {'key': {'subkey': 'prev'}}\n >>> _set_sub_prop(container, ['key', 'subkey'], 'new')\n >>> container\n {'key': {'subkey': 'new'}}\n \"\"\"\n sub_val = container\n for key in keys[:-1]:\n if key not in sub_val:\n sub_val[key] = {}\n sub_val = sub_val[key]\n sub_val[keys[-1]] = value\n\n\ndef _del_sub_prop(container, keys):\n \"\"\"Remove a nested key fro a dictionary.\n\n Args:\n container (Dict):\n A dictionary which may contain other dictionaries as values.\n keys (Iterable):\n A sequence of keys to attempt to clear the value for. Each item in\n the sequence represents a deeper nesting. The first key is for\n the top level. If there is a dictionary there, the second key\n attempts to get the value within that, and so on.\n\n Examples:\n Remove a top-level value (equivalent to ``del container['key']``).\n\n >>> container = {'key': 'value'}\n >>> _del_sub_prop(container, ['key'])\n >>> container\n {}\n\n Remove a nested value.\n\n >>> container = {'key': {'subkey': 'value'}}\n >>> _del_sub_prop(container, ['key', 'subkey'])\n >>> container\n {'key': {}}\n \"\"\"\n sub_val = container\n for key in keys[:-1]:\n if key not in sub_val:\n sub_val[key] = {}\n sub_val = sub_val[key]\n if keys[-1] in sub_val:\n del sub_val[keys[-1]]\n\n\ndef _int_or_none(value):\n \"\"\"Helper: deserialize int value from JSON string.\"\"\"\n if isinstance(value, int):\n return value\n if value is not None:\n return int(value)\n\n\ndef _str_or_none(value):\n \"\"\"Helper: serialize value to JSON string.\"\"\"\n if value is not None:\n return str(value)\n\n\ndef _split_id(full_id):\n \"\"\"Helper: split full_id into composite parts.\n\n Args:\n full_id (str): Fully-qualified ID in standard SQL format.\n\n Returns:\n List[str]: ID's parts separated into components.\n \"\"\"\n with_prefix = _PROJECT_PREFIX_PATTERN.match(full_id)\n if with_prefix is None:\n parts = full_id.split(\".\")\n else:\n parts = with_prefix.groups()\n parts = [part for part in parts if part]\n return parts\n\n\ndef _parse_3_part_id(full_id, default_project=None, property_name=\"table_id\"):\n output_project_id = default_project\n output_dataset_id = None\n output_resource_id = None\n parts = _split_id(full_id)\n\n if len(parts) != 2 and len(parts) != 3:\n raise ValueError(\n \"{property_name} must be a fully-qualified ID in \"\n 'standard SQL format, e.g., \"project.dataset.{property_name}\", '\n \"got {}\".format(full_id, property_name=property_name)\n )\n\n if len(parts) == 2 and not default_project:\n raise ValueError(\n \"When default_project is not set, {property_name} must be a \"\n \"fully-qualified ID in standard SQL format, \"\n 'e.g., \"project.dataset_id.{property_name}\", got {}'.format(\n full_id, property_name=property_name\n )\n )\n\n if len(parts) == 2:\n output_dataset_id, output_resource_id = parts\n else:\n output_project_id, output_dataset_id, output_resource_id = parts\n\n return output_project_id, output_dataset_id, output_resource_id\n\n\ndef 
_build_resource_from_properties(obj, filter_fields):\n \"\"\"Build a resource based on a ``_properties`` dictionary, filtered by\n ``filter_fields``, which follow the name of the Python object.\n \"\"\"\n partial = {}\n for filter_field in filter_fields:\n api_field = obj._PROPERTY_TO_API_FIELD.get(filter_field)\n if api_field is None and filter_field not in obj._properties:\n raise ValueError(\"No property %s\" % filter_field)\n elif api_field is not None:\n partial[api_field] = obj._properties.get(api_field)\n else:\n # allows properties that are not defined in the library\n # and properties that have the same name as API resource key\n partial[filter_field] = obj._properties[filter_field]\n\n return partial\n\n\ndef _verify_job_config_type(job_config, expected_type, param_name=\"job_config\"):\n if not isinstance(job_config, expected_type):\n msg = (\n \"Expected an instance of {expected_type} class for the {param_name} parameter, \"\n \"but received {param_name} = {job_config}\"\n )\n raise TypeError(\n msg.format(\n expected_type=expected_type.__name__,\n param_name=param_name,\n job_config=job_config,\n )\n )\n", "path": "google/cloud/bigquery/_helpers.py" } ]
diff --git a/google/cloud/bigquery/_helpers.py b/google/cloud/bigquery/_helpers.py index 716c8a394..100136108 100644 --- a/google/cloud/bigquery/_helpers.py +++ b/google/cloud/bigquery/_helpers.py @@ -40,7 +40,7 @@ def _not_null(value, field): """Check whether 'value' should be coerced to 'field' type.""" - return value is not None or field.mode != "NULLABLE" + return value is not None or (field is not None and field.mode != "NULLABLE") def _int_from_json(value, field): diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index a7c639ed1..cf268daf1 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -383,6 +383,16 @@ def test_from_api_repr_wo_values(self): self.assertEqual(param.array_type, "INT64") self.assertEqual(param.values, []) + def test_from_api_repr_w_none_values(self): + RESOURCE = { + "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}}, + "parameterValue": {"arrayValues": [{"value": "1"}, {"value": None}]}, + } + klass = self._get_target_class() + param = klass.from_api_repr(RESOURCE) + self.assertEqual(param.array_type, "INT64") + self.assertEqual(param.values, [1, None]) + def test_from_api_repr_w_struct_type(self): from google.cloud.bigquery.query import StructQueryParameter
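As a quick sanity check of the patched helper, the behaviour with a missing schema field can be exercised directly. This is only a sketch; `_not_null` and `_int_from_json` are private helpers and are called here purely to illustrate the new semantics.

```python
from google.cloud.bigquery._helpers import _int_from_json, _not_null

# With no schema field available (field=None), as happens when query
# parameters are deserialized, a None value is now treated as null
# instead of raising AttributeError on field.mode.
assert _not_null(None, None) is False
assert _int_from_json(None, None) is None
assert _int_from_json("1", None) == 1
```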
Gallopsled__pwntools-2083
Remote SSH debugging is broken due to missing qemu_port

When GDB is invoked on a remote host (via `gdb.debug(..., ssh=shell)`) the following error is thrown. It's not particularly helpful -- perhaps we should double-check that `gdbserver` is available first?

This error only occurs when debugging a cross-arch binary on a remote host.

```
Traceback (most recent call last):
  File "exploit.py", line 66, in <module>
    io = start()
  File "exploit.py", line 46, in start
    return remote(argv, *a, **kw)
  File "exploit.py", line 37, in remote
    return gdb.debug([remote_path] + argv, gdbscript=gdbscript, ssh=shell, *a, **kw)
  File "/home/pwntools/pwntools/pwnlib/context/__init__.py", line 1449, in setter
    return function(*a, **kw)
  File "/home/pwntools/pwntools/pwnlib/gdb.py", line 454, in debug
    port = qemu_port
UnboundLocalError: local variable 'qemu_port' referenced before assignment
```

This error can be reproduced as follows:

```
$ pwn template --user level3 --pass c1aXb9E2OrgybHXE --path /levels/level03 --host ioarm.netgarage.org --port 2201 > exploit.py
$ python exploit.py GDB
```
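A rough, condensed sketch of the code path the generated template exercises is shown below. The host, credentials, and binary path come from the reproduction command above and are only illustrative; the point is that `gdb.debug(..., ssh=shell)` on a cross-arch target reaches a branch where `qemu_port` is never assigned.

```python
from pwn import *

context.binary = ELF('./level03')  # local copy of the cross-arch (ARM) target

shell = ssh(user='level3', host='ioarm.netgarage.org',
            port=2201, password='c1aXb9E2OrgybHXE')

# Debugging the remote, cross-arch binary over SSH triggers the
# UnboundLocalError shown in the traceback above.
io = gdb.debug(['/levels/level03'], gdbscript='break main', ssh=shell)
```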
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nDuring exploit development, it is frequently useful to debug the\ntarget binary under GDB.\n\nPwntools makes this easy-to-do with a handful of helper routines, designed\nto make your exploit-debug-update cycles much faster.\n\nUseful Functions\n----------------\n\n- :func:`attach` - Attach to an existing process\n- :func:`debug` - Start a new process under a debugger, stopped at the first instruction\n- :func:`debug_shellcode` - Build a binary with the provided shellcode, and start it under a debugger\n\nDebugging Tips\n--------------\n\nThe :func:`attach` and :func:`debug` functions will likely be your bread and\nbutter for debugging.\n\nBoth allow you to provide a script to pass to GDB when it is started, so that\nit can automatically set your breakpoints.\n\nAttaching to Processes\n~~~~~~~~~~~~~~~~~~~~~~\n\nTo attach to an existing process, just use :func:`attach`. It is surprisingly\nversatile, and can attach to a :class:`.process` for simple\nbinaries, or will automatically find the correct process to attach to for a\nforking server, if given a :class:`.remote` object.\n\nSpawning New Processes\n~~~~~~~~~~~~~~~~~~~~~~\n\nAttaching to processes with :func:`attach` is useful, but the state the process\nis in may vary. If you need to attach to a process very early, and debug it from\nthe very first instruction (or even the start of ``main``), you instead should use\n:func:`debug`.\n\nWhen you use :func:`debug`, the return value is a :class:`.tube` object\nthat you interact with exactly like normal.\n\nUsing GDB Python API\n~~~~~~~~~~~~~~~~~~~~\n\nGDB provides Python API, which is documented at\nhttps://sourceware.org/gdb/onlinedocs/gdb/Python-API.html. Pwntools allows you\nto call it right from the exploit, without having to write a gdbscript. This is\nuseful for inspecting program state, e.g. asserting that leaked values are\ncorrect, or that certain packets trigger a particular code path or put the heap\nin a desired state.\n\nPass ``api=True`` to :func:`attach` or :func:`debug` in order to enable GDB\nPython API access. Pwntools will then connect to GDB using RPyC library:\nhttps://rpyc.readthedocs.io/en/latest/.\n\nAt the moment this is an experimental feature with the following limitations:\n\n- Only Python 3 is supported.\n\n Well, technically that's not quite true. The real limitation is that your\n GDB's Python interpreter major version should be the same as that of\n Pwntools. However, most GDBs use Python 3 nowadays.\n\n Different minor versions are allowed as long as no incompatible values are\n sent in either direction. See\n https://rpyc.readthedocs.io/en/latest/install.html#cross-interpreter-compatibility\n for more information.\n\n Use\n\n ::\n\n $ gdb -batch -ex 'python import sys; print(sys.version)'\n\n in order to check your GDB's Python version.\n- If your GDB uses a different Python interpreter than Pwntools (for example,\n because you run Pwntools out of a virtualenv), you should install ``rpyc``\n package into its ``sys.path``. Use\n\n ::\n\n $ gdb -batch -ex 'python import rpyc'\n\n in order to check whether this is necessary.\n- Only local processes are supported.\n- It is not possible to tell whether ``gdb.execute('continue')`` will be\n executed synchronously or asynchronously (in gdbscripts it is always\n synchronous). 
Therefore it is recommended to use either the explicitly\n synchronous :func:`pwnlib.gdb.Gdb.continue_and_wait` or the explicitly\n asynchronous :func:`pwnlib.gdb.Gdb.continue_nowait` instead.\n\nTips and Troubleshooting\n------------------------\n\n``NOPTRACE`` magic argument\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIt's quite cumbersom to comment and un-comment lines containing `attach`.\n\nYou can cause these lines to be a no-op by running your script with the\n``NOPTRACE`` argument appended, or with ``PWNLIB_NOPTRACE=1`` in the environment.\n\n::\n\n $ python exploit.py NOPTRACE\n [+] Starting local process '/bin/bash': Done\n [!] Skipping debug attach since context.noptrace==True\n ...\n\nKernel Yama ptrace_scope\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe Linux kernel v3.4 introduced a security mechanism called ``ptrace_scope``,\nwhich is intended to prevent processes from debugging eachother unless there is\na direct parent-child relationship.\n\nThis causes some issues with the normal Pwntools workflow, since the process\nhierarchy looks like this:\n\n::\n\n python ---> target\n `--> gdb\n\nNote that ``python`` is the parent of ``target``, not ``gdb``.\n\nIn order to avoid this being a problem, Pwntools uses the function\n``prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY)``. This disables Yama\nfor any processes launched by Pwntools via :class:`.process` or via\n:meth:`.ssh.process`.\n\nOlder versions of Pwntools did not perform the ``prctl`` step, and\nrequired that the Yama security feature was disabled systemwide, which\nrequires ``root`` access.\n\nMember Documentation\n===============================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom contextlib import contextmanager\nimport os\nimport platform\nimport psutil\nimport random\nimport re\nimport shlex\nimport six\nimport six.moves\nimport socket\nimport tempfile\nfrom threading import Event\nimport time\n\nfrom pwnlib import adb\nfrom pwnlib import atexit\nfrom pwnlib import elf\nfrom pwnlib import qemu\nfrom pwnlib import tubes\nfrom pwnlib.asm import _bfdname\nfrom pwnlib.asm import make_elf\nfrom pwnlib.asm import make_elf_from_assembly\nfrom pwnlib.context import LocalContext\nfrom pwnlib.context import context\nfrom pwnlib.log import getLogger\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.util import misc\nfrom pwnlib.util import packing\nfrom pwnlib.util import proc\n\nlog = getLogger(__name__)\n\n@LocalContext\ndef debug_assembly(asm, gdbscript=None, vma=None, api=False):\n r\"\"\"debug_assembly(asm, gdbscript=None, vma=None, api=False) -> tube\n\n Creates an ELF file, and launches it under a debugger.\n\n This is identical to debug_shellcode, except that\n any defined symbols are available in GDB, and it\n saves you the explicit call to asm().\n\n Arguments:\n asm(str): Assembly code to debug\n gdbscript(str): Script to run in GDB\n vma(int): Base address to load the shellcode at\n api(bool): Enable access to GDB Python API\n \\**kwargs: Override any :obj:`pwnlib.context.context` values.\n\n Returns:\n :class:`.process`\n\n Example:\n\n >>> assembly = shellcraft.echo(\"Hello world!\\n\")\n >>> io = gdb.debug_assembly(assembly)\n >>> io.recvline()\n b'Hello world!\\n'\n \"\"\"\n tmp_elf = make_elf_from_assembly(asm, vma=vma, extract=False)\n os.chmod(tmp_elf, 0o777)\n\n atexit.register(lambda: os.unlink(tmp_elf))\n\n if context.os == 'android':\n android_path = '/data/data/%s' % os.path.basename(tmp_elf)\n adb.push(tmp_elf, android_path)\n tmp_elf = android_path\n\n return debug(tmp_elf, 
gdbscript=gdbscript, arch=context.arch, api=api)\n\n@LocalContext\ndef debug_shellcode(data, gdbscript=None, vma=None, api=False):\n r\"\"\"debug_shellcode(data, gdbscript=None, vma=None, api=False) -> tube\n Creates an ELF file, and launches it under a debugger.\n\n Arguments:\n data(str): Assembled shellcode bytes\n gdbscript(str): Script to run in GDB\n vma(int): Base address to load the shellcode at\n api(bool): Enable access to GDB Python API\n \\**kwargs: Override any :obj:`pwnlib.context.context` values.\n\n Returns:\n :class:`.process`\n\n Example:\n\n >>> assembly = shellcraft.echo(\"Hello world!\\n\")\n >>> shellcode = asm(assembly)\n >>> io = gdb.debug_shellcode(shellcode)\n >>> io.recvline()\n b'Hello world!\\n'\n \"\"\"\n if isinstance(data, six.text_type):\n log.error(\"Shellcode is cannot be unicode. Did you mean debug_assembly?\")\n tmp_elf = make_elf(data, extract=False, vma=vma)\n os.chmod(tmp_elf, 0o777)\n\n atexit.register(lambda: os.unlink(tmp_elf))\n\n if context.os == 'android':\n android_path = '/data/data/%s' % os.path.basename(tmp_elf)\n adb.push(tmp_elf, android_path)\n tmp_elf = android_path\n\n return debug(tmp_elf, gdbscript=gdbscript, arch=context.arch, api=api)\n\ndef _gdbserver_args(pid=None, path=None, args=None, which=None, env=None):\n \"\"\"_gdbserver_args(pid=None, path=None, args=None, which=None, env=None) -> list\n\n Sets up a listening gdbserver, to either connect to the specified\n PID, or launch the specified binary by its full path.\n\n Arguments:\n pid(int): Process ID to attach to\n path(str): Process to launch\n args(list): List of arguments to provide on the debugger command line\n which(callaable): Function to find the path of a binary.\n\n Returns:\n A list of arguments to invoke gdbserver.\n \"\"\"\n if [pid, path, args].count(None) != 2:\n log.error(\"Must specify exactly one of pid, path, or args\")\n\n if not which:\n log.error(\"Must specify which.\")\n\n gdbserver = ''\n\n if not args:\n args = [str(path or pid)]\n\n # Android targets have a distinct gdbserver\n if context.bits == 64:\n gdbserver = which('gdbserver64')\n\n if not gdbserver:\n gdbserver = which('gdbserver')\n\n if not gdbserver:\n log.error(\"gdbserver is not installed\")\n\n orig_args = args\n\n gdbserver_args = [gdbserver, '--multi']\n if context.aslr:\n gdbserver_args += ['--no-disable-randomization']\n else:\n log.warn_once(\"Debugging process with ASLR disabled\")\n\n if pid:\n gdbserver_args += ['--once', '--attach']\n\n if env is not None:\n env_args = []\n for key in tuple(env):\n if key.startswith(b'LD_'): # LD_PRELOAD / LD_LIBRARY_PATH etc.\n env_args.append(b'%s=%s' % (key, env.pop(key)))\n else:\n env_args.append(b'%s=%s' % (key, env[key]))\n gdbserver_args += ['--wrapper', which('env'), '-i'] + env_args + ['--']\n\n gdbserver_args += ['localhost:0']\n gdbserver_args += args\n\n return gdbserver_args\n\ndef _gdbserver_port(gdbserver, ssh):\n which = _get_which(ssh)\n\n # Process /bin/bash created; pid = 14366\n # Listening on port 34816\n process_created = gdbserver.recvline()\n\n if process_created.startswith(b'ERROR:'):\n raise ValueError(\n 'Failed to spawn process under gdbserver. 
gdbserver error message: %r' % process_created\n )\n\n try:\n gdbserver.pid = int(process_created.split()[-1], 0)\n except ValueError:\n log.error('gdbserver did not output its pid (maybe chmod +x?): %r', process_created)\n\n listening_on = b''\n while b'Listening' not in listening_on:\n listening_on = gdbserver.recvline()\n\n port = int(listening_on.split()[-1])\n\n # Set up port forarding for SSH\n if ssh:\n remote = ssh.connect_remote('127.0.0.1', port)\n listener = tubes.listen.listen(0)\n port = listener.lport\n\n # Disable showing GDB traffic when debugging verbosity is increased\n remote.level = 'error'\n listener.level = 'error'\n\n # Hook them up\n remote.connect_both(listener)\n\n # Set up port forwarding for ADB\n elif context.os == 'android':\n adb.forward(port)\n\n return port\n\ndef _get_which(ssh=None):\n if ssh: return ssh.which\n elif context.os == 'android': return adb.which\n else: return misc.which\n\ndef _get_runner(ssh=None):\n if ssh: return ssh.process\n elif context.os == 'android': return adb.process\n else: return tubes.process.process\n\n@LocalContext\ndef debug(args, gdbscript=None, exe=None, ssh=None, env=None, sysroot=None, api=False, **kwargs):\n r\"\"\"\n Launch a GDB server with the specified command line,\n and launches GDB to attach to it.\n\n Arguments:\n args(list): Arguments to the process, similar to :class:`.process`.\n gdbscript(str): GDB script to run.\n exe(str): Path to the executable on disk\n env(dict): Environment to start the binary in\n ssh(:class:`.ssh`): Remote ssh session to use to launch the process.\n sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries\n and Android targets.\n api(bool): Enable access to GDB Python API.\n\n Returns:\n :class:`.process` or :class:`.ssh_channel`: A tube connected to the target process.\n When ``api=True``, ``gdb`` member of the returned object contains a :class:`Gdb`\n instance.\n\n Notes:\n\n The debugger is attached automatically, and you can debug everything\n from the very beginning. This requires that both ``gdb`` and ``gdbserver``\n are installed on your machine.\n\n When GDB opens via :func:`debug`, it will initially be stopped on the very first\n instruction of the dynamic linker (``ld.so``) for dynamically-linked binaries.\n\n Only the target binary and the linker will be loaded in memory, so you cannot\n set breakpoints on shared library routines like ``malloc`` since ``libc.so``\n has not even been loaded yet.\n\n There are several ways to handle this:\n\n 1. Set a breakpoint on the executable's entry point (generally, ``_start``)\n - This is only invoked after all of the required shared libraries\n are loaded.\n - You can generally get the address via the GDB command ``info file``.\n 2. Use pending breakpoints via ``set breakpoint pending on``\n - This has the side-effect of setting breakpoints for **every** function\n which matches the name. For ``malloc``, this will generally set a\n breakpoint in the executable's PLT, in the linker's internal ``malloc``,\n and eventaully in ``libc``'s malloc.\n 3. Wait for libraries to be loaded with ``set stop-on-solib-event 1``\n - There is no way to stop on any specific library being loaded, and sometimes\n multiple libraries are loaded and only a single breakpoint is issued.\n - Generally, you just add a few ``continue`` commands until things are set up\n the way you want it to be.\n\n Examples:\n\n Create a new process, and stop it at 'main'\n\n >>> io = gdb.debug('bash', '''\n ... break main\n ... continue\n ... 
''')\n\n Send a command to Bash\n\n >>> io.sendline(b\"echo hello\")\n >>> io.recvline()\n b'hello\\n'\n\n Interact with the process\n\n >>> io.interactive() # doctest: +SKIP\n >>> io.close()\n\n Create a new process, and stop it at '_start'\n\n >>> io = gdb.debug('bash', '''\n ... # Wait until we hit the main executable's entry point\n ... break _start\n ... continue\n ...\n ... # Now set breakpoint on shared library routines\n ... break malloc\n ... break free\n ... continue\n ... ''')\n\n Send a command to Bash\n\n >>> io.sendline(b\"echo hello\")\n >>> io.recvline()\n b'hello\\n'\n\n Interact with the process\n\n >>> io.interactive() # doctest: +SKIP\n >>> io.close()\n\n Using GDB Python API:\n\n .. doctest\n :skipif: six.PY2\n\n Debug a new process\n\n >>> io = gdb.debug(['echo', 'foo'], api=True)\n\n Stop at 'write'\n\n >>> bp = io.gdb.Breakpoint('write', temporary=True)\n >>> io.gdb.continue_and_wait()\n\n Dump 'count'\n\n >>> count = io.gdb.parse_and_eval('$rdx')\n >>> long = io.gdb.lookup_type('long')\n >>> int(count.cast(long))\n 4\n\n Resume the program\n\n >>> io.gdb.continue_nowait()\n >>> io.recvline()\n b'foo\\n'\n\n\n Using SSH:\n\n You can use :func:`debug` to spawn new processes on remote machines as well,\n by using the ``ssh=`` keyword to pass in your :class:`.ssh` instance.\n\n Connect to the SSH server and start a process on the server\n\n >>> shell = ssh('travis', 'example.pwnme', password='demopass')\n >>> io = gdb.debug(['whoami'],\n ... ssh = shell,\n ... gdbscript = '''\n ... break main\n ... continue\n ... ''')\n\n Send a command to Bash\n\n >>> io.sendline(b\"echo hello\")\n\n Interact with the process\n >>> io.interactive() # doctest: +SKIP\n >>> io.close()\n \"\"\"\n if isinstance(args, six.integer_types + (tubes.process.process, tubes.ssh.ssh_channel)):\n log.error(\"Use gdb.attach() to debug a running process\")\n\n if isinstance(args, (bytes, six.text_type)):\n args = [args]\n\n orig_args = args\n\n runner = _get_runner(ssh)\n which = _get_which(ssh)\n gdbscript = gdbscript or ''\n\n if api and runner is not tubes.process.process:\n raise ValueError('GDB Python API is supported only for local processes')\n\n args, env = misc.normalize_argv_env(args, env, log)\n if env:\n env = {bytes(k): bytes(v) for k, v in env}\n\n if context.noptrace:\n log.warn_once(\"Skipping debugger since context.noptrace==True\")\n return runner(args, executable=exe, env=env)\n\n if ssh or context.native or (context.os == 'android'):\n args = _gdbserver_args(args=args, which=which, env=env)\n else:\n qemu_port = random.randint(1024, 65535)\n qemu_user = qemu.user_path()\n sysroot = sysroot or qemu.ld_prefix(env=env)\n if not qemu_user:\n log.error(\"Cannot debug %s binaries without appropriate QEMU binaries\" % context.arch)\n if context.os == 'baremetal':\n qemu_args = [qemu_user, '-S', '-gdb', 'tcp::' + str(qemu_port)]\n else:\n qemu_args = [qemu_user, '-g', str(qemu_port)]\n if sysroot:\n qemu_args += ['-L', sysroot]\n args = qemu_args + args\n\n # Use a sane default sysroot for Android\n if not sysroot and context.os == 'android':\n sysroot = 'remote:/'\n\n # Make sure gdbserver/qemu is installed\n if not which(args[0]):\n log.error(\"%s is not installed\" % args[0])\n\n if not ssh:\n exe = exe or which(orig_args[0])\n if not (exe and os.path.exists(exe)):\n log.error(\"%s does not exist\" % exe)\n\n # Start gdbserver/qemu\n # (Note: We override ASLR here for the gdbserver process itself.)\n gdbserver = runner(args, env=env, aslr=1, **kwargs)\n\n # Set the .executable on the 
process object.\n gdbserver.executable = exe\n\n # Find what port we need to connect to\n if context.native or (context.os == 'android'):\n port = _gdbserver_port(gdbserver, ssh)\n else:\n port = qemu_port\n\n host = '127.0.0.1'\n if not ssh and context.os == 'android':\n host = context.adb_host\n\n tmp = attach((host, port), exe=exe, gdbscript=gdbscript, ssh=ssh, sysroot=sysroot, api=api)\n if api:\n _, gdb = tmp\n gdbserver.gdb = gdb\n\n # gdbserver outputs a message when a client connects\n garbage = gdbserver.recvline(timeout=1)\n\n # Some versions of gdbserver output an additional message\n garbage2 = gdbserver.recvline_startswith(b\"Remote debugging from host \", timeout=2)\n\n return gdbserver\n\ndef get_gdb_arch():\n return {\n 'amd64': 'i386:x86-64',\n 'powerpc': 'powerpc:common',\n 'powerpc64': 'powerpc:common64',\n 'mips64': 'mips:isa64',\n 'thumb': 'arm',\n 'sparc64': 'sparc:v9'\n }.get(context.arch, context.arch)\n\ndef binary():\n \"\"\"binary() -> str\n\n Returns:\n str: Path to the appropriate ``gdb`` binary to use.\n\n Example:\n\n >>> gdb.binary() # doctest: +SKIP\n '/usr/bin/gdb'\n \"\"\"\n gdb = misc.which('pwntools-gdb') or misc.which('gdb')\n\n if not context.native:\n multiarch = misc.which('gdb-multiarch')\n\n if multiarch:\n return multiarch\n log.warn_once('Cross-architecture debugging usually requires gdb-multiarch\\n'\n '$ apt-get install gdb-multiarch')\n\n if not gdb:\n log.error('GDB is not installed\\n'\n '$ apt-get install gdb')\n\n return gdb\n\nclass Breakpoint:\n \"\"\"Mirror of ``gdb.Breakpoint`` class.\n\n See https://sourceware.org/gdb/onlinedocs/gdb/Breakpoints-In-Python.html\n for more information.\n \"\"\"\n\n def __init__(self, conn, *args, **kwargs):\n \"\"\"Do not create instances of this class directly.\n\n Use ``pwnlib.gdb.Gdb.Breakpoint`` instead.\n \"\"\"\n # Creates a real breakpoint and connects it with this mirror\n self.conn = conn\n self.server_breakpoint = conn.root.set_breakpoint(\n self, hasattr(self, 'stop'), *args, **kwargs)\n\n def __getattr__(self, item):\n \"\"\"Return attributes of the real breakpoint.\"\"\"\n if item in (\n '____id_pack__',\n '__name__',\n '____conn__',\n 'stop',\n ):\n # Ignore RPyC netref attributes.\n # Also, if stop() is not defined, hasattr() call in our\n # __init__() will bring us here. 
Don't contact the\n # server in this case either.\n raise AttributeError()\n return getattr(self.server_breakpoint, item)\n\n def exposed_stop(self):\n # Handle stop() call from the server.\n return self.stop()\n\nclass Gdb:\n \"\"\"Mirror of ``gdb`` module.\n\n See https://sourceware.org/gdb/onlinedocs/gdb/Basic-Python.html for more\n information.\n \"\"\"\n\n def __init__(self, conn):\n \"\"\"Do not create instances of this class directly.\n\n Use :func:`attach` or :func:`debug` with ``api=True`` instead.\n \"\"\"\n self.conn = conn\n\n class _Breakpoint(Breakpoint):\n def __init__(self, *args, **kwargs):\n super().__init__(conn, *args, **kwargs)\n\n self.Breakpoint = _Breakpoint\n self.stopped = Event()\n\n def stop_handler(event):\n self.stopped.set()\n\n self.events.stop.connect(stop_handler)\n\n def __getattr__(self, item):\n \"\"\"Provide access to the attributes of `gdb` module.\"\"\"\n return getattr(self.conn.root.gdb, item)\n\n def wait(self):\n \"\"\"Wait until the program stops.\"\"\"\n self.stopped.wait()\n self.stopped.clear()\n\n def interrupt_and_wait(self):\n \"\"\"Interrupt the program and wait until it stops.\"\"\"\n self.execute('interrupt')\n self.wait()\n\n def continue_nowait(self):\n \"\"\"Continue the program. Do not wait until it stops again.\"\"\"\n self.execute('continue &')\n\n def continue_and_wait(self):\n \"\"\"Continue the program and wait until it stops again.\"\"\"\n self.continue_nowait()\n self.wait()\n\n def quit(self):\n \"\"\"Terminate GDB.\"\"\"\n self.conn.root.quit()\n\n@LocalContext\ndef attach(target, gdbscript = '', exe = None, gdb_args = None, ssh = None, sysroot = None, api = False):\n r\"\"\"\n Start GDB in a new terminal and attach to `target`.\n\n Arguments:\n target: The target to attach to.\n gdbscript(:obj:`str` or :obj:`file`): GDB script to run after attaching.\n exe(str): The path of the target binary.\n arch(str): Architechture of the target binary. If `exe` known GDB will\n detect the architechture automatically (if it is supported).\n gdb_args(list): List of additional arguments to pass to GDB.\n sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries\n and Android targets.\n api(bool): Enable access to GDB Python API.\n\n Returns:\n PID of the GDB process (or the window which it is running in).\n When ``api=True``, a (PID, :class:`Gdb`) tuple.\n\n Notes:\n\n The ``target`` argument is very robust, and can be any of the following:\n\n :obj:`int`\n PID of a process\n :obj:`str`\n Process name. The youngest process is selected.\n :obj:`tuple`\n Host, port pair of a listening ``gdbserver``\n :class:`.process`\n Process to connect to\n :class:`.sock`\n Connected socket. The executable on the other end of the connection is attached to.\n Can be any socket type, including :class:`.listen` or :class:`.remote`.\n :class:`.ssh_channel`\n Remote process spawned via :meth:`.ssh.process`.\n This will use the GDB installed on the remote machine.\n If a password is required to connect, the ``sshpass`` program must be installed.\n\n Examples:\n\n Attach to a process by PID\n\n >>> pid = gdb.attach(1234) # doctest: +SKIP\n\n Attach to the youngest process by name\n\n >>> pid = gdb.attach('bash') # doctest: +SKIP\n\n Attach a debugger to a :class:`.process` tube and automate interaction\n\n >>> io = process('bash')\n >>> pid = gdb.attach(io, gdbscript='''\n ... call puts(\"Hello from process debugger!\")\n ... detach\n ... quit\n ... 
''')\n >>> io.recvline()\n b'Hello from process debugger!\\n'\n >>> io.sendline(b'echo Hello from bash && exit')\n >>> io.recvall()\n b'Hello from bash\\n'\n\n Using GDB Python API:\n\n .. doctest\n :skipif: six.PY2\n\n >>> io = process('bash')\n\n Attach a debugger\n\n >>> pid, io_gdb = gdb.attach(io, api=True)\n\n Force the program to write something it normally wouldn't\n\n >>> io_gdb.execute('call puts(\"Hello from process debugger!\")')\n\n Resume the program\n\n >>> io_gdb.continue_nowait()\n\n Observe the forced line\n\n >>> io.recvline()\n b'Hello from process debugger!\\n'\n\n Interact with the program in a regular way\n\n >>> io.sendline(b'echo Hello from bash && exit')\n\n Observe the results\n\n >>> io.recvall()\n b'Hello from bash\\n'\n\n Attach to the remote process from a :class:`.remote` or :class:`.listen` tube,\n as long as it is running on the same machine.\n\n >>> server = process(['socat', 'tcp-listen:12345,reuseaddr,fork', 'exec:/bin/bash,nofork'])\n >>> sleep(1) # Wait for socat to start\n >>> io = remote('127.0.0.1', 12345)\n >>> sleep(1) # Wait for process to fork\n >>> pid = gdb.attach(io, gdbscript='''\n ... call puts(\"Hello from remote debugger!\")\n ... detach\n ... quit\n ... ''')\n >>> io.recvline()\n b'Hello from remote debugger!\\n'\n >>> io.sendline(b'echo Hello from bash && exit')\n >>> io.recvall()\n b'Hello from bash\\n'\n\n Attach to processes running on a remote machine via an SSH :class:`.ssh` process\n\n >>> shell = ssh('travis', 'example.pwnme', password='demopass')\n >>> io = shell.process(['cat'])\n >>> pid = gdb.attach(io, gdbscript='''\n ... call sleep(5)\n ... call puts(\"Hello from ssh debugger!\")\n ... detach\n ... quit\n ... ''')\n >>> io.recvline(timeout=5) # doctest: +SKIP\n b'Hello from ssh debugger!\\n'\n >>> io.sendline(b'This will be echoed back')\n >>> io.recvline()\n b'This will be echoed back\\n'\n >>> io.close()\n \"\"\"\n if context.noptrace:\n log.warn_once(\"Skipping debug attach since context.noptrace==True\")\n return\n\n # if gdbscript is a file object, then read it; we probably need to run some\n # more gdb script anyway\n if hasattr(gdbscript, 'read'):\n with gdbscript:\n gdbscript = gdbscript.read()\n\n # enable gdb.attach(p, 'continue')\n if gdbscript and not gdbscript.endswith('\\n'):\n gdbscript += '\\n'\n\n # Use a sane default sysroot for Android\n if not sysroot and context.os == 'android':\n sysroot = 'remote:/'\n\n # gdb script to run before `gdbscript`\n pre = ''\n if not context.native:\n pre += 'set endian %s\\n' % context.endian\n pre += 'set architecture %s\\n' % get_gdb_arch()\n if sysroot:\n pre += 'set sysroot %s\\n' % sysroot\n\n if context.os == 'android':\n pre += 'set gnutarget ' + _bfdname() + '\\n'\n\n if exe and context.os != 'baremetal':\n pre += 'file \"%s\"\\n' % exe\n\n # let's see if we can find a pid to attach to\n pid = None\n if isinstance(target, six.integer_types):\n # target is a pid, easy peasy\n pid = target\n elif isinstance(target, str):\n # pidof picks the youngest process\n pidof = proc.pidof\n\n if context.os == 'android':\n pidof = adb.pidof\n\n pids = list(pidof(target))\n if not pids:\n log.error('No such process: %s', target)\n pid = pids[0]\n log.info('Attaching to youngest process \"%s\" (PID = %d)' %\n (target, pid))\n elif isinstance(target, tubes.ssh.ssh_channel):\n if not target.pid:\n log.error(\"PID unknown for channel\")\n\n shell = target.parent\n\n tmpfile = shell.mktemp()\n gdbscript = b'shell rm %s\\n%s' % (tmpfile, packing._need_bytes(gdbscript, 2, 0x80))\n 
shell.upload_data(gdbscript or b'', tmpfile)\n\n cmd = ['ssh', '-C', '-t', '-p', str(shell.port), '-l', shell.user, shell.host]\n if shell.password:\n if not misc.which('sshpass'):\n log.error(\"sshpass must be installed to debug ssh processes\")\n cmd = ['sshpass', '-p', shell.password] + cmd\n if shell.keyfile:\n cmd += ['-i', shell.keyfile]\n cmd += ['gdb', '-q', target.executable, str(target.pid), '-x', tmpfile]\n\n misc.run_in_new_terminal(cmd)\n return\n\n elif isinstance(target, tubes.sock.sock):\n pids = proc.pidof(target)\n if not pids:\n log.error('Could not find remote process (%s:%d) on this machine' %\n target.sock.getpeername())\n pid = pids[0]\n\n # Specifically check for socat, since it has an intermediary process\n # if you do not specify \"nofork\" to the EXEC: argument\n # python(2640)───socat(2642)───socat(2643)───bash(2644)\n if proc.exe(pid).endswith('/socat') and time.sleep(0.1) and proc.children(pid):\n pid = proc.children(pid)[0]\n\n # We may attach to the remote process after the fork but before it performs an exec. \n # If an exe is provided, wait until the process is actually running the expected exe\n # before we attach the debugger.\n t = Timeout()\n with t.countdown(2):\n while exe and os.path.realpath(proc.exe(pid)) != os.path.realpath(exe) and t.timeout:\n time.sleep(0.1)\n\n elif isinstance(target, tubes.process.process):\n pid = proc.pidof(target)[0]\n exe = exe or target.executable\n elif isinstance(target, tuple) and len(target) == 2:\n host, port = target\n\n if context.os != 'android':\n pre += 'target remote %s:%d\\n' % (host, port)\n else:\n # Android debugging is done over gdbserver, which can't follow\n # new inferiors (tldr; follow-fork-mode child) unless it is run\n # in extended-remote mode.\n pre += 'target extended-remote %s:%d\\n' % (host, port)\n pre += 'set detach-on-fork off\\n'\n\n def findexe():\n for spid in proc.pidof(target):\n sexe = proc.exe(spid)\n name = os.path.basename(sexe)\n # XXX: parse cmdline\n if name.startswith('qemu-') or name.startswith('gdbserver'):\n exe = proc.cmdline(spid)[-1]\n return os.path.join(proc.cwd(spid), exe)\n\n exe = exe or findexe()\n elif isinstance(target, elf.corefile.Corefile):\n pre += 'target core \"%s\"\\n' % target.path\n else:\n log.error(\"don't know how to attach to target: %r\", target)\n\n # if we have a pid but no exe, just look it up in /proc/\n if pid and not exe:\n exe_fn = proc.exe\n if context.os == 'android':\n exe_fn = adb.proc_exe\n exe = exe_fn(pid)\n\n if not pid and not exe and not ssh:\n log.error('could not find target process')\n\n gdb_binary = binary()\n cmd = [gdb_binary]\n\n if gdb_args:\n cmd += gdb_args\n\n if context.gdbinit:\n cmd += ['-nh'] # ignore ~/.gdbinit\n cmd += ['-x', context.gdbinit] # load custom gdbinit\n\n cmd += ['-q']\n\n if exe and context.native:\n if not ssh and not os.path.isfile(exe):\n log.error('No such file: %s', exe)\n cmd += [exe]\n\n if pid and not context.os == 'android':\n cmd += [str(pid)]\n\n if context.os == 'android' and pid:\n runner = _get_runner()\n which = _get_which()\n gdb_cmd = _gdbserver_args(pid=pid, which=which)\n gdbserver = runner(gdb_cmd)\n port = _gdbserver_port(gdbserver, None)\n host = context.adb_host\n pre += 'target extended-remote %s:%i\\n' % (context.adb_host, port)\n\n # gdbserver on Android sets 'detach-on-fork on' which breaks things\n # when you're trying to debug anything that forks.\n pre += 'set detach-on-fork off\\n'\n\n if api:\n # create a UNIX socket for talking to GDB\n socket_dir = tempfile.mkdtemp()\n 
socket_path = os.path.join(socket_dir, 'socket')\n bridge = os.path.join(os.path.dirname(__file__), 'gdb_api_bridge.py')\n\n # inject the socket path and the GDB Python API bridge\n pre = 'python socket_path = ' + repr(socket_path) + '\\n' + \\\n 'source ' + bridge + '\\n' + \\\n pre\n\n gdbscript = pre + (gdbscript or '')\n\n if gdbscript:\n tmp = tempfile.NamedTemporaryFile(prefix = 'pwn', suffix = '.gdb',\n delete = False, mode = 'w+')\n log.debug('Wrote gdb script to %r\\n%s', tmp.name, gdbscript)\n gdbscript = 'shell rm %s\\n%s' % (tmp.name, gdbscript)\n\n tmp.write(gdbscript)\n tmp.close()\n cmd += ['-x', tmp.name]\n\n log.info('running in new terminal: %s', cmd)\n\n if api:\n # prevent gdb_faketerminal.py from messing up api doctests\n def preexec_fn():\n os.environ['GDB_FAKETERMINAL'] = '0'\n else:\n preexec_fn = None\n gdb_pid = misc.run_in_new_terminal(cmd, preexec_fn = preexec_fn)\n\n if pid and context.native:\n proc.wait_for_debugger(pid, gdb_pid)\n\n if not api:\n return gdb_pid\n\n # connect to the GDB Python API bridge\n from rpyc import BgServingThread\n from rpyc.utils.factory import unix_connect\n if six.PY2:\n retriable = socket.error\n else:\n retriable = ConnectionRefusedError, FileNotFoundError\n\n t = Timeout()\n with t.countdown(10):\n while t.timeout:\n try:\n conn = unix_connect(socket_path)\n break\n except retriable:\n time.sleep(0.1)\n else:\n # Check to see if RPyC is installed at all in GDB\n rpyc_check = [gdb_binary, '--nx', '-batch', '-ex',\n 'python import rpyc; import sys; sys.exit(123)']\n\n if 123 != tubes.process.process(rpyc_check).poll(block=True):\n log.error('Failed to connect to GDB: rpyc is not installed')\n\n # Check to see if the socket ever got created\n if not os.path.exists(socket_path):\n log.error('Failed to connect to GDB: Unix socket %s was never created', socket_path)\n\n # Check to see if the remote RPyC client is a compatible version\n version_check = [gdb_binary, '--nx', '-batch', '-ex',\n 'python import platform; print(platform.python_version())']\n gdb_python_version = tubes.process.process(version_check).recvall().strip()\n python_version = str(platform.python_version())\n\n if gdb_python_version != python_version:\n log.error('Failed to connect to GDB: Version mismatch (%s vs %s)',\n gdb_python_version,\n python_version)\n\n # Don't know what happened\n log.error('Failed to connect to GDB: Unknown error')\n\n # now that connection is up, remove the socket from the filesystem\n os.unlink(socket_path)\n os.rmdir(socket_dir)\n\n # create a thread for receiving breakpoint notifications\n BgServingThread(conn, callback=lambda: None)\n\n return gdb_pid, Gdb(conn)\n\n\ndef ssh_gdb(ssh, argv, gdbscript = None, arch = None, **kwargs):\n if not isinstance(argv, (list, tuple)):\n argv = [argv]\n\n exe = argv[0]\n argv = [\"gdbserver\", \"--multi\", \"127.0.0.1:0\"] + argv\n\n # Download the executable\n local_exe = os.path.basename(exe)\n ssh.download_file(ssh.which(exe), local_exe)\n\n # Run the process\n c = ssh.process(argv, **kwargs)\n\n # Find the port for the gdb server\n c.recvuntil(b'port ')\n line = c.recvline().strip()\n gdbport = re.match(b'[0-9]+', line)\n if gdbport:\n gdbport = int(gdbport.group(0))\n\n l = tubes.listen.listen(0)\n forwardport = l.lport\n\n attach(('127.0.0.1', forwardport), gdbscript, local_exe, arch, ssh=ssh)\n l.wait_for_connection().connect_both(ssh.connect_remote('127.0.0.1', gdbport))\n return c\n\ndef find_module_addresses(binary, ssh=None, ulimit=False):\n \"\"\"\n Cheat to find modules by using 
GDB.\n\n We can't use ``/proc/$pid/map`` since some servers forbid it.\n This breaks ``info proc`` in GDB, but ``info sharedlibrary`` still works.\n Additionally, ``info sharedlibrary`` works on FreeBSD, which may not have\n procfs enabled or accessible.\n\n The output looks like this:\n\n ::\n\n info proc mapping\n process 13961\n warning: unable to open /proc file '/proc/13961/maps'\n\n info sharedlibrary\n From To Syms Read Shared Object Library\n 0xf7fdc820 0xf7ff505f Yes (*) /lib/ld-linux.so.2\n 0xf7fbb650 0xf7fc79f8 Yes /lib32/libpthread.so.0\n 0xf7e26f10 0xf7f5b51c Yes (*) /lib32/libc.so.6\n (*): Shared library is missing debugging information.\n\n Note that the raw addresses provided by ``info sharedlibrary`` are actually\n the address of the ``.text`` segment, not the image base address.\n\n This routine automates the entire process of:\n\n 1. Downloading the binaries from the remote server\n 2. Scraping GDB for the information\n 3. Loading each library into an ELF\n 4. Fixing up the base address vs. the ``.text`` segment address\n\n Arguments:\n binary(str): Path to the binary on the remote server\n ssh(pwnlib.tubes.tube): SSH connection through which to load the libraries.\n If left as :const:`None`, will use a :class:`pwnlib.tubes.process.process`.\n ulimit(bool): Set to :const:`True` to run \"ulimit -s unlimited\" before GDB.\n\n Returns:\n A list of pwnlib.elf.ELF objects, with correct base addresses.\n\n Example:\n\n >>> with context.local(log_level=9999):\n ... shell = ssh(host='example.pwnme', user='travis', password='demopass')\n ... bash_libs = gdb.find_module_addresses('/bin/bash', shell)\n >>> os.path.basename(bash_libs[0].path)\n 'libc.so.6'\n >>> hex(bash_libs[0].symbols['system']) # doctest: +SKIP\n '0x7ffff7634660'\n \"\"\"\n #\n # Download all of the remote libraries\n #\n if ssh:\n runner = ssh.run\n local_bin = ssh.download_file(binary)\n local_elf = elf.ELF(os.path.basename(binary))\n local_libs = ssh.libs(binary)\n\n else:\n runner = tubes.process.process\n local_elf = elf.ELF(binary)\n local_libs = local_elf.libs\n\n #\n # Get the addresses from GDB\n #\n libs = {}\n cmd = \"gdb -q -nh --args %s | cat\" % (binary) # pipe through cat to disable colored output on GDB 9+\n expr = re.compile(r'(0x\\S+)[^/]+(.*)')\n\n if ulimit:\n cmd = ['sh', '-c', \"(ulimit -s unlimited; %s)\" % cmd]\n else:\n cmd = ['sh', '-c', cmd]\n\n with runner(cmd) as gdb:\n if context.aslr:\n gdb.sendline(b'set disable-randomization off')\n\n gdb.send(b\"\"\"\\\n set prompt\n catch load\n run\n \"\"\")\n gdb.sendline(b'info sharedlibrary')\n lines = packing._decode(gdb.recvrepeat(2))\n\n for line in lines.splitlines():\n m = expr.match(line)\n if m:\n libs[m.group(2)] = int(m.group(1),16)\n gdb.sendline(b'kill')\n gdb.sendline(b'y')\n gdb.sendline(b'quit')\n\n #\n # Fix up all of the addresses against the .text address\n #\n rv = []\n\n for remote_path,text_address in sorted(libs.items()):\n # Match up the local copy to the remote path\n try:\n path = next(p for p in local_libs.keys() if remote_path in p)\n except StopIteration:\n print(\"Skipping %r\" % remote_path)\n continue\n\n # Load it\n lib = elf.ELF(path)\n\n # Find its text segment\n text = lib.get_section_by_name('.text')\n\n # Fix the address\n lib.address = text_address - text.header.sh_addr\n rv.append(lib)\n\n return rv\n\ndef corefile(process):\n r\"\"\"Drops a core file for a running local process.\n\n Note:\n You should use :meth:`.process.corefile` instead of using this method directly.\n\n Arguments:\n process: Process 
to dump\n\n Returns:\n :class:`.Core`: The generated core file\n\n Example:\n\n >>> io = process('bash')\n >>> core = gdb.corefile(io)\n >>> core.exe.name # doctest: +ELLIPSIS\n '.../bin/bash'\n \"\"\"\n\n if context.noptrace:\n log.warn_once(\"Skipping corefile since context.noptrace==True\")\n return\n\n corefile_path = './core.%s.%i' % (os.path.basename(process.executable),\n process.pid)\n\n # Due to https://sourceware.org/bugzilla/show_bug.cgi?id=16092\n # will disregard coredump_filter, and will not dump private mappings.\n if version() < (7,11):\n log.warn_once('The installed GDB (%s) does not emit core-dumps which '\n 'contain all of the data in the process.\\n'\n 'Upgrade to GDB >= 7.11 for better core-dumps.' % binary())\n\n # This is effectively the same as what the 'gcore' binary does\n gdb_args = ['-batch',\n '-q',\n '-nx',\n '-ex', 'set pagination off',\n '-ex', 'set height 0',\n '-ex', 'set width 0',\n '-ex', 'set use-coredump-filter on',\n '-ex', 'generate-core-file %s' % corefile_path,\n '-ex', 'detach']\n\n with context.local(terminal = ['sh', '-c']):\n with context.quiet:\n pid = attach(process, gdb_args=gdb_args)\n log.debug(\"Got GDB pid %d\", pid)\n try:\n psutil.Process(pid).wait()\n except psutil.Error:\n pass\n\n if not os.path.exists(corefile_path):\n log.error(\"Could not generate a corefile for process %d\", process.pid)\n\n return elf.corefile.Core(corefile_path)\n\ndef version(program='gdb'):\n \"\"\"Gets the current GDB version.\n\n Note:\n Requires that GDB version meets the following format:\n\n ``GNU gdb (GDB) 7.12``\n\n Returns:\n tuple: A tuple containing the version numbers\n\n Example:\n\n >>> (7,0) <= gdb.version() <= (12,0)\n True\n \"\"\"\n program = misc.which(program)\n expr = br'([0-9]+\\.?)+'\n\n with tubes.process.process([program, '--version'], level='error', stdout=tubes.process.PIPE) as gdb:\n version = gdb.recvline()\n\n versions = re.search(expr, version).group()\n\n return tuple(map(int, versions.split(b'.')))\n", "path": "pwnlib/gdb.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nDuring exploit development, it is frequently useful to debug the\ntarget binary under GDB.\n\nPwntools makes this easy-to-do with a handful of helper routines, designed\nto make your exploit-debug-update cycles much faster.\n\nUseful Functions\n----------------\n\n- :func:`attach` - Attach to an existing process\n- :func:`debug` - Start a new process under a debugger, stopped at the first instruction\n- :func:`debug_shellcode` - Build a binary with the provided shellcode, and start it under a debugger\n\nDebugging Tips\n--------------\n\nThe :func:`attach` and :func:`debug` functions will likely be your bread and\nbutter for debugging.\n\nBoth allow you to provide a script to pass to GDB when it is started, so that\nit can automatically set your breakpoints.\n\nAttaching to Processes\n~~~~~~~~~~~~~~~~~~~~~~\n\nTo attach to an existing process, just use :func:`attach`. It is surprisingly\nversatile, and can attach to a :class:`.process` for simple\nbinaries, or will automatically find the correct process to attach to for a\nforking server, if given a :class:`.remote` object.\n\nSpawning New Processes\n~~~~~~~~~~~~~~~~~~~~~~\n\nAttaching to processes with :func:`attach` is useful, but the state the process\nis in may vary. If you need to attach to a process very early, and debug it from\nthe very first instruction (or even the start of ``main``), you instead should use\n:func:`debug`.\n\nWhen you use :func:`debug`, the return value is a :class:`.tube` object\nthat you interact with exactly like normal.\n\nUsing GDB Python API\n~~~~~~~~~~~~~~~~~~~~\n\nGDB provides Python API, which is documented at\nhttps://sourceware.org/gdb/onlinedocs/gdb/Python-API.html. Pwntools allows you\nto call it right from the exploit, without having to write a gdbscript. This is\nuseful for inspecting program state, e.g. asserting that leaked values are\ncorrect, or that certain packets trigger a particular code path or put the heap\nin a desired state.\n\nPass ``api=True`` to :func:`attach` or :func:`debug` in order to enable GDB\nPython API access. Pwntools will then connect to GDB using RPyC library:\nhttps://rpyc.readthedocs.io/en/latest/.\n\nAt the moment this is an experimental feature with the following limitations:\n\n- Only Python 3 is supported.\n\n Well, technically that's not quite true. The real limitation is that your\n GDB's Python interpreter major version should be the same as that of\n Pwntools. However, most GDBs use Python 3 nowadays.\n\n Different minor versions are allowed as long as no incompatible values are\n sent in either direction. See\n https://rpyc.readthedocs.io/en/latest/install.html#cross-interpreter-compatibility\n for more information.\n\n Use\n\n ::\n\n $ gdb -batch -ex 'python import sys; print(sys.version)'\n\n in order to check your GDB's Python version.\n- If your GDB uses a different Python interpreter than Pwntools (for example,\n because you run Pwntools out of a virtualenv), you should install ``rpyc``\n package into its ``sys.path``. Use\n\n ::\n\n $ gdb -batch -ex 'python import rpyc'\n\n in order to check whether this is necessary.\n- Only local processes are supported.\n- It is not possible to tell whether ``gdb.execute('continue')`` will be\n executed synchronously or asynchronously (in gdbscripts it is always\n synchronous). 
Therefore it is recommended to use either the explicitly\n synchronous :func:`pwnlib.gdb.Gdb.continue_and_wait` or the explicitly\n asynchronous :func:`pwnlib.gdb.Gdb.continue_nowait` instead.\n\nTips and Troubleshooting\n------------------------\n\n``NOPTRACE`` magic argument\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIt's quite cumbersom to comment and un-comment lines containing `attach`.\n\nYou can cause these lines to be a no-op by running your script with the\n``NOPTRACE`` argument appended, or with ``PWNLIB_NOPTRACE=1`` in the environment.\n\n::\n\n $ python exploit.py NOPTRACE\n [+] Starting local process '/bin/bash': Done\n [!] Skipping debug attach since context.noptrace==True\n ...\n\nKernel Yama ptrace_scope\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe Linux kernel v3.4 introduced a security mechanism called ``ptrace_scope``,\nwhich is intended to prevent processes from debugging eachother unless there is\na direct parent-child relationship.\n\nThis causes some issues with the normal Pwntools workflow, since the process\nhierarchy looks like this:\n\n::\n\n python ---> target\n `--> gdb\n\nNote that ``python`` is the parent of ``target``, not ``gdb``.\n\nIn order to avoid this being a problem, Pwntools uses the function\n``prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY)``. This disables Yama\nfor any processes launched by Pwntools via :class:`.process` or via\n:meth:`.ssh.process`.\n\nOlder versions of Pwntools did not perform the ``prctl`` step, and\nrequired that the Yama security feature was disabled systemwide, which\nrequires ``root`` access.\n\nMember Documentation\n===============================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom contextlib import contextmanager\nimport os\nimport platform\nimport psutil\nimport random\nimport re\nimport shlex\nimport six\nimport six.moves\nimport socket\nimport tempfile\nfrom threading import Event\nimport time\n\nfrom pwnlib import adb\nfrom pwnlib import atexit\nfrom pwnlib import elf\nfrom pwnlib import qemu\nfrom pwnlib import tubes\nfrom pwnlib.asm import _bfdname\nfrom pwnlib.asm import make_elf\nfrom pwnlib.asm import make_elf_from_assembly\nfrom pwnlib.context import LocalContext\nfrom pwnlib.context import context\nfrom pwnlib.log import getLogger\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.util import misc\nfrom pwnlib.util import packing\nfrom pwnlib.util import proc\n\nlog = getLogger(__name__)\n\n@LocalContext\ndef debug_assembly(asm, gdbscript=None, vma=None, api=False):\n r\"\"\"debug_assembly(asm, gdbscript=None, vma=None, api=False) -> tube\n\n Creates an ELF file, and launches it under a debugger.\n\n This is identical to debug_shellcode, except that\n any defined symbols are available in GDB, and it\n saves you the explicit call to asm().\n\n Arguments:\n asm(str): Assembly code to debug\n gdbscript(str): Script to run in GDB\n vma(int): Base address to load the shellcode at\n api(bool): Enable access to GDB Python API\n \\**kwargs: Override any :obj:`pwnlib.context.context` values.\n\n Returns:\n :class:`.process`\n\n Example:\n\n >>> assembly = shellcraft.echo(\"Hello world!\\n\")\n >>> io = gdb.debug_assembly(assembly)\n >>> io.recvline()\n b'Hello world!\\n'\n \"\"\"\n tmp_elf = make_elf_from_assembly(asm, vma=vma, extract=False)\n os.chmod(tmp_elf, 0o777)\n\n atexit.register(lambda: os.unlink(tmp_elf))\n\n if context.os == 'android':\n android_path = '/data/data/%s' % os.path.basename(tmp_elf)\n adb.push(tmp_elf, android_path)\n tmp_elf = android_path\n\n return debug(tmp_elf, 
gdbscript=gdbscript, arch=context.arch, api=api)\n\n@LocalContext\ndef debug_shellcode(data, gdbscript=None, vma=None, api=False):\n r\"\"\"debug_shellcode(data, gdbscript=None, vma=None, api=False) -> tube\n Creates an ELF file, and launches it under a debugger.\n\n Arguments:\n data(str): Assembled shellcode bytes\n gdbscript(str): Script to run in GDB\n vma(int): Base address to load the shellcode at\n api(bool): Enable access to GDB Python API\n \\**kwargs: Override any :obj:`pwnlib.context.context` values.\n\n Returns:\n :class:`.process`\n\n Example:\n\n >>> assembly = shellcraft.echo(\"Hello world!\\n\")\n >>> shellcode = asm(assembly)\n >>> io = gdb.debug_shellcode(shellcode)\n >>> io.recvline()\n b'Hello world!\\n'\n \"\"\"\n if isinstance(data, six.text_type):\n log.error(\"Shellcode is cannot be unicode. Did you mean debug_assembly?\")\n tmp_elf = make_elf(data, extract=False, vma=vma)\n os.chmod(tmp_elf, 0o777)\n\n atexit.register(lambda: os.unlink(tmp_elf))\n\n if context.os == 'android':\n android_path = '/data/data/%s' % os.path.basename(tmp_elf)\n adb.push(tmp_elf, android_path)\n tmp_elf = android_path\n\n return debug(tmp_elf, gdbscript=gdbscript, arch=context.arch, api=api)\n\ndef _gdbserver_args(pid=None, path=None, args=None, which=None, env=None):\n \"\"\"_gdbserver_args(pid=None, path=None, args=None, which=None, env=None) -> list\n\n Sets up a listening gdbserver, to either connect to the specified\n PID, or launch the specified binary by its full path.\n\n Arguments:\n pid(int): Process ID to attach to\n path(str): Process to launch\n args(list): List of arguments to provide on the debugger command line\n which(callaable): Function to find the path of a binary.\n\n Returns:\n A list of arguments to invoke gdbserver.\n \"\"\"\n if [pid, path, args].count(None) != 2:\n log.error(\"Must specify exactly one of pid, path, or args\")\n\n if not which:\n log.error(\"Must specify which.\")\n\n gdbserver = ''\n\n if not args:\n args = [str(path or pid)]\n\n # Android targets have a distinct gdbserver\n if context.bits == 64:\n gdbserver = which('gdbserver64')\n\n if not gdbserver:\n gdbserver = which('gdbserver')\n\n if not gdbserver:\n log.error(\"gdbserver is not installed\")\n\n orig_args = args\n\n gdbserver_args = [gdbserver, '--multi']\n if context.aslr:\n gdbserver_args += ['--no-disable-randomization']\n else:\n log.warn_once(\"Debugging process with ASLR disabled\")\n\n if pid:\n gdbserver_args += ['--once', '--attach']\n\n if env is not None:\n env_args = []\n for key in tuple(env):\n if key.startswith(b'LD_'): # LD_PRELOAD / LD_LIBRARY_PATH etc.\n env_args.append(b'%s=%s' % (key, env.pop(key)))\n else:\n env_args.append(b'%s=%s' % (key, env[key]))\n gdbserver_args += ['--wrapper', which('env'), '-i'] + env_args + ['--']\n\n gdbserver_args += ['localhost:0']\n gdbserver_args += args\n\n return gdbserver_args\n\ndef _gdbserver_port(gdbserver, ssh):\n which = _get_which(ssh)\n\n # Process /bin/bash created; pid = 14366\n # Listening on port 34816\n process_created = gdbserver.recvline()\n\n if process_created.startswith(b'ERROR:'):\n raise ValueError(\n 'Failed to spawn process under gdbserver. 
gdbserver error message: %r' % process_created\n )\n\n try:\n gdbserver.pid = int(process_created.split()[-1], 0)\n except ValueError:\n log.error('gdbserver did not output its pid (maybe chmod +x?): %r', process_created)\n\n listening_on = b''\n while b'Listening' not in listening_on:\n listening_on = gdbserver.recvline()\n\n port = int(listening_on.split()[-1])\n\n # Set up port forarding for SSH\n if ssh:\n remote = ssh.connect_remote('127.0.0.1', port)\n listener = tubes.listen.listen(0)\n port = listener.lport\n\n # Disable showing GDB traffic when debugging verbosity is increased\n remote.level = 'error'\n listener.level = 'error'\n\n # Hook them up\n remote.connect_both(listener)\n\n # Set up port forwarding for ADB\n elif context.os == 'android':\n adb.forward(port)\n\n return port\n\ndef _get_which(ssh=None):\n if ssh: return ssh.which\n elif context.os == 'android': return adb.which\n else: return misc.which\n\ndef _get_runner(ssh=None):\n if ssh: return ssh.process\n elif context.os == 'android': return adb.process\n else: return tubes.process.process\n\n@LocalContext\ndef debug(args, gdbscript=None, exe=None, ssh=None, env=None, sysroot=None, api=False, **kwargs):\n r\"\"\"\n Launch a GDB server with the specified command line,\n and launches GDB to attach to it.\n\n Arguments:\n args(list): Arguments to the process, similar to :class:`.process`.\n gdbscript(str): GDB script to run.\n exe(str): Path to the executable on disk\n env(dict): Environment to start the binary in\n ssh(:class:`.ssh`): Remote ssh session to use to launch the process.\n sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries\n and Android targets.\n api(bool): Enable access to GDB Python API.\n\n Returns:\n :class:`.process` or :class:`.ssh_channel`: A tube connected to the target process.\n When ``api=True``, ``gdb`` member of the returned object contains a :class:`Gdb`\n instance.\n\n Notes:\n\n The debugger is attached automatically, and you can debug everything\n from the very beginning. This requires that both ``gdb`` and ``gdbserver``\n are installed on your machine.\n\n When GDB opens via :func:`debug`, it will initially be stopped on the very first\n instruction of the dynamic linker (``ld.so``) for dynamically-linked binaries.\n\n Only the target binary and the linker will be loaded in memory, so you cannot\n set breakpoints on shared library routines like ``malloc`` since ``libc.so``\n has not even been loaded yet.\n\n There are several ways to handle this:\n\n 1. Set a breakpoint on the executable's entry point (generally, ``_start``)\n - This is only invoked after all of the required shared libraries\n are loaded.\n - You can generally get the address via the GDB command ``info file``.\n 2. Use pending breakpoints via ``set breakpoint pending on``\n - This has the side-effect of setting breakpoints for **every** function\n which matches the name. For ``malloc``, this will generally set a\n breakpoint in the executable's PLT, in the linker's internal ``malloc``,\n and eventaully in ``libc``'s malloc.\n 3. Wait for libraries to be loaded with ``set stop-on-solib-event 1``\n - There is no way to stop on any specific library being loaded, and sometimes\n multiple libraries are loaded and only a single breakpoint is issued.\n - Generally, you just add a few ``continue`` commands until things are set up\n the way you want it to be.\n\n Examples:\n\n Create a new process, and stop it at 'main'\n\n >>> io = gdb.debug('bash', '''\n ... break main\n ... continue\n ... 
''')\n\n Send a command to Bash\n\n >>> io.sendline(b\"echo hello\")\n >>> io.recvline()\n b'hello\\n'\n\n Interact with the process\n\n >>> io.interactive() # doctest: +SKIP\n >>> io.close()\n\n Create a new process, and stop it at '_start'\n\n >>> io = gdb.debug('bash', '''\n ... # Wait until we hit the main executable's entry point\n ... break _start\n ... continue\n ...\n ... # Now set breakpoint on shared library routines\n ... break malloc\n ... break free\n ... continue\n ... ''')\n\n Send a command to Bash\n\n >>> io.sendline(b\"echo hello\")\n >>> io.recvline()\n b'hello\\n'\n\n Interact with the process\n\n >>> io.interactive() # doctest: +SKIP\n >>> io.close()\n\n Using GDB Python API:\n\n .. doctest\n :skipif: six.PY2\n\n Debug a new process\n\n >>> io = gdb.debug(['echo', 'foo'], api=True)\n\n Stop at 'write'\n\n >>> bp = io.gdb.Breakpoint('write', temporary=True)\n >>> io.gdb.continue_and_wait()\n\n Dump 'count'\n\n >>> count = io.gdb.parse_and_eval('$rdx')\n >>> long = io.gdb.lookup_type('long')\n >>> int(count.cast(long))\n 4\n\n Resume the program\n\n >>> io.gdb.continue_nowait()\n >>> io.recvline()\n b'foo\\n'\n\n\n Using SSH:\n\n You can use :func:`debug` to spawn new processes on remote machines as well,\n by using the ``ssh=`` keyword to pass in your :class:`.ssh` instance.\n\n Connect to the SSH server and start a process on the server\n\n >>> shell = ssh('travis', 'example.pwnme', password='demopass')\n >>> io = gdb.debug(['whoami'],\n ... ssh = shell,\n ... gdbscript = '''\n ... break main\n ... continue\n ... ''')\n\n Send a command to Bash\n\n >>> io.sendline(b\"echo hello\")\n\n Interact with the process\n >>> io.interactive() # doctest: +SKIP\n >>> io.close()\n \"\"\"\n if isinstance(args, six.integer_types + (tubes.process.process, tubes.ssh.ssh_channel)):\n log.error(\"Use gdb.attach() to debug a running process\")\n\n if isinstance(args, (bytes, six.text_type)):\n args = [args]\n\n orig_args = args\n\n runner = _get_runner(ssh)\n which = _get_which(ssh)\n gdbscript = gdbscript or ''\n\n if api and runner is not tubes.process.process:\n raise ValueError('GDB Python API is supported only for local processes')\n\n args, env = misc.normalize_argv_env(args, env, log)\n if env:\n env = {bytes(k): bytes(v) for k, v in env}\n\n if context.noptrace:\n log.warn_once(\"Skipping debugger since context.noptrace==True\")\n return runner(args, executable=exe, env=env)\n\n if ssh or context.native or (context.os == 'android'):\n args = _gdbserver_args(args=args, which=which, env=env)\n else:\n qemu_port = random.randint(1024, 65535)\n qemu_user = qemu.user_path()\n sysroot = sysroot or qemu.ld_prefix(env=env)\n if not qemu_user:\n log.error(\"Cannot debug %s binaries without appropriate QEMU binaries\" % context.arch)\n if context.os == 'baremetal':\n qemu_args = [qemu_user, '-S', '-gdb', 'tcp::' + str(qemu_port)]\n else:\n qemu_args = [qemu_user, '-g', str(qemu_port)]\n if sysroot:\n qemu_args += ['-L', sysroot]\n args = qemu_args + args\n\n # Use a sane default sysroot for Android\n if not sysroot and context.os == 'android':\n sysroot = 'remote:/'\n\n # Make sure gdbserver/qemu is installed\n if not which(args[0]):\n log.error(\"%s is not installed\" % args[0])\n\n if not ssh:\n exe = exe or which(orig_args[0])\n if not (exe and os.path.exists(exe)):\n log.error(\"%s does not exist\" % exe)\n\n # Start gdbserver/qemu\n # (Note: We override ASLR here for the gdbserver process itself.)\n gdbserver = runner(args, env=env, aslr=1, **kwargs)\n\n # Set the .executable on the 
process object.\n gdbserver.executable = exe\n\n # Find what port we need to connect to\n if ssh or context.native or (context.os == 'android'):\n port = _gdbserver_port(gdbserver, ssh)\n else:\n port = qemu_port\n\n host = '127.0.0.1'\n if not ssh and context.os == 'android':\n host = context.adb_host\n\n tmp = attach((host, port), exe=exe, gdbscript=gdbscript, ssh=ssh, sysroot=sysroot, api=api)\n if api:\n _, gdb = tmp\n gdbserver.gdb = gdb\n\n # gdbserver outputs a message when a client connects\n garbage = gdbserver.recvline(timeout=1)\n\n # Some versions of gdbserver output an additional message\n garbage2 = gdbserver.recvline_startswith(b\"Remote debugging from host \", timeout=2)\n\n return gdbserver\n\ndef get_gdb_arch():\n return {\n 'amd64': 'i386:x86-64',\n 'powerpc': 'powerpc:common',\n 'powerpc64': 'powerpc:common64',\n 'mips64': 'mips:isa64',\n 'thumb': 'arm',\n 'sparc64': 'sparc:v9'\n }.get(context.arch, context.arch)\n\ndef binary():\n \"\"\"binary() -> str\n\n Returns:\n str: Path to the appropriate ``gdb`` binary to use.\n\n Example:\n\n >>> gdb.binary() # doctest: +SKIP\n '/usr/bin/gdb'\n \"\"\"\n gdb = misc.which('pwntools-gdb') or misc.which('gdb')\n\n if not context.native:\n multiarch = misc.which('gdb-multiarch')\n\n if multiarch:\n return multiarch\n log.warn_once('Cross-architecture debugging usually requires gdb-multiarch\\n'\n '$ apt-get install gdb-multiarch')\n\n if not gdb:\n log.error('GDB is not installed\\n'\n '$ apt-get install gdb')\n\n return gdb\n\nclass Breakpoint:\n \"\"\"Mirror of ``gdb.Breakpoint`` class.\n\n See https://sourceware.org/gdb/onlinedocs/gdb/Breakpoints-In-Python.html\n for more information.\n \"\"\"\n\n def __init__(self, conn, *args, **kwargs):\n \"\"\"Do not create instances of this class directly.\n\n Use ``pwnlib.gdb.Gdb.Breakpoint`` instead.\n \"\"\"\n # Creates a real breakpoint and connects it with this mirror\n self.conn = conn\n self.server_breakpoint = conn.root.set_breakpoint(\n self, hasattr(self, 'stop'), *args, **kwargs)\n\n def __getattr__(self, item):\n \"\"\"Return attributes of the real breakpoint.\"\"\"\n if item in (\n '____id_pack__',\n '__name__',\n '____conn__',\n 'stop',\n ):\n # Ignore RPyC netref attributes.\n # Also, if stop() is not defined, hasattr() call in our\n # __init__() will bring us here. 
Don't contact the\n # server in this case either.\n raise AttributeError()\n return getattr(self.server_breakpoint, item)\n\n def exposed_stop(self):\n # Handle stop() call from the server.\n return self.stop()\n\nclass Gdb:\n \"\"\"Mirror of ``gdb`` module.\n\n See https://sourceware.org/gdb/onlinedocs/gdb/Basic-Python.html for more\n information.\n \"\"\"\n\n def __init__(self, conn):\n \"\"\"Do not create instances of this class directly.\n\n Use :func:`attach` or :func:`debug` with ``api=True`` instead.\n \"\"\"\n self.conn = conn\n\n class _Breakpoint(Breakpoint):\n def __init__(self, *args, **kwargs):\n super().__init__(conn, *args, **kwargs)\n\n self.Breakpoint = _Breakpoint\n self.stopped = Event()\n\n def stop_handler(event):\n self.stopped.set()\n\n self.events.stop.connect(stop_handler)\n\n def __getattr__(self, item):\n \"\"\"Provide access to the attributes of `gdb` module.\"\"\"\n return getattr(self.conn.root.gdb, item)\n\n def wait(self):\n \"\"\"Wait until the program stops.\"\"\"\n self.stopped.wait()\n self.stopped.clear()\n\n def interrupt_and_wait(self):\n \"\"\"Interrupt the program and wait until it stops.\"\"\"\n self.execute('interrupt')\n self.wait()\n\n def continue_nowait(self):\n \"\"\"Continue the program. Do not wait until it stops again.\"\"\"\n self.execute('continue &')\n\n def continue_and_wait(self):\n \"\"\"Continue the program and wait until it stops again.\"\"\"\n self.continue_nowait()\n self.wait()\n\n def quit(self):\n \"\"\"Terminate GDB.\"\"\"\n self.conn.root.quit()\n\n@LocalContext\ndef attach(target, gdbscript = '', exe = None, gdb_args = None, ssh = None, sysroot = None, api = False):\n r\"\"\"\n Start GDB in a new terminal and attach to `target`.\n\n Arguments:\n target: The target to attach to.\n gdbscript(:obj:`str` or :obj:`file`): GDB script to run after attaching.\n exe(str): The path of the target binary.\n arch(str): Architechture of the target binary. If `exe` known GDB will\n detect the architechture automatically (if it is supported).\n gdb_args(list): List of additional arguments to pass to GDB.\n sysroot(str): Foreign-architecture sysroot, used for QEMU-emulated binaries\n and Android targets.\n api(bool): Enable access to GDB Python API.\n\n Returns:\n PID of the GDB process (or the window which it is running in).\n When ``api=True``, a (PID, :class:`Gdb`) tuple.\n\n Notes:\n\n The ``target`` argument is very robust, and can be any of the following:\n\n :obj:`int`\n PID of a process\n :obj:`str`\n Process name. The youngest process is selected.\n :obj:`tuple`\n Host, port pair of a listening ``gdbserver``\n :class:`.process`\n Process to connect to\n :class:`.sock`\n Connected socket. The executable on the other end of the connection is attached to.\n Can be any socket type, including :class:`.listen` or :class:`.remote`.\n :class:`.ssh_channel`\n Remote process spawned via :meth:`.ssh.process`.\n This will use the GDB installed on the remote machine.\n If a password is required to connect, the ``sshpass`` program must be installed.\n\n Examples:\n\n Attach to a process by PID\n\n >>> pid = gdb.attach(1234) # doctest: +SKIP\n\n Attach to the youngest process by name\n\n >>> pid = gdb.attach('bash') # doctest: +SKIP\n\n Attach a debugger to a :class:`.process` tube and automate interaction\n\n >>> io = process('bash')\n >>> pid = gdb.attach(io, gdbscript='''\n ... call puts(\"Hello from process debugger!\")\n ... detach\n ... quit\n ... 
''')\n >>> io.recvline()\n b'Hello from process debugger!\\n'\n >>> io.sendline(b'echo Hello from bash && exit')\n >>> io.recvall()\n b'Hello from bash\\n'\n\n Using GDB Python API:\n\n .. doctest\n :skipif: six.PY2\n\n >>> io = process('bash')\n\n Attach a debugger\n\n >>> pid, io_gdb = gdb.attach(io, api=True)\n\n Force the program to write something it normally wouldn't\n\n >>> io_gdb.execute('call puts(\"Hello from process debugger!\")')\n\n Resume the program\n\n >>> io_gdb.continue_nowait()\n\n Observe the forced line\n\n >>> io.recvline()\n b'Hello from process debugger!\\n'\n\n Interact with the program in a regular way\n\n >>> io.sendline(b'echo Hello from bash && exit')\n\n Observe the results\n\n >>> io.recvall()\n b'Hello from bash\\n'\n\n Attach to the remote process from a :class:`.remote` or :class:`.listen` tube,\n as long as it is running on the same machine.\n\n >>> server = process(['socat', 'tcp-listen:12345,reuseaddr,fork', 'exec:/bin/bash,nofork'])\n >>> sleep(1) # Wait for socat to start\n >>> io = remote('127.0.0.1', 12345)\n >>> sleep(1) # Wait for process to fork\n >>> pid = gdb.attach(io, gdbscript='''\n ... call puts(\"Hello from remote debugger!\")\n ... detach\n ... quit\n ... ''')\n >>> io.recvline()\n b'Hello from remote debugger!\\n'\n >>> io.sendline(b'echo Hello from bash && exit')\n >>> io.recvall()\n b'Hello from bash\\n'\n\n Attach to processes running on a remote machine via an SSH :class:`.ssh` process\n\n >>> shell = ssh('travis', 'example.pwnme', password='demopass')\n >>> io = shell.process(['cat'])\n >>> pid = gdb.attach(io, gdbscript='''\n ... call sleep(5)\n ... call puts(\"Hello from ssh debugger!\")\n ... detach\n ... quit\n ... ''')\n >>> io.recvline(timeout=5) # doctest: +SKIP\n b'Hello from ssh debugger!\\n'\n >>> io.sendline(b'This will be echoed back')\n >>> io.recvline()\n b'This will be echoed back\\n'\n >>> io.close()\n \"\"\"\n if context.noptrace:\n log.warn_once(\"Skipping debug attach since context.noptrace==True\")\n return\n\n # if gdbscript is a file object, then read it; we probably need to run some\n # more gdb script anyway\n if hasattr(gdbscript, 'read'):\n with gdbscript:\n gdbscript = gdbscript.read()\n\n # enable gdb.attach(p, 'continue')\n if gdbscript and not gdbscript.endswith('\\n'):\n gdbscript += '\\n'\n\n # Use a sane default sysroot for Android\n if not sysroot and context.os == 'android':\n sysroot = 'remote:/'\n\n # gdb script to run before `gdbscript`\n pre = ''\n if not context.native:\n pre += 'set endian %s\\n' % context.endian\n pre += 'set architecture %s\\n' % get_gdb_arch()\n if sysroot:\n pre += 'set sysroot %s\\n' % sysroot\n\n if context.os == 'android':\n pre += 'set gnutarget ' + _bfdname() + '\\n'\n\n if exe and context.os != 'baremetal':\n pre += 'file \"%s\"\\n' % exe\n\n # let's see if we can find a pid to attach to\n pid = None\n if isinstance(target, six.integer_types):\n # target is a pid, easy peasy\n pid = target\n elif isinstance(target, str):\n # pidof picks the youngest process\n pidof = proc.pidof\n\n if context.os == 'android':\n pidof = adb.pidof\n\n pids = list(pidof(target))\n if not pids:\n log.error('No such process: %s', target)\n pid = pids[0]\n log.info('Attaching to youngest process \"%s\" (PID = %d)' %\n (target, pid))\n elif isinstance(target, tubes.ssh.ssh_channel):\n if not target.pid:\n log.error(\"PID unknown for channel\")\n\n shell = target.parent\n\n tmpfile = shell.mktemp()\n gdbscript = b'shell rm %s\\n%s' % (tmpfile, packing._need_bytes(gdbscript, 2, 0x80))\n 
shell.upload_data(gdbscript or b'', tmpfile)\n\n cmd = ['ssh', '-C', '-t', '-p', str(shell.port), '-l', shell.user, shell.host]\n if shell.password:\n if not misc.which('sshpass'):\n log.error(\"sshpass must be installed to debug ssh processes\")\n cmd = ['sshpass', '-p', shell.password] + cmd\n if shell.keyfile:\n cmd += ['-i', shell.keyfile]\n cmd += ['gdb', '-q', target.executable, str(target.pid), '-x', tmpfile]\n\n misc.run_in_new_terminal(cmd)\n return\n\n elif isinstance(target, tubes.sock.sock):\n pids = proc.pidof(target)\n if not pids:\n log.error('Could not find remote process (%s:%d) on this machine' %\n target.sock.getpeername())\n pid = pids[0]\n\n # Specifically check for socat, since it has an intermediary process\n # if you do not specify \"nofork\" to the EXEC: argument\n # python(2640)───socat(2642)───socat(2643)───bash(2644)\n if proc.exe(pid).endswith('/socat') and time.sleep(0.1) and proc.children(pid):\n pid = proc.children(pid)[0]\n\n # We may attach to the remote process after the fork but before it performs an exec. \n # If an exe is provided, wait until the process is actually running the expected exe\n # before we attach the debugger.\n t = Timeout()\n with t.countdown(2):\n while exe and os.path.realpath(proc.exe(pid)) != os.path.realpath(exe) and t.timeout:\n time.sleep(0.1)\n\n elif isinstance(target, tubes.process.process):\n pid = proc.pidof(target)[0]\n exe = exe or target.executable\n elif isinstance(target, tuple) and len(target) == 2:\n host, port = target\n\n if context.os != 'android':\n pre += 'target remote %s:%d\\n' % (host, port)\n else:\n # Android debugging is done over gdbserver, which can't follow\n # new inferiors (tldr; follow-fork-mode child) unless it is run\n # in extended-remote mode.\n pre += 'target extended-remote %s:%d\\n' % (host, port)\n pre += 'set detach-on-fork off\\n'\n\n def findexe():\n for spid in proc.pidof(target):\n sexe = proc.exe(spid)\n name = os.path.basename(sexe)\n # XXX: parse cmdline\n if name.startswith('qemu-') or name.startswith('gdbserver'):\n exe = proc.cmdline(spid)[-1]\n return os.path.join(proc.cwd(spid), exe)\n\n exe = exe or findexe()\n elif isinstance(target, elf.corefile.Corefile):\n pre += 'target core \"%s\"\\n' % target.path\n else:\n log.error(\"don't know how to attach to target: %r\", target)\n\n # if we have a pid but no exe, just look it up in /proc/\n if pid and not exe:\n exe_fn = proc.exe\n if context.os == 'android':\n exe_fn = adb.proc_exe\n exe = exe_fn(pid)\n\n if not pid and not exe and not ssh:\n log.error('could not find target process')\n\n gdb_binary = binary()\n cmd = [gdb_binary]\n\n if gdb_args:\n cmd += gdb_args\n\n if context.gdbinit:\n cmd += ['-nh'] # ignore ~/.gdbinit\n cmd += ['-x', context.gdbinit] # load custom gdbinit\n\n cmd += ['-q']\n\n if exe and context.native:\n if not ssh and not os.path.isfile(exe):\n log.error('No such file: %s', exe)\n cmd += [exe]\n\n if pid and not context.os == 'android':\n cmd += [str(pid)]\n\n if context.os == 'android' and pid:\n runner = _get_runner()\n which = _get_which()\n gdb_cmd = _gdbserver_args(pid=pid, which=which)\n gdbserver = runner(gdb_cmd)\n port = _gdbserver_port(gdbserver, None)\n host = context.adb_host\n pre += 'target extended-remote %s:%i\\n' % (context.adb_host, port)\n\n # gdbserver on Android sets 'detach-on-fork on' which breaks things\n # when you're trying to debug anything that forks.\n pre += 'set detach-on-fork off\\n'\n\n if api:\n # create a UNIX socket for talking to GDB\n socket_dir = tempfile.mkdtemp()\n 
socket_path = os.path.join(socket_dir, 'socket')\n bridge = os.path.join(os.path.dirname(__file__), 'gdb_api_bridge.py')\n\n # inject the socket path and the GDB Python API bridge\n pre = 'python socket_path = ' + repr(socket_path) + '\\n' + \\\n 'source ' + bridge + '\\n' + \\\n pre\n\n gdbscript = pre + (gdbscript or '')\n\n if gdbscript:\n tmp = tempfile.NamedTemporaryFile(prefix = 'pwn', suffix = '.gdb',\n delete = False, mode = 'w+')\n log.debug('Wrote gdb script to %r\\n%s', tmp.name, gdbscript)\n gdbscript = 'shell rm %s\\n%s' % (tmp.name, gdbscript)\n\n tmp.write(gdbscript)\n tmp.close()\n cmd += ['-x', tmp.name]\n\n log.info('running in new terminal: %s', cmd)\n\n if api:\n # prevent gdb_faketerminal.py from messing up api doctests\n def preexec_fn():\n os.environ['GDB_FAKETERMINAL'] = '0'\n else:\n preexec_fn = None\n gdb_pid = misc.run_in_new_terminal(cmd, preexec_fn = preexec_fn)\n\n if pid and context.native:\n proc.wait_for_debugger(pid, gdb_pid)\n\n if not api:\n return gdb_pid\n\n # connect to the GDB Python API bridge\n from rpyc import BgServingThread\n from rpyc.utils.factory import unix_connect\n if six.PY2:\n retriable = socket.error\n else:\n retriable = ConnectionRefusedError, FileNotFoundError\n\n t = Timeout()\n with t.countdown(10):\n while t.timeout:\n try:\n conn = unix_connect(socket_path)\n break\n except retriable:\n time.sleep(0.1)\n else:\n # Check to see if RPyC is installed at all in GDB\n rpyc_check = [gdb_binary, '--nx', '-batch', '-ex',\n 'python import rpyc; import sys; sys.exit(123)']\n\n if 123 != tubes.process.process(rpyc_check).poll(block=True):\n log.error('Failed to connect to GDB: rpyc is not installed')\n\n # Check to see if the socket ever got created\n if not os.path.exists(socket_path):\n log.error('Failed to connect to GDB: Unix socket %s was never created', socket_path)\n\n # Check to see if the remote RPyC client is a compatible version\n version_check = [gdb_binary, '--nx', '-batch', '-ex',\n 'python import platform; print(platform.python_version())']\n gdb_python_version = tubes.process.process(version_check).recvall().strip()\n python_version = str(platform.python_version())\n\n if gdb_python_version != python_version:\n log.error('Failed to connect to GDB: Version mismatch (%s vs %s)',\n gdb_python_version,\n python_version)\n\n # Don't know what happened\n log.error('Failed to connect to GDB: Unknown error')\n\n # now that connection is up, remove the socket from the filesystem\n os.unlink(socket_path)\n os.rmdir(socket_dir)\n\n # create a thread for receiving breakpoint notifications\n BgServingThread(conn, callback=lambda: None)\n\n return gdb_pid, Gdb(conn)\n\n\ndef ssh_gdb(ssh, argv, gdbscript = None, arch = None, **kwargs):\n if not isinstance(argv, (list, tuple)):\n argv = [argv]\n\n exe = argv[0]\n argv = [\"gdbserver\", \"--multi\", \"127.0.0.1:0\"] + argv\n\n # Download the executable\n local_exe = os.path.basename(exe)\n ssh.download_file(ssh.which(exe), local_exe)\n\n # Run the process\n c = ssh.process(argv, **kwargs)\n\n # Find the port for the gdb server\n c.recvuntil(b'port ')\n line = c.recvline().strip()\n gdbport = re.match(b'[0-9]+', line)\n if gdbport:\n gdbport = int(gdbport.group(0))\n\n l = tubes.listen.listen(0)\n forwardport = l.lport\n\n attach(('127.0.0.1', forwardport), gdbscript, local_exe, arch, ssh=ssh)\n l.wait_for_connection().connect_both(ssh.connect_remote('127.0.0.1', gdbport))\n return c\n\ndef find_module_addresses(binary, ssh=None, ulimit=False):\n \"\"\"\n Cheat to find modules by using 
GDB.\n\n We can't use ``/proc/$pid/map`` since some servers forbid it.\n This breaks ``info proc`` in GDB, but ``info sharedlibrary`` still works.\n Additionally, ``info sharedlibrary`` works on FreeBSD, which may not have\n procfs enabled or accessible.\n\n The output looks like this:\n\n ::\n\n info proc mapping\n process 13961\n warning: unable to open /proc file '/proc/13961/maps'\n\n info sharedlibrary\n From To Syms Read Shared Object Library\n 0xf7fdc820 0xf7ff505f Yes (*) /lib/ld-linux.so.2\n 0xf7fbb650 0xf7fc79f8 Yes /lib32/libpthread.so.0\n 0xf7e26f10 0xf7f5b51c Yes (*) /lib32/libc.so.6\n (*): Shared library is missing debugging information.\n\n Note that the raw addresses provided by ``info sharedlibrary`` are actually\n the address of the ``.text`` segment, not the image base address.\n\n This routine automates the entire process of:\n\n 1. Downloading the binaries from the remote server\n 2. Scraping GDB for the information\n 3. Loading each library into an ELF\n 4. Fixing up the base address vs. the ``.text`` segment address\n\n Arguments:\n binary(str): Path to the binary on the remote server\n ssh(pwnlib.tubes.tube): SSH connection through which to load the libraries.\n If left as :const:`None`, will use a :class:`pwnlib.tubes.process.process`.\n ulimit(bool): Set to :const:`True` to run \"ulimit -s unlimited\" before GDB.\n\n Returns:\n A list of pwnlib.elf.ELF objects, with correct base addresses.\n\n Example:\n\n >>> with context.local(log_level=9999):\n ... shell = ssh(host='example.pwnme', user='travis', password='demopass')\n ... bash_libs = gdb.find_module_addresses('/bin/bash', shell)\n >>> os.path.basename(bash_libs[0].path)\n 'libc.so.6'\n >>> hex(bash_libs[0].symbols['system']) # doctest: +SKIP\n '0x7ffff7634660'\n \"\"\"\n #\n # Download all of the remote libraries\n #\n if ssh:\n runner = ssh.run\n local_bin = ssh.download_file(binary)\n local_elf = elf.ELF(os.path.basename(binary))\n local_libs = ssh.libs(binary)\n\n else:\n runner = tubes.process.process\n local_elf = elf.ELF(binary)\n local_libs = local_elf.libs\n\n #\n # Get the addresses from GDB\n #\n libs = {}\n cmd = \"gdb -q -nh --args %s | cat\" % (binary) # pipe through cat to disable colored output on GDB 9+\n expr = re.compile(r'(0x\\S+)[^/]+(.*)')\n\n if ulimit:\n cmd = ['sh', '-c', \"(ulimit -s unlimited; %s)\" % cmd]\n else:\n cmd = ['sh', '-c', cmd]\n\n with runner(cmd) as gdb:\n if context.aslr:\n gdb.sendline(b'set disable-randomization off')\n\n gdb.send(b\"\"\"\\\n set prompt\n catch load\n run\n \"\"\")\n gdb.sendline(b'info sharedlibrary')\n lines = packing._decode(gdb.recvrepeat(2))\n\n for line in lines.splitlines():\n m = expr.match(line)\n if m:\n libs[m.group(2)] = int(m.group(1),16)\n gdb.sendline(b'kill')\n gdb.sendline(b'y')\n gdb.sendline(b'quit')\n\n #\n # Fix up all of the addresses against the .text address\n #\n rv = []\n\n for remote_path,text_address in sorted(libs.items()):\n # Match up the local copy to the remote path\n try:\n path = next(p for p in local_libs.keys() if remote_path in p)\n except StopIteration:\n print(\"Skipping %r\" % remote_path)\n continue\n\n # Load it\n lib = elf.ELF(path)\n\n # Find its text segment\n text = lib.get_section_by_name('.text')\n\n # Fix the address\n lib.address = text_address - text.header.sh_addr\n rv.append(lib)\n\n return rv\n\ndef corefile(process):\n r\"\"\"Drops a core file for a running local process.\n\n Note:\n You should use :meth:`.process.corefile` instead of using this method directly.\n\n Arguments:\n process: Process 
to dump\n\n Returns:\n :class:`.Core`: The generated core file\n\n Example:\n\n >>> io = process('bash')\n >>> core = gdb.corefile(io)\n >>> core.exe.name # doctest: +ELLIPSIS\n '.../bin/bash'\n \"\"\"\n\n if context.noptrace:\n log.warn_once(\"Skipping corefile since context.noptrace==True\")\n return\n\n corefile_path = './core.%s.%i' % (os.path.basename(process.executable),\n process.pid)\n\n # Due to https://sourceware.org/bugzilla/show_bug.cgi?id=16092\n # will disregard coredump_filter, and will not dump private mappings.\n if version() < (7,11):\n log.warn_once('The installed GDB (%s) does not emit core-dumps which '\n 'contain all of the data in the process.\\n'\n 'Upgrade to GDB >= 7.11 for better core-dumps.' % binary())\n\n # This is effectively the same as what the 'gcore' binary does\n gdb_args = ['-batch',\n '-q',\n '-nx',\n '-ex', 'set pagination off',\n '-ex', 'set height 0',\n '-ex', 'set width 0',\n '-ex', 'set use-coredump-filter on',\n '-ex', 'generate-core-file %s' % corefile_path,\n '-ex', 'detach']\n\n with context.local(terminal = ['sh', '-c']):\n with context.quiet:\n pid = attach(process, gdb_args=gdb_args)\n log.debug(\"Got GDB pid %d\", pid)\n try:\n psutil.Process(pid).wait()\n except psutil.Error:\n pass\n\n if not os.path.exists(corefile_path):\n log.error(\"Could not generate a corefile for process %d\", process.pid)\n\n return elf.corefile.Core(corefile_path)\n\ndef version(program='gdb'):\n \"\"\"Gets the current GDB version.\n\n Note:\n Requires that GDB version meets the following format:\n\n ``GNU gdb (GDB) 7.12``\n\n Returns:\n tuple: A tuple containing the version numbers\n\n Example:\n\n >>> (7,0) <= gdb.version() <= (12,0)\n True\n \"\"\"\n program = misc.which(program)\n expr = br'([0-9]+\\.?)+'\n\n with tubes.process.process([program, '--version'], level='error', stdout=tubes.process.PIPE) as gdb:\n version = gdb.recvline()\n\n versions = re.search(expr, version).group()\n\n return tuple(map(int, versions.split(b'.')))\n", "path": "pwnlib/gdb.py" } ]
diff --git a/pwnlib/gdb.py b/pwnlib/gdb.py index 45019dc5d..7e14cc9f1 100644 --- a/pwnlib/gdb.py +++ b/pwnlib/gdb.py @@ -568,7 +568,7 @@ def debug(args, gdbscript=None, exe=None, ssh=None, env=None, sysroot=None, api= gdbserver.executable = exe # Find what port we need to connect to - if context.native or (context.os == 'android'): + if ssh or context.native or (context.os == 'android'): port = _gdbserver_port(gdbserver, ssh) else: port = qemu_port
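*Editor's aside (hedged, not part of the record):* the one-line patch above adds `ssh` to the branch that decides how the gdbserver port is discovered, so an SSH-tunnelled target appears to be treated like a native or Android one and the port is parsed from gdbserver's output instead of falling back to the QEMU port. A standalone restatement of the guarded branch, with every parameter a placeholder for the objects used inside `debug()`:

```python
# Placeholder restatement of the patched branch in pwnlib/gdb.py's debug();
# none of these parameter names are real pwntools API.
def choose_gdbserver_port(gdbserver, ssh, native, os_name, qemu_port, parse_port):
    # After the patch, SSH targets also parse the port from gdbserver output.
    if ssh or native or os_name == 'android':
        return parse_port(gdbserver, ssh)
    # Only the plain QEMU-user case keeps the pre-selected local port.
    return qemu_port
```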
google__flax-3785
[struct.dataclass] Consider adding optional `kw_only` arguments

I often run into the following issue:

```python
from flax import struct

class Foo(struct.PyTreeNode):
  bar: int = struct.field(pytree_node=False, default=1)

class Baz(Foo):
  qux: str
```

Since `qux` does not have a default value, I get:

```
Fields without default values cannot appear after fields with default values
```

Can we consider adding a simple wrapper around `dataclasses.dataclass(kw_only=True)`? It should be easy to support in `struct.dataclass`, and for inheritance we could perhaps add another base object, like `PyTreeNodeKwOnly`.
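*Editor's aside (illustrative, not part of the original issue):* on Python 3.10+ the standard-library `dataclasses` module already solves this ordering problem via `kw_only=True`, which is the behaviour the issue asks `flax.struct` to expose. A minimal sketch using plain dataclasses and the issue's field names (so `Foo`/`Baz` here are stand-ins, not Flax classes):

```python
# Plain-dataclasses sketch of the requested behaviour; requires Python 3.10+.
import dataclasses

@dataclasses.dataclass
class Foo:
    bar: int = 1

@dataclasses.dataclass(kw_only=True)
class Baz(Foo):
    qux: str  # no default needed: keyword-only fields are never positional

baz = Baz(qux="hello")
assert baz.bar == 1 and baz.qux == "hello"
```

Because `qux` is keyword-only, the restriction on non-default fields following default fields no longer applies.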
[ { "content": "# Copyright 2024 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for defining custom classes that can be used with jax transformations.\"\"\"\n\nimport dataclasses\nfrom typing import TypeVar\n\nimport jax\nfrom typing_extensions import (\n dataclass_transform, # pytype: disable=not-supported-yet\n)\n\nfrom . import serialization\n\n_T = TypeVar('_T')\n\n\ndef field(pytree_node=True, **kwargs):\n return dataclasses.field(metadata={'pytree_node': pytree_node}, **kwargs)\n\n\n@dataclass_transform(field_specifiers=(field,)) # type: ignore[literal-required]\ndef dataclass(clz: _T, **kwargs) -> _T:\n \"\"\"Create a class which can be passed to functional transformations.\n\n .. note::\n Inherit from ``PyTreeNode`` instead to avoid type checking issues when\n using PyType.\n\n Jax transformations such as ``jax.jit`` and ``jax.grad`` require objects that are\n immutable and can be mapped over using the ``jax.tree_util`` methods.\n The ``dataclass`` decorator makes it easy to define custom classes that can be\n passed safely to Jax. For example::\n\n >>> from flax import struct\n >>> import jax\n >>> from typing import Any, Callable\n\n >>> @struct.dataclass\n ... class Model:\n ... params: Any\n ... # use pytree_node=False to indicate an attribute should not be touched\n ... # by Jax transformations.\n ... apply_fn: Callable = struct.field(pytree_node=False)\n\n ... def __apply__(self, *args):\n ... return self.apply_fn(*args)\n\n >>> params = {}\n >>> params_b = {}\n >>> apply_fn = lambda v, x: x\n >>> model = Model(params, apply_fn)\n\n >>> # model.params = params_b # Model is immutable. This will raise an error.\n >>> model_b = model.replace(params=params_b) # Use the replace method instead.\n\n >>> # This class can now be used safely in Jax to compute gradients w.r.t. the\n >>> # parameters.\n >>> model = Model(params, apply_fn)\n >>> loss_fn = lambda model: 3.\n >>> model_grad = jax.grad(loss_fn)(model)\n\n Note that dataclasses have an auto-generated ``__init__`` where\n the arguments of the constructor and the attributes of the created\n instance match 1:1. This correspondence is what makes these objects\n valid containers that work with JAX transformations and\n more generally the ``jax.tree_util`` library.\n\n Sometimes a \"smart constructor\" is desired, for example because\n some of the attributes can be (optionally) derived from others.\n The way to do this with Flax dataclasses is to make a static or\n class method that provides the smart constructor.\n This way the simple constructor used by ``jax.tree_util`` is\n preserved. Consider the following example::\n\n >>> @struct.dataclass\n ... class DirectionAndScaleKernel:\n ... direction: jax.Array\n ... scale: jax.Array\n\n ... @classmethod\n ... def create(cls, kernel):\n ... scale = jax.numpy.linalg.norm(kernel, axis=0, keepdims=True)\n ... direction = direction / scale\n ... 
return cls(direction, scale)\n\n Args:\n clz: the class that will be transformed by the decorator.\n Returns:\n The new class.\n \"\"\"\n # check if already a flax dataclass\n if '_flax_dataclass' in clz.__dict__:\n return clz\n\n if 'frozen' not in kwargs.keys():\n kwargs['frozen'] = True\n data_clz = dataclasses.dataclass(**kwargs)(clz) # type: ignore\n meta_fields = []\n data_fields = []\n for field_info in dataclasses.fields(data_clz):\n is_pytree_node = field_info.metadata.get('pytree_node', True)\n if is_pytree_node:\n data_fields.append(field_info.name)\n else:\n meta_fields.append(field_info.name)\n\n def replace(self, **updates):\n \"\"\" \"Returns a new object replacing the specified fields with new values.\"\"\"\n return dataclasses.replace(self, **updates)\n\n data_clz.replace = replace\n\n def iterate_clz(x):\n meta = tuple(getattr(x, name) for name in meta_fields)\n data = tuple(getattr(x, name) for name in data_fields)\n return data, meta\n\n def iterate_clz_with_keys(x):\n meta = tuple(getattr(x, name) for name in meta_fields)\n data = tuple(\n (jax.tree_util.GetAttrKey(name), getattr(x, name)) for name in data_fields\n )\n return data, meta\n\n def clz_from_iterable(meta, data):\n meta_args = tuple(zip(meta_fields, meta))\n data_args = tuple(zip(data_fields, data))\n kwargs = dict(meta_args + data_args)\n return data_clz(**kwargs)\n\n jax.tree_util.register_pytree_with_keys(\n data_clz, iterate_clz_with_keys, clz_from_iterable, iterate_clz,\n )\n\n def to_state_dict(x):\n state_dict = {\n name: serialization.to_state_dict(getattr(x, name))\n for name in data_fields\n }\n return state_dict\n\n def from_state_dict(x, state):\n \"\"\"Restore the state of a data class.\"\"\"\n state = state.copy() # copy the state so we can pop the restored fields.\n updates = {}\n for name in data_fields:\n if name not in state:\n raise ValueError(\n f'Missing field {name} in state dict while restoring'\n f' an instance of {clz.__name__},'\n f' at path {serialization.current_path()}'\n )\n value = getattr(x, name)\n value_state = state.pop(name)\n updates[name] = serialization.from_state_dict(\n value, value_state, name=name\n )\n if state:\n names = ','.join(state.keys())\n raise ValueError(\n f'Unknown field(s) \"{names}\" in state dict while'\n f' restoring an instance of {clz.__name__}'\n f' at path {serialization.current_path()}'\n )\n return x.replace(**updates)\n\n serialization.register_serialization_state(\n data_clz, to_state_dict, from_state_dict\n )\n\n # add a _flax_dataclass flag to distinguish from regular dataclasses\n data_clz._flax_dataclass = True # type: ignore[attr-defined]\n\n return data_clz # type: ignore\n\n\nTNode = TypeVar('TNode', bound='PyTreeNode')\n\n\n@dataclass_transform(field_specifiers=(field,)) # type: ignore[literal-required]\nclass PyTreeNode:\n \"\"\"Base class for dataclasses that should act like a JAX pytree node.\n\n See ``flax.struct.dataclass`` for the ``jax.tree_util`` behavior.\n This base class additionally avoids type checking errors when using PyType.\n\n Example::\n\n >>> from flax import struct\n >>> import jax\n >>> from typing import Any, Callable\n\n >>> class Model(struct.PyTreeNode):\n ... params: Any\n ... # use pytree_node=False to indicate an attribute should not be touched\n ... # by Jax transformations.\n ... apply_fn: Callable = struct.field(pytree_node=False)\n\n ... def __apply__(self, *args):\n ... 
return self.apply_fn(*args)\n\n >>> params = {}\n >>> params_b = {}\n >>> apply_fn = lambda v, x: x\n >>> model = Model(params, apply_fn)\n\n >>> # model.params = params_b # Model is immutable. This will raise an error.\n >>> model_b = model.replace(params=params_b) # Use the replace method instead.\n\n >>> # This class can now be used safely in Jax to compute gradients w.r.t. the\n >>> # parameters.\n >>> model = Model(params, apply_fn)\n >>> loss_fn = lambda model: 3.\n >>> model_grad = jax.grad(loss_fn)(model)\n \"\"\"\n\n def __init_subclass__(cls):\n dataclass(cls) # pytype: disable=wrong-arg-types\n\n def __init__(self, *args, **kwargs):\n # stub for pytype\n raise NotImplementedError\n\n def replace(self: TNode, **overrides) -> TNode:\n # stub for pytype\n raise NotImplementedError\n", "path": "flax/struct.py" } ]
[ { "content": "# Copyright 2024 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for defining custom classes that can be used with jax transformations.\"\"\"\n\nimport dataclasses\nfrom typing import TypeVar\n\nimport jax\nfrom typing_extensions import (\n dataclass_transform, # pytype: disable=not-supported-yet\n)\n\nfrom . import serialization\n\n_T = TypeVar('_T')\n\n\ndef field(pytree_node=True, **kwargs):\n return dataclasses.field(metadata={'pytree_node': pytree_node}, **kwargs)\n\n\n@dataclass_transform(field_specifiers=(field,)) # type: ignore[literal-required]\ndef dataclass(clz: _T, **kwargs) -> _T:\n \"\"\"Create a class which can be passed to functional transformations.\n\n .. note::\n Inherit from ``PyTreeNode`` instead to avoid type checking issues when\n using PyType.\n\n Jax transformations such as ``jax.jit`` and ``jax.grad`` require objects that are\n immutable and can be mapped over using the ``jax.tree_util`` methods.\n The ``dataclass`` decorator makes it easy to define custom classes that can be\n passed safely to Jax. For example::\n\n >>> from flax import struct\n >>> import jax\n >>> from typing import Any, Callable\n\n >>> @struct.dataclass\n ... class Model:\n ... params: Any\n ... # use pytree_node=False to indicate an attribute should not be touched\n ... # by Jax transformations.\n ... apply_fn: Callable = struct.field(pytree_node=False)\n\n ... def __apply__(self, *args):\n ... return self.apply_fn(*args)\n\n >>> params = {}\n >>> params_b = {}\n >>> apply_fn = lambda v, x: x\n >>> model = Model(params, apply_fn)\n\n >>> # model.params = params_b # Model is immutable. This will raise an error.\n >>> model_b = model.replace(params=params_b) # Use the replace method instead.\n\n >>> # This class can now be used safely in Jax to compute gradients w.r.t. the\n >>> # parameters.\n >>> model = Model(params, apply_fn)\n >>> loss_fn = lambda model: 3.\n >>> model_grad = jax.grad(loss_fn)(model)\n\n Note that dataclasses have an auto-generated ``__init__`` where\n the arguments of the constructor and the attributes of the created\n instance match 1:1. This correspondence is what makes these objects\n valid containers that work with JAX transformations and\n more generally the ``jax.tree_util`` library.\n\n Sometimes a \"smart constructor\" is desired, for example because\n some of the attributes can be (optionally) derived from others.\n The way to do this with Flax dataclasses is to make a static or\n class method that provides the smart constructor.\n This way the simple constructor used by ``jax.tree_util`` is\n preserved. Consider the following example::\n\n >>> @struct.dataclass\n ... class DirectionAndScaleKernel:\n ... direction: jax.Array\n ... scale: jax.Array\n\n ... @classmethod\n ... def create(cls, kernel):\n ... scale = jax.numpy.linalg.norm(kernel, axis=0, keepdims=True)\n ... direction = direction / scale\n ... 
return cls(direction, scale)\n\n Args:\n clz: the class that will be transformed by the decorator.\n Returns:\n The new class.\n \"\"\"\n # check if already a flax dataclass\n if '_flax_dataclass' in clz.__dict__:\n return clz\n\n if 'frozen' not in kwargs.keys():\n kwargs['frozen'] = True\n data_clz = dataclasses.dataclass(**kwargs)(clz) # type: ignore\n meta_fields = []\n data_fields = []\n for field_info in dataclasses.fields(data_clz):\n is_pytree_node = field_info.metadata.get('pytree_node', True)\n if is_pytree_node:\n data_fields.append(field_info.name)\n else:\n meta_fields.append(field_info.name)\n\n def replace(self, **updates):\n \"\"\" \"Returns a new object replacing the specified fields with new values.\"\"\"\n return dataclasses.replace(self, **updates)\n\n data_clz.replace = replace\n\n def iterate_clz(x):\n meta = tuple(getattr(x, name) for name in meta_fields)\n data = tuple(getattr(x, name) for name in data_fields)\n return data, meta\n\n def iterate_clz_with_keys(x):\n meta = tuple(getattr(x, name) for name in meta_fields)\n data = tuple(\n (jax.tree_util.GetAttrKey(name), getattr(x, name)) for name in data_fields\n )\n return data, meta\n\n def clz_from_iterable(meta, data):\n meta_args = tuple(zip(meta_fields, meta))\n data_args = tuple(zip(data_fields, data))\n kwargs = dict(meta_args + data_args)\n return data_clz(**kwargs)\n\n jax.tree_util.register_pytree_with_keys(\n data_clz, iterate_clz_with_keys, clz_from_iterable, iterate_clz,\n )\n\n def to_state_dict(x):\n state_dict = {\n name: serialization.to_state_dict(getattr(x, name))\n for name in data_fields\n }\n return state_dict\n\n def from_state_dict(x, state):\n \"\"\"Restore the state of a data class.\"\"\"\n state = state.copy() # copy the state so we can pop the restored fields.\n updates = {}\n for name in data_fields:\n if name not in state:\n raise ValueError(\n f'Missing field {name} in state dict while restoring'\n f' an instance of {clz.__name__},'\n f' at path {serialization.current_path()}'\n )\n value = getattr(x, name)\n value_state = state.pop(name)\n updates[name] = serialization.from_state_dict(\n value, value_state, name=name\n )\n if state:\n names = ','.join(state.keys())\n raise ValueError(\n f'Unknown field(s) \"{names}\" in state dict while'\n f' restoring an instance of {clz.__name__}'\n f' at path {serialization.current_path()}'\n )\n return x.replace(**updates)\n\n serialization.register_serialization_state(\n data_clz, to_state_dict, from_state_dict\n )\n\n # add a _flax_dataclass flag to distinguish from regular dataclasses\n data_clz._flax_dataclass = True # type: ignore[attr-defined]\n\n return data_clz # type: ignore\n\n\nTNode = TypeVar('TNode', bound='PyTreeNode')\n\n\n@dataclass_transform(field_specifiers=(field,)) # type: ignore[literal-required]\nclass PyTreeNode:\n \"\"\"Base class for dataclasses that should act like a JAX pytree node.\n\n See ``flax.struct.dataclass`` for the ``jax.tree_util`` behavior.\n This base class additionally avoids type checking errors when using PyType.\n\n Example::\n\n >>> from flax import struct\n >>> import jax\n >>> from typing import Any, Callable\n\n >>> class Model(struct.PyTreeNode):\n ... params: Any\n ... # use pytree_node=False to indicate an attribute should not be touched\n ... # by Jax transformations.\n ... apply_fn: Callable = struct.field(pytree_node=False)\n\n ... def __apply__(self, *args):\n ... 
return self.apply_fn(*args)\n\n >>> params = {}\n >>> params_b = {}\n >>> apply_fn = lambda v, x: x\n >>> model = Model(params, apply_fn)\n\n >>> # model.params = params_b # Model is immutable. This will raise an error.\n >>> model_b = model.replace(params=params_b) # Use the replace method instead.\n\n >>> # This class can now be used safely in Jax to compute gradients w.r.t. the\n >>> # parameters.\n >>> model = Model(params, apply_fn)\n >>> loss_fn = lambda model: 3.\n >>> model_grad = jax.grad(loss_fn)(model)\n \"\"\"\n\n def __init_subclass__(cls, **kwargs):\n dataclass(cls, **kwargs) # pytype: disable=wrong-arg-types\n\n def __init__(self, *args, **kwargs):\n # stub for pytype\n raise NotImplementedError\n\n def replace(self: TNode, **overrides) -> TNode:\n # stub for pytype\n raise NotImplementedError\n", "path": "flax/struct.py" } ]
diff --git a/flax/struct.py b/flax/struct.py index 7a8283a9d..29dbb9c2f 100644 --- a/flax/struct.py +++ b/flax/struct.py @@ -227,8 +227,8 @@ class PyTreeNode: >>> model_grad = jax.grad(loss_fn)(model) """ - def __init_subclass__(cls): - dataclass(cls) # pytype: disable=wrong-arg-types + def __init_subclass__(cls, **kwargs): + dataclass(cls, **kwargs) # pytype: disable=wrong-arg-types def __init__(self, *args, **kwargs): # stub for pytype diff --git a/tests/struct_test.py b/tests/struct_test.py index da517c739..8ab3119d0 100644 --- a/tests/struct_test.py +++ b/tests/struct_test.py @@ -18,7 +18,7 @@ from typing import Any import jax -from absl.testing import absltest +from absl.testing import absltest, parameterized from jax._src.tree_util import prefix_errors from flax import struct @@ -34,7 +34,7 @@ class Point: meta: Any = struct.field(pytree_node=False) -class StructTest(absltest.TestCase): +class StructTest(parameterized.TestCase): def test_no_extra_fields(self): p = Point(x=1, y=2, meta={}) with self.assertRaises(dataclasses.FrozenInstanceError): @@ -93,24 +93,68 @@ class A(struct.PyTreeNode): a: int # TODO(marcuschiam): Uncomment when Flax upgrades to Python 3.10. - # def test_kw_only(self): - # @struct.dataclass - # class A: - # a: int = 1 - - # with self.assertRaisesRegex(TypeError, "non-default argument 'b' follows default argument"): + # @parameterized.parameters( + # {'mode': 'dataclass'}, + # {'mode': 'pytreenode'}, + # ) + # def test_kw_only(self, mode): + # if mode == 'dataclass': # @struct.dataclass + # class A: + # a: int = 1 + + # @functools.partial(struct.dataclass, kw_only=True) # class B(A): # b: int + # elif mode == 'pytreenode': + # class A(struct.PyTreeNode): + # a: int = 1 - # @functools.partial(struct.dataclass, kw_only=True) - # class B(A): - # b: int + # class B(A, struct.PyTreeNode, kw_only=True): + # b: int # obj = B(b=2) # self.assertEqual(obj.a, 1) # self.assertEqual(obj.b, 2) + # with self.assertRaisesRegex(TypeError, "non-default argument 'b' follows default argument"): + # if mode == 'dataclass': + # @struct.dataclass + # class B(A): + # b: int + # elif mode == 'pytreenode': + # class B(A, struct.PyTreeNode): + # b: int + + # TODO(marcuschiam): Uncomment when Flax upgrades to Python 3.10. + # @parameterized.parameters( + # {'mode': 'dataclass'}, + # {'mode': 'pytreenode'}, + # ) + # def test_mutable(self, mode): + # if mode == 'dataclass': + # @struct.dataclass + # class A: + # a: int = 1 + + # @functools.partial(struct.dataclass, frozen=False) + # class B: + # b: int = 1 + # elif mode == 'pytreenode': + # class A(struct.PyTreeNode): + # a: int = 1 + + # class B(struct.PyTreeNode, frozen=False): + # b: int = 1 + + # obj = A() + # with self.assertRaisesRegex(dataclasses.FrozenInstanceError, "cannot assign to field 'a'"): + # obj.a = 2 + + # obj = B() + # obj.b = 2 + # self.assertEqual(obj.b, 2) + if __name__ == '__main__': absltest.main()
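*Editor's aside (hedged usage sketch):* the one-line change above forwards class keywords from `__init_subclass__` to the `dataclass()` call, which is what the commented-out test in the same diff exercises. Assuming Python 3.10+ and a Flax build that contains this patch, the pattern would look like:

```python
# Assumes Python 3.10+ and a flax version including the patch above; the
# class names follow the record's (commented-out) test case.
from flax import struct

class A(struct.PyTreeNode):
    a: int = 1

# kw_only=True is passed through __init_subclass__(**kwargs) into
# dataclasses.dataclass(), so the non-default field b is accepted.
class B(A, struct.PyTreeNode, kw_only=True):
    b: int

obj = B(b=2)
assert obj.a == 1
assert obj.b == 2
```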
secdev__scapy-1417
No /dev/bpf handle is available !

I'm running on macOS High Sierra 10.13.4. After downloading https://github.com/secdev/scapy/archive/v2.4.0.zip I unzipped it and ran each of the following as root: run_scapy, run_scapy2 and run_scapy_py3. Within each REPL I ran:

```
send(IP(dst="2.2.2.2", src="1.1.1.1"))
```

and the traceback was the same:

```
Traceback (most recent call last):
  File "<console>", line 1, in <module>
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/sendrecv.py", line 302, in send
    realtime=realtime, return_packets=return_packets)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/sendrecv.py", line 276, in __gen_send
    s.send(p)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/arch/bpf/supersocket.py", line 345, in send
    frame = raw(self.guessed_cls()/pkt)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/compat.py", line 96, in raw
    return bytes(x)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/packet.py", line 345, in __bytes__
    return self.build()
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/packet.py", line 444, in build
    p = self.do_build()
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/packet.py", line 426, in do_build
    pkt = self.self_build()
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/packet.py", line 407, in self_build
    p = f.addfield(self, p, val)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/fields.py", line 80, in addfield
    return s+struct.pack(self.fmt, self.i2m(pkt,val))
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/layers/l2.py", line 109, in i2m
    return MACField.i2m(self, pkt, self.i2h(pkt, x))
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/layers/l2.py", line 101, in i2h
    x = conf.neighbor.resolve(pkt,pkt.payload)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/layers/l2.py", line 49, in resolve
    return self.resolvers[k](l2inst,l3inst)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/layers/inet.py", line 821, in inet_register_l3
    return getmacbyip(l3.dst)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/layers/l2.py", line 84, in getmacbyip
    nofilter=1)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/sendrecv.py", line 434, in srp1
    ans, _ = srp(*args, **kargs)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/sendrecv.py", line 416, in srp
    s = conf.L2socket(promisc=promisc, iface=iface, filter=filter, nofilter=nofilter, type=type)
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/arch/bpf/supersocket.py", line 58, in __init__
    (self.ins, self.dev_bpf) = get_dev_bpf()
  File "/Users/idobn/dev/research/something/playground/tmp/scapy-2.4.0/scapy/arch/bpf/core.py", line 98, in get_dev_bpf
    raise Scapy_Exception("No /dev/bpf handle is available !")
scapy.error.Scapy_Exception: No /dev/bpf handle is available !
```

After looking at some of the past issues, this appears similar to [#1015](https://github.com/secdev/scapy/issues/1015), but that one was solved some time ago.

Update: the above was run while I had Wireshark running; after quitting Wireshark the error stopped.
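*Editor's aside (hedged, my sketch rather than Scapy code):* the report points at `/dev/bpf` exhaustion: Wireshark was holding the low-numbered BPF devices open, and Scapy 2.4.0 only probed the first eight (`range(0, 8)` in `get_dev_bpf`). A standalone restatement of the wider probing loop that the patch later in this record applies (`range(256)`):

```python
# Standalone sketch of probing BPF devices on macOS/*BSD; it mirrors the
# logic patched in scapy/arch/bpf/core.py but is not the Scapy function.
import os

def open_first_free_bpf(max_devices=256):
    """Return (fd, index) for the first /dev/bpfN that opens read/write."""
    for i in range(max_devices):
        try:
            fd = os.open("/dev/bpf%i" % i, os.O_RDWR)
            return fd, i
        except OSError:
            # Device busy (e.g. held by another capture tool) or absent.
            continue
    raise OSError("no free /dev/bpf handle among the first %d devices" % max_devices)
```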
[ { "content": "# Guillaume Valadon <[email protected]>\n\n\"\"\"\nScapy *BSD native support - core\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\nfrom scapy.arch.common import get_if, get_bpf_pointer\nfrom scapy.consts import LOOPBACK_NAME\n\nfrom scapy.arch.bpf.consts import *\n\nimport os\nimport socket\nimport fcntl\nimport struct\n\nfrom ctypes import cdll, cast, pointer, POINTER, Structure\nfrom ctypes import c_int, c_ulong, c_char_p\nfrom ctypes.util import find_library\nfrom scapy.modules.six.moves import range\n\n\n# ctypes definitions\n\nLIBC = cdll.LoadLibrary(find_library(\"libc\"))\nLIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]\nLIBC.ioctl.restype = c_int\n\n\n# Addresses manipulation functions\n\ndef get_if_raw_addr(ifname):\n \"\"\"Returns the IPv4 address configured on 'ifname', packed with inet_pton.\"\"\"\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n warning(\"Failed to execute ifconfig: (%s)\", msg)\n return b\"\\0\\0\\0\\0\"\n\n # Get IPv4 addresses\n addresses = [l for l in fd if l.find(\"netmask\") >= 0]\n if not addresses:\n warning(\"No IPv4 address found on %s !\", ifname)\n return b\"\\0\\0\\0\\0\"\n\n # Pack the first address\n address = addresses[0].split(' ')[1]\n return socket.inet_pton(socket.AF_INET, address)\n\n\ndef get_if_raw_hwaddr(ifname):\n \"\"\"Returns the packed MAC address configured on 'ifname'.\"\"\"\n\n NULL_MAC_ADDRESS = b'\\x00' * 6\n\n # Handle the loopback interface separately\n if ifname == LOOPBACK_NAME:\n return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get MAC addresses\n addresses = [l for l in fd.readlines() if l.find(\"ether\") >= 0 or\n l.find(\"lladdr\") >= 0 or\n l.find(\"address\") >= 0]\n if not addresses:\n raise Scapy_Exception(\"No MAC address found on %s !\" % ifname)\n\n # Pack and return the MAC address\n mac = addresses[0].split(' ')[1]\n mac = [chr(int(b, 16)) for b in mac.split(':')]\n return (ARPHDR_ETHER, ''.join(mac))\n\n\n# BPF specific functions\n\ndef get_dev_bpf():\n \"\"\"Returns an opened BPF file object\"\"\"\n\n # Get the first available BPF handle\n for bpf in range(0, 8):\n try:\n fd = os.open(\"/dev/bpf%i\" % bpf, os.O_RDWR)\n return (fd, bpf)\n except OSError:\n continue\n\n raise Scapy_Exception(\"No /dev/bpf handle is available !\")\n\n\ndef attach_filter(fd, iface, bpf_filter_string):\n \"\"\"Attach a BPF filter to the BPF file descriptor\"\"\"\n\n # Retrieve the BPF byte code in decimal\n command = \"%s -i %s -ddd -s 1600 '%s'\" % (conf.prog.tcpdump, iface, bpf_filter_string)\n try:\n f = os.popen(command)\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute tcpdump: (%s)\" % msg)\n\n # Convert the byte code to a BPF program structure\n lines = f.readlines()\n if lines == []:\n raise Scapy_Exception(\"Got an empty BPF filter from tcpdump !\")\n\n bp = get_bpf_pointer(lines)\n # Assign the BPF program to the interface\n ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))\n if ret < 0:\n raise Scapy_Exception(\"Can't attach the BPF filter !\")\n\n\n# Interface manipulation functions\n\ndef get_if_list():\n \"\"\"Returns a list containing all network interfaces.\"\"\"\n\n # Get 
ifconfig output\n try:\n fd = os.popen(\"%s -a\" % conf.prog.ifconfig)\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get interfaces\n interfaces = [line[:line.find(':')] for line in fd.readlines()\n if \": flags\" in line.lower()]\n return interfaces\n\n\ndef get_working_ifaces():\n \"\"\"\n Returns an ordered list of interfaces that could be used with BPF.\n Note: the order mimics pcap_findalldevs() behavior\n \"\"\"\n\n # Only root is allowed to perform the following ioctl() call\n if os.getuid() != 0:\n return []\n\n # Test all network interfaces\n interfaces = []\n for ifname in get_if_list():\n\n # Unlike pcap_findalldevs(), we do not care of loopback interfaces.\n if ifname == LOOPBACK_NAME:\n continue\n\n # Get interface flags\n try:\n result = get_if(ifname, SIOCGIFFLAGS)\n except IOError:\n warning(\"ioctl(SIOCGIFFLAGS) failed on %s !\", ifname)\n continue\n\n # Convert flags\n ifflags = struct.unpack(\"16xH14x\", result)[0]\n if ifflags & 0x1: # IFF_UP\n\n # Get a BPF handle\n fd, _ = get_dev_bpf()\n if fd is None:\n raise Scapy_Exception(\"No /dev/bpf are available !\")\n\n # Check if the interface can be used\n try:\n fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\", ifname.encode()))\n interfaces.append((ifname, int(ifname[-1])))\n except IOError:\n pass\n\n # Close the file descriptor\n os.close(fd)\n\n # Sort to mimic pcap_findalldevs() order\n interfaces.sort(key=lambda elt: elt[1])\n\n return interfaces\n\n\ndef get_working_if():\n \"\"\"Returns the first interface than can be used with BPF\"\"\"\n\n ifaces = get_working_ifaces()\n if not ifaces:\n # A better interface will be selected later using the routing table\n return LOOPBACK_NAME\n return ifaces[0][0]\n", "path": "scapy/arch/bpf/core.py" } ]
[ { "content": "# Guillaume Valadon <[email protected]>\n\n\"\"\"\nScapy *BSD native support - core\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\nfrom scapy.arch.common import get_if, get_bpf_pointer\nfrom scapy.consts import LOOPBACK_NAME\n\nfrom scapy.arch.bpf.consts import *\n\nimport os\nimport socket\nimport fcntl\nimport struct\n\nfrom ctypes import cdll, cast, pointer, POINTER, Structure\nfrom ctypes import c_int, c_ulong, c_char_p\nfrom ctypes.util import find_library\nfrom scapy.modules.six.moves import range\n\n\n# ctypes definitions\n\nLIBC = cdll.LoadLibrary(find_library(\"libc\"))\nLIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]\nLIBC.ioctl.restype = c_int\n\n\n# Addresses manipulation functions\n\ndef get_if_raw_addr(ifname):\n \"\"\"Returns the IPv4 address configured on 'ifname', packed with inet_pton.\"\"\"\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n warning(\"Failed to execute ifconfig: (%s)\", msg)\n return b\"\\0\\0\\0\\0\"\n\n # Get IPv4 addresses\n addresses = [l for l in fd if l.find(\"netmask\") >= 0]\n if not addresses:\n warning(\"No IPv4 address found on %s !\", ifname)\n return b\"\\0\\0\\0\\0\"\n\n # Pack the first address\n address = addresses[0].split(' ')[1]\n return socket.inet_pton(socket.AF_INET, address)\n\n\ndef get_if_raw_hwaddr(ifname):\n \"\"\"Returns the packed MAC address configured on 'ifname'.\"\"\"\n\n NULL_MAC_ADDRESS = b'\\x00' * 6\n\n # Handle the loopback interface separately\n if ifname == LOOPBACK_NAME:\n return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get MAC addresses\n addresses = [l for l in fd.readlines() if l.find(\"ether\") >= 0 or\n l.find(\"lladdr\") >= 0 or\n l.find(\"address\") >= 0]\n if not addresses:\n raise Scapy_Exception(\"No MAC address found on %s !\" % ifname)\n\n # Pack and return the MAC address\n mac = addresses[0].split(' ')[1]\n mac = [chr(int(b, 16)) for b in mac.split(':')]\n return (ARPHDR_ETHER, ''.join(mac))\n\n\n# BPF specific functions\n\ndef get_dev_bpf():\n \"\"\"Returns an opened BPF file object\"\"\"\n\n # Get the first available BPF handle\n for bpf in range(256):\n try:\n fd = os.open(\"/dev/bpf%i\" % bpf, os.O_RDWR)\n return (fd, bpf)\n except OSError:\n continue\n\n raise Scapy_Exception(\"No /dev/bpf handle is available !\")\n\n\ndef attach_filter(fd, iface, bpf_filter_string):\n \"\"\"Attach a BPF filter to the BPF file descriptor\"\"\"\n\n # Retrieve the BPF byte code in decimal\n command = \"%s -i %s -ddd -s 1600 '%s'\" % (conf.prog.tcpdump, iface, bpf_filter_string)\n try:\n f = os.popen(command)\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute tcpdump: (%s)\" % msg)\n\n # Convert the byte code to a BPF program structure\n lines = f.readlines()\n if lines == []:\n raise Scapy_Exception(\"Got an empty BPF filter from tcpdump !\")\n\n bp = get_bpf_pointer(lines)\n # Assign the BPF program to the interface\n ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))\n if ret < 0:\n raise Scapy_Exception(\"Can't attach the BPF filter !\")\n\n\n# Interface manipulation functions\n\ndef get_if_list():\n \"\"\"Returns a list containing all network interfaces.\"\"\"\n\n # Get 
ifconfig output\n try:\n fd = os.popen(\"%s -a\" % conf.prog.ifconfig)\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get interfaces\n interfaces = [line[:line.find(':')] for line in fd.readlines()\n if \": flags\" in line.lower()]\n return interfaces\n\n\ndef get_working_ifaces():\n \"\"\"\n Returns an ordered list of interfaces that could be used with BPF.\n Note: the order mimics pcap_findalldevs() behavior\n \"\"\"\n\n # Only root is allowed to perform the following ioctl() call\n if os.getuid() != 0:\n return []\n\n # Test all network interfaces\n interfaces = []\n for ifname in get_if_list():\n\n # Unlike pcap_findalldevs(), we do not care of loopback interfaces.\n if ifname == LOOPBACK_NAME:\n continue\n\n # Get interface flags\n try:\n result = get_if(ifname, SIOCGIFFLAGS)\n except IOError:\n warning(\"ioctl(SIOCGIFFLAGS) failed on %s !\", ifname)\n continue\n\n # Convert flags\n ifflags = struct.unpack(\"16xH14x\", result)[0]\n if ifflags & 0x1: # IFF_UP\n\n # Get a BPF handle\n fd, _ = get_dev_bpf()\n if fd is None:\n raise Scapy_Exception(\"No /dev/bpf are available !\")\n\n # Check if the interface can be used\n try:\n fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\", ifname.encode()))\n interfaces.append((ifname, int(ifname[-1])))\n except IOError:\n pass\n\n # Close the file descriptor\n os.close(fd)\n\n # Sort to mimic pcap_findalldevs() order\n interfaces.sort(key=lambda elt: elt[1])\n\n return interfaces\n\n\ndef get_working_if():\n \"\"\"Returns the first interface than can be used with BPF\"\"\"\n\n ifaces = get_working_ifaces()\n if not ifaces:\n # A better interface will be selected later using the routing table\n return LOOPBACK_NAME\n return ifaces[0][0]\n", "path": "scapy/arch/bpf/core.py" } ]
diff --git a/scapy/arch/bpf/core.py b/scapy/arch/bpf/core.py index 79f1e17ab69..7e32e4d5a03 100644 --- a/scapy/arch/bpf/core.py +++ b/scapy/arch/bpf/core.py @@ -88,7 +88,7 @@ def get_dev_bpf(): """Returns an opened BPF file object""" # Get the first available BPF handle - for bpf in range(0, 8): + for bpf in range(256): try: fd = os.open("/dev/bpf%i" % bpf, os.O_RDWR) return (fd, bpf)
learningequality__kolibri-4935
users should not be able to get 1000% on an exam, unfortunately

### Observed behavior
Reported by @jtamiace re: @radinamatic's apparent good luck:

![image](https://user-images.githubusercontent.com/2367265/52095361-812c2280-2577-11e9-86d6-8f6cff4e0f5b.png)

### Expected behavior
Exam scores should fall between 0% and 100%.

### User-facing consequences
????

### Errors and logs
Unknown.

### Steps to reproduce
See http://kolibribeta.learningequality.org/coach/#/fa4cbfeda32c0c0fbf1832fc1ddd10c3/reports/learners

### Context
Kolibri 0.12.0 alpha 7
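*Editor's aside (one plausible reading, not an assertion about the actual fix; the record's diff is truncated here):* the `get_num_correct` serializer method included below builds `exam_log.attemptlogs.values_list('item').order_by('completion_timestamp').distinct()` before summing `correct`. Django documents that columns named in `order_by()` take part in a `distinct()` query, so de-duplication happens on `(item, completion_timestamp)` rather than on `item` alone; repeated attempts at the same question can then all contribute to the sum, which would let a score exceed 100%. A minimal sketch of that documented caveat with a hypothetical model (not Kolibri's, and assuming a configured Django project with an app named `demo`):

```python
# Hypothetical model used only to demonstrate Django's distinct()/order_by()
# caveat; this is not Kolibri code and "demo" is an assumed app label.
from django.db import models

class AttemptLog(models.Model):
    item = models.CharField(max_length=32)
    correct = models.IntegerField()
    completion_timestamp = models.DateTimeField()

    class Meta:
        app_label = "demo"

# The ordering column joins the DISTINCT clause, so rows are de-duplicated
# on (item, completion_timestamp): a question attempted at several different
# timestamps yields several rows, and summing "correct" over them overcounts.
per_attempt = (
    AttemptLog.objects.values_list("item", flat=True)
    .order_by("completion_timestamp")
    .distinct()
)

# Ordering by the selected column keeps DISTINCT on "item" only,
# i.e. at most one row per question.
per_item = (
    AttemptLog.objects.values_list("item", flat=True)
    .order_by("item")
    .distinct()
)
```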
[ { "content": "from django.db.models import Max\nfrom django.db.models import Sum\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom kolibri.core.auth import models as auth_models\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger import models as logger_models\nfrom kolibri.core.notifications.models import LearnerProgressNotification\nfrom kolibri.core.notifications.models import NotificationEventType\n\n\n# Intended to match NotificationEventType\nNOT_STARTED = \"NotStarted\"\nSTARTED = \"Started\"\nHELP_NEEDED = \"HelpNeeded\"\nCOMPLETED = \"Completed\"\n\n\ndef content_status_serializer(lesson_data, learners_data, classroom):\n\n # First generate a unique set of content node ids from all the lessons\n lesson_node_ids = set()\n for lesson in lesson_data:\n lesson_node_ids |= set(lesson.get(\"node_ids\"))\n\n # Now create a map of content_id to node_id so that we can map between lessons, and notifications\n # which use the node id, and summary logs, which use content_id\n content_map = {n[0]: n[1] for n in ContentNode.objects.filter(id__in=lesson_node_ids).values_list(\"content_id\", \"id\")}\n\n # Get all the values we need from the summary logs to be able to summarize current status on the\n # relevant content items.\n content_log_values = logger_models.ContentSummaryLog.objects.filter(\n content_id__in=set(content_map.keys()), user__in=[learner[\"id\"] for learner in learners_data]\n ).values(\"user_id\", \"content_id\", \"end_timestamp\", \"time_spent\", \"progress\")\n\n # In order to make the lookup speedy, generate a unique key for each user/node that we find\n # listed in the needs help notifications that are relevant. 
We can then just check\n # existence of this key in the set in order to see whether this user has been flagged as needing\n # help.\n lookup_key = \"{user_id}-{node_id}\"\n needs_help = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Help,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n # In case a previously flagged learner has since completed an exercise, check all the completed\n # notifications also\n completed = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Completed,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n def get_status(log):\n \"\"\"\n Read the dict from a content summary log values query and return the status\n In the case that we have found a needs help notification for the user and content node\n in question, return that they need help, otherwise return status based on their\n current progress.\n \"\"\"\n content_id = log[\"content_id\"]\n if content_id in content_map:\n # Don't try to lookup anything if we don't know the content_id\n # node_id mapping - might happen if a channel has since been deleted\n key = lookup_key.format(user_id=log[\"user_id\"], node_id=content_map[content_id])\n if key in needs_help:\n # Now check if we have not already registered completion of the content node\n # or if we have and the timestamp is earlier than that on the needs_help event\n if key not in completed or completed[key] < needs_help[key]:\n return HELP_NEEDED\n if log[\"progress\"] == 1:\n return COMPLETED\n elif log[\"progress\"] == 0:\n return NOT_STARTED\n return STARTED\n\n def map_content_logs(log):\n \"\"\"\n Parse the content logs to return objects in the expected format.\n \"\"\"\n return {\n \"learner_id\": log[\"user_id\"],\n \"content_id\": log[\"content_id\"],\n \"status\": get_status(log),\n \"last_activity\": log[\"end_timestamp\"],\n \"time_spent\": log[\"time_spent\"],\n }\n\n return map(map_content_logs, content_log_values)\n\n\nclass ExamStatusSerializer(serializers.ModelSerializer):\n status = serializers.SerializerMethodField()\n exam_id = serializers.PrimaryKeyRelatedField(source=\"exam\", read_only=True)\n learner_id = serializers.PrimaryKeyRelatedField(source=\"user\", read_only=True)\n last_activity = serializers.CharField()\n num_correct = serializers.SerializerMethodField()\n\n def get_status(self, exam_log):\n if exam_log.closed:\n return COMPLETED\n else:\n return STARTED\n\n def get_num_correct(self, exam_log):\n return (\n exam_log.attemptlogs.values_list('item')\n .order_by('completion_timestamp')\n .distinct()\n .aggregate(Sum('correct'))\n .get('correct__sum')\n )\n\n class Meta:\n model = logger_models.ExamLog\n fields = (\"exam_id\", \"learner_id\", \"status\", \"last_activity\", \"num_correct\")\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n member_ids = serializers.SerializerMethodField()\n\n def get_member_ids(self, group):\n return group.get_members().values_list(\"id\", flat=True)\n\n class Meta:\n model = auth_models.LearnerGroup\n fields = (\"id\", \"name\", \"member_ids\")\n\n\nclass UserSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source=\"full_name\")\n\n class Meta:\n 
model = auth_models.FacilityUser\n fields = (\"id\", \"name\", \"username\")\n\n\nclass LessonNodeIdsField(serializers.Field):\n def to_representation(self, values):\n return [value[\"contentnode_id\"] for value in values]\n\n\nclass LessonAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass LessonSerializer(serializers.ModelSerializer):\n active = serializers.BooleanField(source=\"is_active\")\n node_ids = LessonNodeIdsField(default=[], source=\"resources\")\n\n # classrooms are in here, and filtered out later\n groups = LessonAssignmentsField(\n many=True, read_only=True, source=\"lesson_assignments\"\n )\n\n class Meta:\n model = Lesson\n fields = (\"id\", \"title\", \"active\", \"node_ids\", \"groups\")\n\n\nclass ExamQuestionSourcesField(serializers.Field):\n def to_representation(self, values):\n return values\n\n\nclass ExamAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass ExamSerializer(serializers.ModelSerializer):\n\n question_sources = ExamQuestionSourcesField(default=[])\n\n # classes are in here, and filtered out later\n groups = ExamAssignmentsField(many=True, read_only=True, source=\"assignments\")\n\n class Meta:\n model = Exam\n fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\")\n\n\nclass ContentSerializer(serializers.ModelSerializer):\n node_id = serializers.CharField(source=\"id\")\n\n class Meta:\n model = ContentNode\n fields = (\"node_id\", \"content_id\", \"title\", \"kind\")\n\n\ndef data(Serializer, queryset):\n return Serializer(queryset, many=True).data\n\n\nclass ClassSummaryViewSet(viewsets.ViewSet):\n def retrieve(self, request, pk):\n classroom = get_object_or_404(auth_models.Classroom, id=pk)\n query_learners = classroom.get_members()\n query_lesson = Lesson.objects.filter(collection=pk)\n query_exams = Exam.objects.filter(collection=pk)\n query_exam_logs = logger_models.ExamLog.objects.filter(\n exam__in=query_exams\n ).annotate(last_activity=Max(\"attemptlogs__end_timestamp\"))\n\n lesson_data = data(LessonSerializer, query_lesson)\n exam_data = data(ExamSerializer, query_exams)\n\n # filter classes out of exam assignments\n for exam in exam_data:\n exam[\"groups\"] = [g for g in exam[\"groups\"] if g != pk]\n\n # filter classes out of lesson assignments\n for lesson in lesson_data:\n lesson[\"groups\"] = [g for g in lesson[\"groups\"] if g != pk]\n\n all_node_ids = set()\n for lesson in lesson_data:\n all_node_ids |= set(lesson.get(\"node_ids\"))\n for exam in exam_data:\n exam_node_ids = [question['exercise_id'] for question in exam.get(\"question_sources\")]\n all_node_ids |= set(exam_node_ids)\n\n query_content = ContentNode.objects.filter(id__in=all_node_ids)\n\n learners_data = data(UserSerializer, query_learners)\n\n output = {\n \"id\": pk,\n \"name\": classroom.name,\n \"coaches\": data(UserSerializer, classroom.get_coaches()),\n \"learners\": learners_data,\n \"groups\": data(GroupSerializer, classroom.get_learner_groups()),\n \"exams\": exam_data,\n \"exam_learner_status\": data(ExamStatusSerializer, query_exam_logs),\n \"content\": data(ContentSerializer, query_content),\n \"content_learner_status\": content_status_serializer(lesson_data, learners_data, classroom),\n \"lessons\": lesson_data,\n }\n\n return Response(output)\n", "path": "kolibri/plugins/coach/class_summary_api.py" } ]
[ { "content": "from django.db.models import Max\nfrom django.db.models import Sum\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom kolibri.core.auth import models as auth_models\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger import models as logger_models\nfrom kolibri.core.notifications.models import LearnerProgressNotification\nfrom kolibri.core.notifications.models import NotificationEventType\n\n\n# Intended to match NotificationEventType\nNOT_STARTED = \"NotStarted\"\nSTARTED = \"Started\"\nHELP_NEEDED = \"HelpNeeded\"\nCOMPLETED = \"Completed\"\n\n\ndef content_status_serializer(lesson_data, learners_data, classroom):\n\n # First generate a unique set of content node ids from all the lessons\n lesson_node_ids = set()\n for lesson in lesson_data:\n lesson_node_ids |= set(lesson.get(\"node_ids\"))\n\n # Now create a map of content_id to node_id so that we can map between lessons, and notifications\n # which use the node id, and summary logs, which use content_id\n content_map = {n[0]: n[1] for n in ContentNode.objects.filter(id__in=lesson_node_ids).values_list(\"content_id\", \"id\")}\n\n # Get all the values we need from the summary logs to be able to summarize current status on the\n # relevant content items.\n content_log_values = logger_models.ContentSummaryLog.objects.filter(\n content_id__in=set(content_map.keys()), user__in=[learner[\"id\"] for learner in learners_data]\n ).values(\"user_id\", \"content_id\", \"end_timestamp\", \"time_spent\", \"progress\")\n\n # In order to make the lookup speedy, generate a unique key for each user/node that we find\n # listed in the needs help notifications that are relevant. 
We can then just check\n # existence of this key in the set in order to see whether this user has been flagged as needing\n # help.\n lookup_key = \"{user_id}-{node_id}\"\n needs_help = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Help,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n # In case a previously flagged learner has since completed an exercise, check all the completed\n # notifications also\n completed = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Completed,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n def get_status(log):\n \"\"\"\n Read the dict from a content summary log values query and return the status\n In the case that we have found a needs help notification for the user and content node\n in question, return that they need help, otherwise return status based on their\n current progress.\n \"\"\"\n content_id = log[\"content_id\"]\n if content_id in content_map:\n # Don't try to lookup anything if we don't know the content_id\n # node_id mapping - might happen if a channel has since been deleted\n key = lookup_key.format(user_id=log[\"user_id\"], node_id=content_map[content_id])\n if key in needs_help:\n # Now check if we have not already registered completion of the content node\n # or if we have and the timestamp is earlier than that on the needs_help event\n if key not in completed or completed[key] < needs_help[key]:\n return HELP_NEEDED\n if log[\"progress\"] == 1:\n return COMPLETED\n elif log[\"progress\"] == 0:\n return NOT_STARTED\n return STARTED\n\n def map_content_logs(log):\n \"\"\"\n Parse the content logs to return objects in the expected format.\n \"\"\"\n return {\n \"learner_id\": log[\"user_id\"],\n \"content_id\": log[\"content_id\"],\n \"status\": get_status(log),\n \"last_activity\": log[\"end_timestamp\"],\n \"time_spent\": log[\"time_spent\"],\n }\n\n return map(map_content_logs, content_log_values)\n\n\nclass ExamStatusSerializer(serializers.ModelSerializer):\n status = serializers.SerializerMethodField()\n exam_id = serializers.PrimaryKeyRelatedField(source=\"exam\", read_only=True)\n learner_id = serializers.PrimaryKeyRelatedField(source=\"user\", read_only=True)\n last_activity = serializers.CharField()\n num_correct = serializers.SerializerMethodField()\n\n def get_status(self, exam_log):\n if exam_log.closed:\n return COMPLETED\n else:\n return STARTED\n\n def get_num_correct(self, exam_log):\n return (\n exam_log.attemptlogs.values_list('item')\n .order_by('completion_timestamp')\n .distinct()\n .aggregate(Sum('correct'))\n .get('correct__sum')\n )\n\n class Meta:\n model = logger_models.ExamLog\n fields = (\"exam_id\", \"learner_id\", \"status\", \"last_activity\", \"num_correct\")\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n member_ids = serializers.SerializerMethodField()\n\n def get_member_ids(self, group):\n return group.get_members().values_list(\"id\", flat=True)\n\n class Meta:\n model = auth_models.LearnerGroup\n fields = (\"id\", \"name\", \"member_ids\")\n\n\nclass UserSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source=\"full_name\")\n\n class Meta:\n 
model = auth_models.FacilityUser\n fields = (\"id\", \"name\", \"username\")\n\n\nclass LessonNodeIdsField(serializers.Field):\n def to_representation(self, values):\n return [value[\"contentnode_id\"] for value in values]\n\n\nclass LessonAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass LessonSerializer(serializers.ModelSerializer):\n active = serializers.BooleanField(source=\"is_active\")\n node_ids = LessonNodeIdsField(default=[], source=\"resources\")\n\n # classrooms are in here, and filtered out later\n groups = LessonAssignmentsField(\n many=True, read_only=True, source=\"lesson_assignments\"\n )\n\n class Meta:\n model = Lesson\n fields = (\"id\", \"title\", \"active\", \"node_ids\", \"groups\")\n\n\nclass ExamQuestionSourcesField(serializers.Field):\n def to_representation(self, values):\n return values\n\n\nclass ExamAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass ExamSerializer(serializers.ModelSerializer):\n\n question_sources = ExamQuestionSourcesField(default=[])\n\n # classes are in here, and filtered out later\n groups = ExamAssignmentsField(many=True, read_only=True, source=\"assignments\")\n\n class Meta:\n model = Exam\n fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\", \"question_count\")\n\n\nclass ContentSerializer(serializers.ModelSerializer):\n node_id = serializers.CharField(source=\"id\")\n\n class Meta:\n model = ContentNode\n fields = (\"node_id\", \"content_id\", \"title\", \"kind\")\n\n\ndef data(Serializer, queryset):\n return Serializer(queryset, many=True).data\n\n\nclass ClassSummaryViewSet(viewsets.ViewSet):\n def retrieve(self, request, pk):\n classroom = get_object_or_404(auth_models.Classroom, id=pk)\n query_learners = classroom.get_members()\n query_lesson = Lesson.objects.filter(collection=pk)\n query_exams = Exam.objects.filter(collection=pk)\n query_exam_logs = logger_models.ExamLog.objects.filter(\n exam__in=query_exams\n ).annotate(last_activity=Max(\"attemptlogs__end_timestamp\"))\n\n lesson_data = data(LessonSerializer, query_lesson)\n exam_data = data(ExamSerializer, query_exams)\n\n # filter classes out of exam assignments\n for exam in exam_data:\n exam[\"groups\"] = [g for g in exam[\"groups\"] if g != pk]\n\n # filter classes out of lesson assignments\n for lesson in lesson_data:\n lesson[\"groups\"] = [g for g in lesson[\"groups\"] if g != pk]\n\n all_node_ids = set()\n for lesson in lesson_data:\n all_node_ids |= set(lesson.get(\"node_ids\"))\n for exam in exam_data:\n exam_node_ids = [question['exercise_id'] for question in exam.get(\"question_sources\")]\n all_node_ids |= set(exam_node_ids)\n\n query_content = ContentNode.objects.filter(id__in=all_node_ids)\n\n learners_data = data(UserSerializer, query_learners)\n\n output = {\n \"id\": pk,\n \"name\": classroom.name,\n \"coaches\": data(UserSerializer, classroom.get_coaches()),\n \"learners\": learners_data,\n \"groups\": data(GroupSerializer, classroom.get_learner_groups()),\n \"exams\": exam_data,\n \"exam_learner_status\": data(ExamStatusSerializer, query_exam_logs),\n \"content\": data(ContentSerializer, query_content),\n \"content_learner_status\": content_status_serializer(lesson_data, learners_data, classroom),\n \"lessons\": lesson_data,\n }\n\n return Response(output)\n", "path": "kolibri/plugins/coach/class_summary_api.py" } ]
diff --git a/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleServerResponse.js b/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleServerResponse.js index 7758f81365c..d8487072b51 100644 --- a/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleServerResponse.js +++ b/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleServerResponse.js @@ -77,6 +77,8 @@ export default { { exercise_id: '2a722a9e57575148bc55deed7550ed62', question_id: '3' }, ], groups: ['c4625c3fef6b7d918e9417d92e482e6f'], + data_version_model: 1, + question_count: 3, }, { id: 'd7033a1cb888493763dc9b5f3ab2505b', @@ -87,6 +89,8 @@ export default { { exercise_id: 'eadec7f803994b6eb8f401237ec0f777', question_id: 'B' }, ], groups: ['8d2e8c66c05004657d676155dd0b305d'], + data_version_model: 1, + question_count: 2, }, { id: '4018bcea43cee3d05811b641fca0b152', @@ -98,6 +102,8 @@ export default { { exercise_id: '3a655a4b8adb5114a571dfd0c75cbc19', question_id: '12' }, ], groups: ['7c20f664b6a5c43d64b0cdd3161be513'], + data_version_model: 1, + question_count: 3, }, { id: '97316f077d470b45e912096edb534076', @@ -109,6 +115,8 @@ export default { { exercise_id: '9baf781e43b0514085cc205176b0ee71', question_id: 'z' }, ], groups: [], + data_version_model: 1, + question_count: 3, }, ], exam_learner_status: [ diff --git a/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleState.js b/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleState.js index fd3fbeae748..a8dab4003e8 100644 --- a/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleState.js +++ b/kolibri/plugins/coach/assets/src/modules/classSummary/__test__/sampleState.js @@ -153,6 +153,8 @@ export default { { exercise_id: '2a722a9e57575148bc55deed7550ed62', question_id: '3' }, ], groups: ['c4625c3fef6b7d918e9417d92e482e6f'], + data_version_model: 1, + question_count: 3, }, d7033a1cb888493763dc9b5f3ab2505b: { id: 'd7033a1cb888493763dc9b5f3ab2505b', @@ -163,6 +165,8 @@ export default { { exercise_id: 'eadec7f803994b6eb8f401237ec0f777', question_id: 'B' }, ], groups: ['8d2e8c66c05004657d676155dd0b305d'], + data_version_model: 1, + question_count: 2, }, '4018bcea43cee3d05811b641fca0b152': { id: '4018bcea43cee3d05811b641fca0b152', @@ -174,6 +178,8 @@ export default { { exercise_id: '3a655a4b8adb5114a571dfd0c75cbc19', question_id: '12' }, ], groups: ['7c20f664b6a5c43d64b0cdd3161be513'], + data_version_model: 1, + question_count: 3, }, '97316f077d470b45e912096edb534076': { id: '97316f077d470b45e912096edb534076', @@ -185,6 +191,8 @@ export default { { exercise_id: '9baf781e43b0514085cc205176b0ee71', question_id: 'z' }, ], groups: [], + data_version_model: 1, + question_count: 3, }, }, examLearnerStatusMap: { diff --git a/kolibri/plugins/coach/assets/src/modules/classSummary/index.js b/kolibri/plugins/coach/assets/src/modules/classSummary/index.js index af3ac95f501..ead5d87db6a 100644 --- a/kolibri/plugins/coach/assets/src/modules/classSummary/index.js +++ b/kolibri/plugins/coach/assets/src/modules/classSummary/index.js @@ -39,6 +39,7 @@ function defaultState() { * question_sources: [{exercise_id, question_id}, ...], * groups: [id, ...], * data_model_version, + * question_count, * } * } */ @@ -264,8 +265,7 @@ export default { if (status.num_correct === null) { status.score = null; } else { - status.score = - (1.0 * status.num_correct) / examMap[status.exam_id].question_sources.length; + status.score = (1.0 * status.num_correct) / examMap[status.exam_id].question_count; } 
}); summary.content_learner_status.forEach(status => { diff --git a/kolibri/plugins/coach/class_summary_api.py b/kolibri/plugins/coach/class_summary_api.py index eefe42e1969..73e6792ae18 100644 --- a/kolibri/plugins/coach/class_summary_api.py +++ b/kolibri/plugins/coach/class_summary_api.py @@ -188,7 +188,7 @@ class ExamSerializer(serializers.ModelSerializer): class Meta: model = Exam - fields = ("id", "title", "active", "question_sources", "groups", "data_model_version") + fields = ("id", "title", "active", "question_sources", "groups", "data_model_version", "question_count") class ContentSerializer(serializers.ModelSerializer):
ansible-collections__community.aws-1197
ec2_customer_gateway: bgp_asn is not required

### Summary

The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter. It says the ASN must be passed when state=present, but the code defaults to 65000 if the parameter is absent. See the ensure_cgw_present() method:

```
def ensure_cgw_present(self, bgp_asn, ip_address):
    if not bgp_asn:
        bgp_asn = 65000
    response = self.ec2.create_customer_gateway(
        DryRun=False,
        Type='ipsec.1',
        PublicIp=ip_address,
        BgpAsn=bgp_asn,
    )
    return response
```

### Issue Type

Documentation Report

### Component Name

ec2_customer_gateway

### Ansible Version

```console (paste below)
$ ansible --version
ansible [core 2.12.4]
  config file = None
  configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible
  ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections
  executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible
  python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]
  jinja version = 3.1.1
  libyaml = True
```

### Collection Versions

```console (paste below)
$ ansible-galaxy collection list
```

### Configuration

```console (paste below)
$ ansible-config dump --only-changed
```

### OS / Environment

main branch, as of 2022-04-18.

### Additional Information

Suggested rewording:

```
options:
  bgp_asn:
    description:
      - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 65000.
    type: int
```

### Code of Conduct

- [X] I agree to follow the Ansible Code of Conduct
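A minimal, self-contained sketch of the fallback behaviour the Summary describes (the helper name `resolve_bgp_asn` is invented for illustration and is not part of the module; only the 65000 default mirrors `ensure_cgw_present()`):

```python
# Illustrative only: mirrors the module's fallback, where a missing/falsy
# bgp_asn is replaced with 65000 before the EC2 API call is made, so the
# parameter is effectively optional when state=present.
def resolve_bgp_asn(bgp_asn=None, default_asn=65000):
    """Return the ASN value that would be sent as BgpAsn."""
    return bgp_asn if bgp_asn else default_asn


if __name__ == "__main__":
    assert resolve_bgp_asn() == 65000        # parameter omitted -> module default
    assert resolve_bgp_asn(12345) == 12345   # explicit ASN is passed through
    print("bgp_asn falls back to 65000 when not supplied")
```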
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = module.client('ec2')\n except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py" } ]
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN).\n - Defaults to C(65000) if not specified when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = 
module.client('ec2')\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py" } ]
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py index 9c00783a58a..f07e92f4f7c 100644 --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description:
TabbycatDebate__tabbycat-1883
Should we set DEFAULT_AUTO_FIELD?

Related to the Django 3.2 upgrade. Just thought this should be a conscious documented discussion rather than an informal one, since there seem to be (minor but nontrivial) consequences.

https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys:

> Maintaining the historical behavior, the default value for `DEFAULT_AUTO_FIELD` is `AutoField`. Starting with 3.2 new projects are generated with `DEFAULT_AUTO_FIELD` set to `BigAutoField`. Also, new apps are generated with `AppConfig.default_auto_field` set to `BigAutoField`. In a future Django release the default value of `DEFAULT_AUTO_FIELD` will be changed to `BigAutoField`.

But migrations aren't seamless. https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-DEFAULT_AUTO_FIELD:

> Unfortunately, the primary keys of existing auto-created through tables cannot currently be updated by the migrations framework.
>
> This means that if you switch the value of `DEFAULT_AUTO_FIELD` and then generate migrations, the primary keys of the related models will be updated, as will the foreign keys from the through table, but the primary key of the auto-created through table will not be migrated.

To me the path of least resistance would be to set `DEFAULT_AUTO_FIELD` to `AutoField`, and kick the `BigAutoField` can down the road until (maybe) migrations work for it without manually added code, or until it becomes necessary. I can't imagine hitting 2 billion entries (what I presume the `AutoField` limit would be) in a table in a Tabbycat instance any time soon. But there's nothing prohibitive about `BigAutoField` migration, if others would prefer to get this change out of the way.
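For reference, a sketch of the "path of least resistance" described above as it would appear in a Django settings module (this mirrors the change in the diff below; the commented-out alternative is the forward-looking option being deferred):

```python
# Pin the historical behaviour: 32-bit AutoField primary keys, no new
# migrations, and Django 3.2's models.W042 "auto-created primary key"
# warning is silenced. The per-app equivalent is AppConfig.default_auto_field.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'

# Forward-looking alternative (64-bit keys); switching later would require
# migrating every model's primary key, as discussed above:
# DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
```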
[ { "content": "import os\n\nfrom django.contrib.messages import constants as messages\nfrom django.utils.translation import gettext_lazy as _\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir)))\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# ==============================================================================\n# Overwritten in local.py or heroku.py\n# ==============================================================================\n\nADMINS = ('Philip and Chuan-Zheng', '[email protected]'),\nMANAGERS = ADMINS\nDEBUG = bool(int(os.environ['DEBUG'])) if 'DEBUG' in os.environ else False\nENABLE_DEBUG_TOOLBAR = False # Must default to false; overriden in Dev config\nDISABLE_SENTRY = True # Overriden in Heroku config\nSECRET_KEY = r'#2q43u&tp4((4&m3i8v%w-6z6pp7m(v0-6@w@i!j5n)n15epwc'\n\n# ==============================================================================\n# Version\n# ==============================================================================\n\nTABBYCAT_VERSION = '2.6.0-dev'\nTABBYCAT_CODENAME = 'Ocicat'\nREADTHEDOCS_VERSION = 'v2.6.0-dev'\n\n# ==============================================================================\n# Internationalization and Localization\n# ==============================================================================\n\nUSE_I18N = True\nUSE_TZ = True\nUSE_L10N = True\nLANGUAGE_CODE = 'en'\nTIME_ZONE = os.environ.get('TIME_ZONE', 'Australia/Melbourne')\n\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, 'locale'),\n]\n\n# Languages that should be available in the switcher\nEXTRA_LANG_INFO = {\n 'ms': {\n 'bidi': False,\n 'code': 'ms',\n 'name': 'Malay',\n 'name_local': 'Bahasa Melayu', #unicode codepoints here\n },\n 'tzl': {\n # Use code for Talossan; can't use proper reserved code...\n # Talossan is a constructed language, without native speakers,\n # so the odds of having a translation are low.\n 'code': 'tzl',\n 'name': 'Translation',\n 'name_local': 'Translation',\n },\n}\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nLANG_INFO = dict(django.conf.locale.LANG_INFO, **EXTRA_LANG_INFO)\ndjango.conf.locale.LANG_INFO = LANG_INFO\n\nLANGUAGES = [\n ('ar', _('Arabic')),\n ('bn', _('Bengali')),\n ('en', _('English')),\n ('es', _('Spanish')),\n ('fr', _('French')),\n ('id', _('Indonesian')),\n ('ja', _('Japanese')),\n ('ms', _('Malay')),\n ('pt', _('Portuguese')),\n ('ru', _('Russian')),\n ('zh-hans', _('Simplified Chinese')),\n ('tzl', _('Translation')),\n]\n\nSTATICI18N_ROOT = os.path.join(BASE_DIR, \"locale\")\n\nFORMAT_MODULE_PATH = [\n 'utils.formats',\n]\n\n# ==============================================================================\n# Django-specific Modules\n# ==============================================================================\n\nMIDDLEWARE = [\n 'django.middleware.gzip.GZipMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n # User language preferences; must be after Session\n 'django.middleware.locale.LocaleMiddleware',\n # Set Etags; i.e. 
cached requests not on network; must precede Common\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'utils.middleware.DebateMiddleware',\n]\n\nTABBYCAT_APPS = (\n 'actionlog',\n 'adjallocation',\n 'adjfeedback',\n 'api',\n 'availability',\n 'breakqual',\n 'checkins',\n 'divisions', # obsolete\n 'draw',\n 'motions',\n 'options',\n 'participants',\n 'printing',\n 'privateurls',\n 'results',\n 'tournaments',\n 'venues',\n 'utils',\n 'users',\n 'standings',\n 'notifications',\n 'importer',\n)\n\nINSTALLED_APPS = (\n 'jet',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'channels', # For Websockets / real-time connections (above whitenoise)\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_summernote', # Keep above our apps; as we unregister an admin model\n 'django.contrib.messages') \\\n + TABBYCAT_APPS + (\n 'dynamic_preferences',\n 'django_extensions', # For Secret Generation Command\n 'gfklookupwidget',\n 'formtools',\n 'statici18n', # Compile js translations as static file; saving requests\n 'polymorphic',\n 'corsheaders',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_better_admin_arrayfield',\n)\n\nROOT_URLCONF = 'urls'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/'\nFIXTURE_DIRS = (os.path.join(os.path.dirname(BASE_DIR), 'data', 'fixtures'), )\nSILENCED_SYSTEM_CHECKS = ('urls.W002',)\n\n# ==============================================================================\n# Templates\n# ==============================================================================\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.request', # for Jet\n 'utils.context_processors.debate_context', # for tournament config vars\n 'django.template.context_processors.i18n', # for serving static language translations\n 'dynamic_preferences.processors.global_preferences',\n ],\n 'loaders': [\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]),\n ],\n }\n },\n]\n\n# ==============================================================================\n# Caching\n# ==============================================================================\n\nPUBLIC_FAST_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_FAST_CACHE_TIMEOUT', 60 * 1))\nPUBLIC_SLOW_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_SLOW_CACHE_TIMEOUT', 60 * 3.5))\nTAB_PAGES_CACHE_TIMEOUT = int(os.environ.get('TAB_PAGES_CACHE_TIMEOUT', 60 * 120))\n\n# Default non-heroku cache is to use local memory\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n# 
==============================================================================\n# Static Files and Compilation\n# ==============================================================================\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'\n\n# ==============================================================================\n# Logging\n# ==============================================================================\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'sentry.errors': {\n 'level': 'INFO',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s',\n },\n },\n}\n\nfor app in TABBYCAT_APPS:\n LOGGING['loggers'][app] = {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n }\n\n# ==============================================================================\n# Messages\n# ==============================================================================\n\nMESSAGE_TAGS = {messages.ERROR: 'danger', }\n\n# ==============================================================================\n# Summernote (WYSWIG)\n# ==============================================================================\n\nSUMMERNOTE_THEME = 'bs4' # Bootstrap 4\n\nSUMMERNOTE_CONFIG = {\n 'width': '100%',\n 'height': '480',\n 'toolbar': [\n ['style', ['bold', 'italic', 'underline', 'fontsize', 'color', 'clear']],\n ['para', ['ul', 'ol']],\n ['insert', ['link', 'picture']],\n ['misc', ['undo', 'redo', 'codeview']],\n ],\n 'disable_upload': True,\n 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,\n}\n\nX_FRAME_OPTIONS = 'SAMEORIGIN' # Necessary to get Django-Summernote working because of Django 3 changes\n\n# ==============================================================================\n# Database\n# ==============================================================================\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n },\n}\n\n# ==============================================================================\n# Channels\n# ==============================================================================\n\nASGI_APPLICATION = \"routing.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels.layers.InMemoryChannelLayer\",\n },\n}\n\n# ==============================================================================\n# Dynamic preferences\n# ==============================================================================\n\nDYNAMIC_PREFERENCES = {\n 'REGISTRY_MODULE': 'preferences',\n}\n\n# ==============================================================================\n# REST Framework\n# ==============================================================================\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': [\n 'rest_framework.renderers.JSONRenderer',\n ],\n 'DEFAULT_PARSER_CLASSES': [\n 
'rest_framework.parsers.JSONParser',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n}\n\n# ----------------------------------------\n# CORS-related settings for REST framework\n# ----------------------------------------\n\nCORS_ALLOW_ALL_ORIGINS = True\nCORS_URLS_REGEX = r'^/api(/.*)?$'\n", "path": "tabbycat/settings/core.py" } ]
[ { "content": "import os\n\nfrom django.contrib.messages import constants as messages\nfrom django.utils.translation import gettext_lazy as _\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir)))\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# ==============================================================================\n# Overwritten in local.py or heroku.py\n# ==============================================================================\n\nADMINS = ('Philip and Chuan-Zheng', '[email protected]'),\nMANAGERS = ADMINS\nDEBUG = bool(int(os.environ['DEBUG'])) if 'DEBUG' in os.environ else False\nENABLE_DEBUG_TOOLBAR = False # Must default to false; overriden in Dev config\nDISABLE_SENTRY = True # Overriden in Heroku config\nSECRET_KEY = r'#2q43u&tp4((4&m3i8v%w-6z6pp7m(v0-6@w@i!j5n)n15epwc'\n\n# ==============================================================================\n# Version\n# ==============================================================================\n\nTABBYCAT_VERSION = '2.6.0-dev'\nTABBYCAT_CODENAME = 'Ocicat'\nREADTHEDOCS_VERSION = 'v2.6.0-dev'\n\n# ==============================================================================\n# Internationalization and Localization\n# ==============================================================================\n\nUSE_I18N = True\nUSE_TZ = True\nUSE_L10N = True\nLANGUAGE_CODE = 'en'\nTIME_ZONE = os.environ.get('TIME_ZONE', 'Australia/Melbourne')\n\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, 'locale'),\n]\n\n# Languages that should be available in the switcher\nEXTRA_LANG_INFO = {\n 'ms': {\n 'bidi': False,\n 'code': 'ms',\n 'name': 'Malay',\n 'name_local': 'Bahasa Melayu', #unicode codepoints here\n },\n 'tzl': {\n # Use code for Talossan; can't use proper reserved code...\n # Talossan is a constructed language, without native speakers,\n # so the odds of having a translation are low.\n 'code': 'tzl',\n 'name': 'Translation',\n 'name_local': 'Translation',\n },\n}\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nLANG_INFO = dict(django.conf.locale.LANG_INFO, **EXTRA_LANG_INFO)\ndjango.conf.locale.LANG_INFO = LANG_INFO\n\nLANGUAGES = [\n ('ar', _('Arabic')),\n ('bn', _('Bengali')),\n ('en', _('English')),\n ('es', _('Spanish')),\n ('fr', _('French')),\n ('id', _('Indonesian')),\n ('ja', _('Japanese')),\n ('ms', _('Malay')),\n ('pt', _('Portuguese')),\n ('ru', _('Russian')),\n ('zh-hans', _('Simplified Chinese')),\n ('tzl', _('Translation')),\n]\n\nSTATICI18N_ROOT = os.path.join(BASE_DIR, \"locale\")\n\nFORMAT_MODULE_PATH = [\n 'utils.formats',\n]\n\n# ==============================================================================\n# Django-specific Modules\n# ==============================================================================\n\nMIDDLEWARE = [\n 'django.middleware.gzip.GZipMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n # User language preferences; must be after Session\n 'django.middleware.locale.LocaleMiddleware',\n # Set Etags; i.e. 
cached requests not on network; must precede Common\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'utils.middleware.DebateMiddleware',\n]\n\nTABBYCAT_APPS = (\n 'actionlog',\n 'adjallocation',\n 'adjfeedback',\n 'api',\n 'availability',\n 'breakqual',\n 'checkins',\n 'divisions', # obsolete\n 'draw',\n 'motions',\n 'options',\n 'participants',\n 'printing',\n 'privateurls',\n 'results',\n 'tournaments',\n 'venues',\n 'utils',\n 'users',\n 'standings',\n 'notifications',\n 'importer',\n)\n\nINSTALLED_APPS = (\n 'jet',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'channels', # For Websockets / real-time connections (above whitenoise)\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_summernote', # Keep above our apps; as we unregister an admin model\n 'django.contrib.messages') \\\n + TABBYCAT_APPS + (\n 'dynamic_preferences',\n 'django_extensions', # For Secret Generation Command\n 'gfklookupwidget',\n 'formtools',\n 'statici18n', # Compile js translations as static file; saving requests\n 'polymorphic',\n 'corsheaders',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_better_admin_arrayfield',\n)\n\nROOT_URLCONF = 'urls'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/'\nFIXTURE_DIRS = (os.path.join(os.path.dirname(BASE_DIR), 'data', 'fixtures'), )\nSILENCED_SYSTEM_CHECKS = ('urls.W002',)\n\n# ==============================================================================\n# Templates\n# ==============================================================================\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.request', # for Jet\n 'utils.context_processors.debate_context', # for tournament config vars\n 'django.template.context_processors.i18n', # for serving static language translations\n 'dynamic_preferences.processors.global_preferences',\n ],\n 'loaders': [\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]),\n ],\n }\n },\n]\n\n# ==============================================================================\n# Caching\n# ==============================================================================\n\nPUBLIC_FAST_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_FAST_CACHE_TIMEOUT', 60 * 1))\nPUBLIC_SLOW_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_SLOW_CACHE_TIMEOUT', 60 * 3.5))\nTAB_PAGES_CACHE_TIMEOUT = int(os.environ.get('TAB_PAGES_CACHE_TIMEOUT', 60 * 120))\n\n# Default non-heroku cache is to use local memory\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n# 
==============================================================================\n# Static Files and Compilation\n# ==============================================================================\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'\n\n# ==============================================================================\n# Logging\n# ==============================================================================\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'sentry.errors': {\n 'level': 'INFO',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s',\n },\n },\n}\n\nfor app in TABBYCAT_APPS:\n LOGGING['loggers'][app] = {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n }\n\n# ==============================================================================\n# Messages\n# ==============================================================================\n\nMESSAGE_TAGS = {messages.ERROR: 'danger', }\n\n# ==============================================================================\n# Summernote (WYSWIG)\n# ==============================================================================\n\nSUMMERNOTE_THEME = 'bs4' # Bootstrap 4\n\nSUMMERNOTE_CONFIG = {\n 'width': '100%',\n 'height': '480',\n 'toolbar': [\n ['style', ['bold', 'italic', 'underline', 'fontsize', 'color', 'clear']],\n ['para', ['ul', 'ol']],\n ['insert', ['link', 'picture']],\n ['misc', ['undo', 'redo', 'codeview']],\n ],\n 'disable_upload': True,\n 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,\n}\n\nX_FRAME_OPTIONS = 'SAMEORIGIN' # Necessary to get Django-Summernote working because of Django 3 changes\n\n# ==============================================================================\n# Database\n# ==============================================================================\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n },\n}\n\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\n\n# ==============================================================================\n# Channels\n# ==============================================================================\n\nASGI_APPLICATION = \"routing.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels.layers.InMemoryChannelLayer\",\n },\n}\n\n# ==============================================================================\n# Dynamic preferences\n# ==============================================================================\n\nDYNAMIC_PREFERENCES = {\n 'REGISTRY_MODULE': 'preferences',\n}\n\n# ==============================================================================\n# REST Framework\n# ==============================================================================\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': [\n 
'rest_framework.renderers.JSONRenderer',\n ],\n 'DEFAULT_PARSER_CLASSES': [\n 'rest_framework.parsers.JSONParser',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n}\n\n# ----------------------------------------\n# CORS-related settings for REST framework\n# ----------------------------------------\n\nCORS_ALLOW_ALL_ORIGINS = True\nCORS_URLS_REGEX = r'^/api(/.*)?$'\n", "path": "tabbycat/settings/core.py" } ]
diff --git a/tabbycat/settings/core.py b/tabbycat/settings/core.py index fcee78da7da..34bbc91b105 100644 --- a/tabbycat/settings/core.py +++ b/tabbycat/settings/core.py @@ -301,6 +301,8 @@ }, } +DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + # ============================================================================== # Channels # ==============================================================================