| column | dtype | min length | max length |
| --- | --- | --- | --- |
| in_source_id | string | 13 | 58 |
| issue | string | 3 | 241k |
| before_files | list | 0 | 3 |
| after_files | list | 0 | 3 |
| pr_diff | string | 109 | 107M |
django-oscar__django-oscar-1235
Misprint in commit f56226a

oscar/apps/dashboard/catalogue/forms.py has a misprint introduced in commit f56226aa2f0e18538a1095a558c76312166bb11a, at line 382:

``` python
class StockAlertSearchForm(forms.Form):
    tatus = forms.CharField(label=_('Status'))
```

`tatus` -> `status`.
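For reference, a minimal sketch of the corrected form as it stands after the fix (matching the diff at the end of this record); only the field name changes:

```python
from django import forms
from django.utils.translation import ugettext_lazy as _


class StockAlertSearchForm(forms.Form):
    # Field renamed from the misspelled `tatus` so the dashboard stock-alert
    # search form actually exposes a `status` field.
    status = forms.CharField(label=_('Status'))
```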
[ { "content": "from django import forms\nfrom django.core.exceptions import ValidationError, MultipleObjectsReturned\nfrom django.forms.models import inlineformset_factory\nfrom django.utils.translation import ugettext_lazy as _\nfrom treebeard.forms import MoveNodeForm, movenodeform_factory\n\nfrom oscar.core.utils import slugify\nfrom oscar.core.loading import get_class, get_model\nfrom oscar.forms.widgets import ImageInput\n\nProduct = get_model('catalogue', 'Product')\nProductClass = get_model('catalogue', 'ProductClass')\nCategory = get_model('catalogue', 'Category')\nStockRecord = get_model('partner', 'StockRecord')\nPartner = get_model('partner', 'Partner')\nProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')\nProductCategory = get_model('catalogue', 'ProductCategory')\nProductImage = get_model('catalogue', 'ProductImage')\nProductRecommendation = get_model('catalogue', 'ProductRecommendation')\nProductSelect = get_class('dashboard.catalogue.widgets', 'ProductSelect')\nProductSelectMultiple = get_class('dashboard.catalogue.widgets',\n 'ProductSelectMultiple')\n\n\nclass BaseCategoryForm(MoveNodeForm):\n\n def clean(self):\n cleaned_data = super(BaseCategoryForm, self).clean()\n\n name = cleaned_data.get('name')\n ref_node_pk = cleaned_data.get('_ref_node_id')\n pos = cleaned_data.get('_position')\n\n if name and self.is_slug_conflicting(name, ref_node_pk, pos):\n raise forms.ValidationError(\n _('Category with the given path already exists.'))\n return cleaned_data\n\n def is_slug_conflicting(self, name, ref_node_pk, position):\n # determine parent\n if ref_node_pk:\n ref_category = Category.objects.get(pk=ref_node_pk)\n if position == 'first-child':\n parent = ref_category\n else:\n parent = ref_category.get_parent()\n else:\n parent = None\n\n # build full slug\n slug_prefix = ''\n if parent:\n slug_prefix = (parent.slug + Category._slug_separator)\n slug = '%s%s' % (slug_prefix, slugify(name))\n\n # check if slug is conflicting\n try:\n category = Category.objects.get(slug=slug)\n except Category.DoesNotExist:\n pass\n else:\n if category.pk != self.instance.pk:\n return True\n return False\n\nCategoryForm = movenodeform_factory(Category, form=BaseCategoryForm)\n\n\nclass ProductClassSelectForm(forms.Form):\n \"\"\"\n Form which is used before creating a product to select it's product class\n \"\"\"\n\n product_class = forms.ModelChoiceField(\n label=_(\"Create a new product of type\"),\n empty_label=_(\"-- Choose type --\"),\n queryset=ProductClass.objects.all())\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n If there's only one product class, pre-select it\n \"\"\"\n super(ProductClassSelectForm, self).__init__(*args, **kwargs)\n qs = self.fields['product_class'].queryset\n if not kwargs.get('initial') and len(qs) == 1:\n self.fields['product_class'].initial = qs[0]\n\n\nclass ProductSearchForm(forms.Form):\n upc = forms.CharField(max_length=16, required=False, label=_('UPC'))\n title = forms.CharField(max_length=255, required=False, label=_('Title'))\n\n def clean(self):\n cleaned_data = super(ProductSearchForm, self).clean()\n cleaned_data['upc'] = cleaned_data['upc'].strip()\n cleaned_data['title'] = cleaned_data['title'].strip()\n return cleaned_data\n\n\nclass StockRecordForm(forms.ModelForm):\n\n def __init__(self, product_class, user, *args, **kwargs):\n # The user kwarg is not used by stock StockRecordForm. 
We pass it\n # anyway in case one wishes to customise the partner queryset\n self.user = user\n super(StockRecordForm, self).__init__(*args, **kwargs)\n\n # If not tracking stock, we hide the fields\n if not product_class.track_stock:\n del self.fields['num_in_stock']\n del self.fields['low_stock_threshold']\n else:\n self.fields['price_excl_tax'].required = True\n self.fields['num_in_stock'].required = True\n\n class Meta:\n model = StockRecord\n exclude = ('product', 'num_allocated')\n\n\nBaseStockRecordFormSet = inlineformset_factory(\n Product, StockRecord, form=StockRecordForm, extra=1)\n\n\nclass StockRecordFormSet(BaseStockRecordFormSet):\n\n def __init__(self, product_class, user, *args, **kwargs):\n self.user = user\n self.require_user_stockrecord = not user.is_staff\n self.product_class = product_class\n super(StockRecordFormSet, self).__init__(*args, **kwargs)\n self.set_initial_data()\n\n def set_initial_data(self):\n \"\"\"\n If user has only one partner associated, set the first\n stock record's partner to it. Can't pre-select for staff users as\n they're allowed to save a product without a stock record.\n\n This is intentionally done after calling __init__ as passing initial\n data to __init__ creates a form for each list item. So depending on\n whether we can pre-select the partner or not, we'd end up with 1 or 2\n forms for an unbound form.\n \"\"\"\n if self.require_user_stockrecord:\n try:\n user_partner = self.user.partners.get()\n except (Partner.DoesNotExist, MultipleObjectsReturned):\n pass\n else:\n partner_field = self.forms[0].fields.get('partner', None)\n if partner_field and partner_field.initial is None:\n partner_field.initial = user_partner\n\n def _construct_form(self, i, **kwargs):\n kwargs['product_class'] = self.product_class\n kwargs['user'] = self.user\n return super(StockRecordFormSet, self)._construct_form(\n i, **kwargs)\n\n def clean(self):\n \"\"\"\n If the user isn't a staff user, this validation ensures that at least\n one stock record's partner is associated with a users partners.\n \"\"\"\n if any(self.errors):\n return\n if self.require_user_stockrecord:\n stockrecord_partners = set([form.cleaned_data.get('partner', None)\n for form in self.forms])\n user_partners = set(self.user.partners.all())\n if not user_partners & stockrecord_partners:\n raise ValidationError(_(\"At least one stock record must be set\"\n \" to a partner that you're associated\"\n \" with.\"))\n\n\ndef _attr_text_field(attribute):\n return forms.CharField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_textarea_field(attribute):\n return forms.CharField(label=attribute.name,\n widget=forms.Textarea(),\n required=attribute.required)\n\n\ndef _attr_integer_field(attribute):\n return forms.IntegerField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_boolean_field(attribute):\n return forms.BooleanField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_float_field(attribute):\n return forms.FloatField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_date_field(attribute):\n return forms.DateField(label=attribute.name,\n required=attribute.required,\n widget=forms.widgets.DateInput)\n\n\ndef _attr_option_field(attribute):\n return forms.ModelChoiceField(\n label=attribute.name,\n required=attribute.required,\n queryset=attribute.option_group.options.all())\n\n\ndef _attr_multi_option_field(attribute):\n return forms.ModelMultipleChoiceField(\n label=attribute.name,\n required=attribute.required,\n 
queryset=attribute.option_group.options.all())\n\n\ndef _attr_entity_field(attribute):\n return forms.ModelChoiceField(\n label=attribute.name,\n required=attribute.required,\n queryset=attribute.entity_type.entities.all())\n\n\ndef _attr_numeric_field(attribute):\n return forms.FloatField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_file_field(attribute):\n return forms.FileField(\n label=attribute.name, required=attribute.required)\n\n\ndef _attr_image_field(attribute):\n return forms.ImageField(\n label=attribute.name, required=attribute.required)\n\n\nclass ProductForm(forms.ModelForm):\n\n # We need a special field to distinguish between group and standalone\n # products. It's impossible to tell when the product is first created.\n # This is quite clunky but will be replaced when #693 is complete.\n is_group = forms.BooleanField(\n label=_(\"Is group product?\"),\n required=False,\n help_text=_(\n \"Check this if this product is a group/parent product \"\n \"that has variants (eg different sizes/colours available)\"))\n\n FIELD_FACTORIES = {\n \"text\": _attr_text_field,\n \"richtext\": _attr_textarea_field,\n \"integer\": _attr_integer_field,\n \"boolean\": _attr_boolean_field,\n \"float\": _attr_float_field,\n \"date\": _attr_date_field,\n \"option\": _attr_option_field,\n \"multi_option\": _attr_multi_option_field,\n \"entity\": _attr_entity_field,\n \"numeric\": _attr_numeric_field,\n \"file\": _attr_file_field,\n \"image\": _attr_image_field,\n }\n\n class Meta:\n model = Product\n exclude = ('slug', 'score', 'product_class',\n 'recommended_products', 'product_options',\n 'attributes', 'categories')\n widgets = {\n 'parent': ProductSelect,\n 'related_products': ProductSelectMultiple,\n }\n\n def __init__(self, product_class, data=None, *args, **kwargs):\n self.product_class = product_class\n self.set_initial_attribute_values(kwargs)\n super(ProductForm, self).__init__(data, *args, **kwargs)\n\n # Set the initial value of the is_group field. This isn't watertight:\n # if the product is intended to be a parent product but doesn't have\n # any variants then we can't distinguish it from a standalone product\n # and this checkbox won't have the right value. This will be addressed\n # in #693\n instance = kwargs.get('instance', None)\n if instance:\n self.fields['is_group'].initial = instance.is_group\n\n # This is quite nasty. 
We use the raw posted data to determine if the\n # product is a group product, as this changes the validation rules we\n # want to apply.\n is_parent = data and data.get('is_group', '') == 'on'\n self.add_attribute_fields(is_parent)\n\n related_products = self.fields.get('related_products', None)\n parent = self.fields.get('parent', None)\n\n if parent is not None:\n parent.queryset = self.get_parent_products_queryset()\n if related_products is not None:\n related_products.queryset = self.get_related_products_queryset()\n if 'title' in self.fields:\n self.fields['title'].widget = forms.TextInput(\n attrs={'autocomplete': 'off'})\n\n def set_initial_attribute_values(self, kwargs):\n if kwargs.get('instance', None) is None:\n return\n if 'initial' not in kwargs:\n kwargs['initial'] = {}\n for attribute in self.product_class.attributes.all():\n try:\n value = kwargs['instance'].attribute_values.get(\n attribute=attribute).value\n except ProductAttributeValue.DoesNotExist:\n pass\n else:\n kwargs['initial']['attr_%s' % attribute.code] = value\n\n def add_attribute_fields(self, is_parent=False):\n for attribute in self.product_class.attributes.all():\n self.fields['attr_%s' % attribute.code] \\\n = self.get_attribute_field(attribute)\n # Attributes are not required for a parent product\n if is_parent:\n self.fields['attr_%s' % attribute.code].required = False\n\n def get_attribute_field(self, attribute):\n return self.FIELD_FACTORIES[attribute.type](attribute)\n\n def get_related_products_queryset(self):\n return Product.browsable.order_by('title')\n\n def get_parent_products_queryset(self):\n \"\"\"\n :return: Canonical products excluding this product\n \"\"\"\n # Not using Product.browsable because a deployment might override\n # that manager to respect a status field or such like\n queryset = Product._default_manager.filter(parent=None)\n if self.instance.pk is not None:\n # Prevent selecting itself as parent\n queryset = queryset.exclude(pk=self.instance.pk)\n return queryset\n\n def save(self):\n object = super(ProductForm, self).save(commit=False)\n object.product_class = self.product_class\n for attribute in self.product_class.attributes.all():\n value = self.cleaned_data['attr_%s' % attribute.code]\n setattr(object.attr, attribute.code, value)\n\n if self.cleaned_data['is_group']:\n # Don't validate attributes for parent products\n object.save(validate_attributes=False)\n else:\n object.save()\n self.save_m2m()\n return object\n\n def clean(self):\n data = self.cleaned_data\n if 'parent' not in data and not data['title']:\n raise forms.ValidationError(_(\"This field is required\"))\n elif 'parent' in data and data['parent'] is None and not data['title']:\n raise forms.ValidationError(_(\"Parent products must have a title\"))\n # Calling the clean() method of BaseForm here is required to apply\n # checks for 'unique' field. This prevents e.g. 
the UPC field from\n # raising a DatabaseError.\n return super(ProductForm, self).clean()\n\n\nclass StockAlertSearchForm(forms.Form):\n tatus = forms.CharField(label=_('Status'))\n\n\nclass ProductCategoryForm(forms.ModelForm):\n\n class Meta:\n model = ProductCategory\n\n\nBaseProductCategoryFormSet = inlineformset_factory(\n Product, ProductCategory, form=ProductCategoryForm,\n fields=('category',), extra=1, can_delete=False)\n\n\nclass ProductCategoryFormSet(BaseProductCategoryFormSet):\n\n def __init__(self, product_class, user, *args, **kwargs):\n super(ProductCategoryFormSet, self).__init__(*args, **kwargs)\n\n def clean(self):\n if self.instance.is_top_level and self.get_num_categories() == 0:\n raise forms.ValidationError(\n _(\"A top-level product must have at least one category\"))\n if self.instance.is_variant and self.get_num_categories() > 0:\n raise forms.ValidationError(\n _(\"A variant product should not have categories\"))\n\n def get_num_categories(self):\n num_categories = 0\n for i in range(0, self.total_form_count()):\n form = self.forms[i]\n if (hasattr(form, 'cleaned_data')\n and form.cleaned_data.get('category', None)\n and not form.cleaned_data.get('DELETE', False)):\n num_categories += 1\n return num_categories\n\n\nclass ProductImageForm(forms.ModelForm):\n class Meta:\n model = ProductImage\n exclude = ('display_order',)\n # use ImageInput widget to create HTML displaying the\n # actual uploaded image and providing the upload dialog\n # when clicking on the actual image.\n widgets = {\n 'original': ImageInput(),\n }\n\n def save(self, *args, **kwargs):\n # We infer the display order of the image based on the order of the\n # image fields within the formset.\n kwargs['commit'] = False\n obj = super(ProductImageForm, self).save(*args, **kwargs)\n obj.display_order = self.get_display_order()\n obj.save()\n return obj\n\n def get_display_order(self):\n return self.prefix.split('-').pop()\n\n\nBaseProductImageFormSet = inlineformset_factory(\n Product, ProductImage, form=ProductImageForm, extra=2)\n\n\nclass ProductImageFormSet(BaseProductImageFormSet):\n def __init__(self, product_class, user, *args, **kwargs):\n super(ProductImageFormSet, self).__init__(*args, **kwargs)\n\n\nclass ProductRecommendationForm(forms.ModelForm):\n class Meta:\n model = ProductRecommendation\n widgets = {\n 'recommendation': ProductSelect,\n }\n\n\nBaseProductRecommendationFormSet = inlineformset_factory(\n Product, ProductRecommendation, form=ProductRecommendationForm,\n extra=5, fk_name=\"primary\")\n\n\nclass ProductRecommendationFormSet(BaseProductRecommendationFormSet):\n def __init__(self, product_class, user, *args, **kwargs):\n super(ProductRecommendationFormSet, self).__init__(*args, **kwargs)\n", "path": "oscar/apps/dashboard/catalogue/forms.py" } ]
[ { "content": "from django import forms\nfrom django.core.exceptions import ValidationError, MultipleObjectsReturned\nfrom django.forms.models import inlineformset_factory\nfrom django.utils.translation import ugettext_lazy as _\nfrom treebeard.forms import MoveNodeForm, movenodeform_factory\n\nfrom oscar.core.utils import slugify\nfrom oscar.core.loading import get_class, get_model\nfrom oscar.forms.widgets import ImageInput\n\nProduct = get_model('catalogue', 'Product')\nProductClass = get_model('catalogue', 'ProductClass')\nCategory = get_model('catalogue', 'Category')\nStockRecord = get_model('partner', 'StockRecord')\nPartner = get_model('partner', 'Partner')\nProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')\nProductCategory = get_model('catalogue', 'ProductCategory')\nProductImage = get_model('catalogue', 'ProductImage')\nProductRecommendation = get_model('catalogue', 'ProductRecommendation')\nProductSelect = get_class('dashboard.catalogue.widgets', 'ProductSelect')\nProductSelectMultiple = get_class('dashboard.catalogue.widgets',\n 'ProductSelectMultiple')\n\n\nclass BaseCategoryForm(MoveNodeForm):\n\n def clean(self):\n cleaned_data = super(BaseCategoryForm, self).clean()\n\n name = cleaned_data.get('name')\n ref_node_pk = cleaned_data.get('_ref_node_id')\n pos = cleaned_data.get('_position')\n\n if name and self.is_slug_conflicting(name, ref_node_pk, pos):\n raise forms.ValidationError(\n _('Category with the given path already exists.'))\n return cleaned_data\n\n def is_slug_conflicting(self, name, ref_node_pk, position):\n # determine parent\n if ref_node_pk:\n ref_category = Category.objects.get(pk=ref_node_pk)\n if position == 'first-child':\n parent = ref_category\n else:\n parent = ref_category.get_parent()\n else:\n parent = None\n\n # build full slug\n slug_prefix = ''\n if parent:\n slug_prefix = (parent.slug + Category._slug_separator)\n slug = '%s%s' % (slug_prefix, slugify(name))\n\n # check if slug is conflicting\n try:\n category = Category.objects.get(slug=slug)\n except Category.DoesNotExist:\n pass\n else:\n if category.pk != self.instance.pk:\n return True\n return False\n\nCategoryForm = movenodeform_factory(Category, form=BaseCategoryForm)\n\n\nclass ProductClassSelectForm(forms.Form):\n \"\"\"\n Form which is used before creating a product to select it's product class\n \"\"\"\n\n product_class = forms.ModelChoiceField(\n label=_(\"Create a new product of type\"),\n empty_label=_(\"-- Choose type --\"),\n queryset=ProductClass.objects.all())\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n If there's only one product class, pre-select it\n \"\"\"\n super(ProductClassSelectForm, self).__init__(*args, **kwargs)\n qs = self.fields['product_class'].queryset\n if not kwargs.get('initial') and len(qs) == 1:\n self.fields['product_class'].initial = qs[0]\n\n\nclass ProductSearchForm(forms.Form):\n upc = forms.CharField(max_length=16, required=False, label=_('UPC'))\n title = forms.CharField(max_length=255, required=False, label=_('Title'))\n\n def clean(self):\n cleaned_data = super(ProductSearchForm, self).clean()\n cleaned_data['upc'] = cleaned_data['upc'].strip()\n cleaned_data['title'] = cleaned_data['title'].strip()\n return cleaned_data\n\n\nclass StockRecordForm(forms.ModelForm):\n\n def __init__(self, product_class, user, *args, **kwargs):\n # The user kwarg is not used by stock StockRecordForm. 
We pass it\n # anyway in case one wishes to customise the partner queryset\n self.user = user\n super(StockRecordForm, self).__init__(*args, **kwargs)\n\n # If not tracking stock, we hide the fields\n if not product_class.track_stock:\n del self.fields['num_in_stock']\n del self.fields['low_stock_threshold']\n else:\n self.fields['price_excl_tax'].required = True\n self.fields['num_in_stock'].required = True\n\n class Meta:\n model = StockRecord\n exclude = ('product', 'num_allocated')\n\n\nBaseStockRecordFormSet = inlineformset_factory(\n Product, StockRecord, form=StockRecordForm, extra=1)\n\n\nclass StockRecordFormSet(BaseStockRecordFormSet):\n\n def __init__(self, product_class, user, *args, **kwargs):\n self.user = user\n self.require_user_stockrecord = not user.is_staff\n self.product_class = product_class\n super(StockRecordFormSet, self).__init__(*args, **kwargs)\n self.set_initial_data()\n\n def set_initial_data(self):\n \"\"\"\n If user has only one partner associated, set the first\n stock record's partner to it. Can't pre-select for staff users as\n they're allowed to save a product without a stock record.\n\n This is intentionally done after calling __init__ as passing initial\n data to __init__ creates a form for each list item. So depending on\n whether we can pre-select the partner or not, we'd end up with 1 or 2\n forms for an unbound form.\n \"\"\"\n if self.require_user_stockrecord:\n try:\n user_partner = self.user.partners.get()\n except (Partner.DoesNotExist, MultipleObjectsReturned):\n pass\n else:\n partner_field = self.forms[0].fields.get('partner', None)\n if partner_field and partner_field.initial is None:\n partner_field.initial = user_partner\n\n def _construct_form(self, i, **kwargs):\n kwargs['product_class'] = self.product_class\n kwargs['user'] = self.user\n return super(StockRecordFormSet, self)._construct_form(\n i, **kwargs)\n\n def clean(self):\n \"\"\"\n If the user isn't a staff user, this validation ensures that at least\n one stock record's partner is associated with a users partners.\n \"\"\"\n if any(self.errors):\n return\n if self.require_user_stockrecord:\n stockrecord_partners = set([form.cleaned_data.get('partner', None)\n for form in self.forms])\n user_partners = set(self.user.partners.all())\n if not user_partners & stockrecord_partners:\n raise ValidationError(_(\"At least one stock record must be set\"\n \" to a partner that you're associated\"\n \" with.\"))\n\n\ndef _attr_text_field(attribute):\n return forms.CharField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_textarea_field(attribute):\n return forms.CharField(label=attribute.name,\n widget=forms.Textarea(),\n required=attribute.required)\n\n\ndef _attr_integer_field(attribute):\n return forms.IntegerField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_boolean_field(attribute):\n return forms.BooleanField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_float_field(attribute):\n return forms.FloatField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_date_field(attribute):\n return forms.DateField(label=attribute.name,\n required=attribute.required,\n widget=forms.widgets.DateInput)\n\n\ndef _attr_option_field(attribute):\n return forms.ModelChoiceField(\n label=attribute.name,\n required=attribute.required,\n queryset=attribute.option_group.options.all())\n\n\ndef _attr_multi_option_field(attribute):\n return forms.ModelMultipleChoiceField(\n label=attribute.name,\n required=attribute.required,\n 
queryset=attribute.option_group.options.all())\n\n\ndef _attr_entity_field(attribute):\n return forms.ModelChoiceField(\n label=attribute.name,\n required=attribute.required,\n queryset=attribute.entity_type.entities.all())\n\n\ndef _attr_numeric_field(attribute):\n return forms.FloatField(label=attribute.name,\n required=attribute.required)\n\n\ndef _attr_file_field(attribute):\n return forms.FileField(\n label=attribute.name, required=attribute.required)\n\n\ndef _attr_image_field(attribute):\n return forms.ImageField(\n label=attribute.name, required=attribute.required)\n\n\nclass ProductForm(forms.ModelForm):\n\n # We need a special field to distinguish between group and standalone\n # products. It's impossible to tell when the product is first created.\n # This is quite clunky but will be replaced when #693 is complete.\n is_group = forms.BooleanField(\n label=_(\"Is group product?\"),\n required=False,\n help_text=_(\n \"Check this if this product is a group/parent product \"\n \"that has variants (eg different sizes/colours available)\"))\n\n FIELD_FACTORIES = {\n \"text\": _attr_text_field,\n \"richtext\": _attr_textarea_field,\n \"integer\": _attr_integer_field,\n \"boolean\": _attr_boolean_field,\n \"float\": _attr_float_field,\n \"date\": _attr_date_field,\n \"option\": _attr_option_field,\n \"multi_option\": _attr_multi_option_field,\n \"entity\": _attr_entity_field,\n \"numeric\": _attr_numeric_field,\n \"file\": _attr_file_field,\n \"image\": _attr_image_field,\n }\n\n class Meta:\n model = Product\n exclude = ('slug', 'score', 'product_class',\n 'recommended_products', 'product_options',\n 'attributes', 'categories')\n widgets = {\n 'parent': ProductSelect,\n 'related_products': ProductSelectMultiple,\n }\n\n def __init__(self, product_class, data=None, *args, **kwargs):\n self.product_class = product_class\n self.set_initial_attribute_values(kwargs)\n super(ProductForm, self).__init__(data, *args, **kwargs)\n\n # Set the initial value of the is_group field. This isn't watertight:\n # if the product is intended to be a parent product but doesn't have\n # any variants then we can't distinguish it from a standalone product\n # and this checkbox won't have the right value. This will be addressed\n # in #693\n instance = kwargs.get('instance', None)\n if instance:\n self.fields['is_group'].initial = instance.is_group\n\n # This is quite nasty. 
We use the raw posted data to determine if the\n # product is a group product, as this changes the validation rules we\n # want to apply.\n is_parent = data and data.get('is_group', '') == 'on'\n self.add_attribute_fields(is_parent)\n\n related_products = self.fields.get('related_products', None)\n parent = self.fields.get('parent', None)\n\n if parent is not None:\n parent.queryset = self.get_parent_products_queryset()\n if related_products is not None:\n related_products.queryset = self.get_related_products_queryset()\n if 'title' in self.fields:\n self.fields['title'].widget = forms.TextInput(\n attrs={'autocomplete': 'off'})\n\n def set_initial_attribute_values(self, kwargs):\n if kwargs.get('instance', None) is None:\n return\n if 'initial' not in kwargs:\n kwargs['initial'] = {}\n for attribute in self.product_class.attributes.all():\n try:\n value = kwargs['instance'].attribute_values.get(\n attribute=attribute).value\n except ProductAttributeValue.DoesNotExist:\n pass\n else:\n kwargs['initial']['attr_%s' % attribute.code] = value\n\n def add_attribute_fields(self, is_parent=False):\n for attribute in self.product_class.attributes.all():\n self.fields['attr_%s' % attribute.code] \\\n = self.get_attribute_field(attribute)\n # Attributes are not required for a parent product\n if is_parent:\n self.fields['attr_%s' % attribute.code].required = False\n\n def get_attribute_field(self, attribute):\n return self.FIELD_FACTORIES[attribute.type](attribute)\n\n def get_related_products_queryset(self):\n return Product.browsable.order_by('title')\n\n def get_parent_products_queryset(self):\n \"\"\"\n :return: Canonical products excluding this product\n \"\"\"\n # Not using Product.browsable because a deployment might override\n # that manager to respect a status field or such like\n queryset = Product._default_manager.filter(parent=None)\n if self.instance.pk is not None:\n # Prevent selecting itself as parent\n queryset = queryset.exclude(pk=self.instance.pk)\n return queryset\n\n def save(self):\n object = super(ProductForm, self).save(commit=False)\n object.product_class = self.product_class\n for attribute in self.product_class.attributes.all():\n value = self.cleaned_data['attr_%s' % attribute.code]\n setattr(object.attr, attribute.code, value)\n\n if self.cleaned_data['is_group']:\n # Don't validate attributes for parent products\n object.save(validate_attributes=False)\n else:\n object.save()\n self.save_m2m()\n return object\n\n def clean(self):\n data = self.cleaned_data\n if 'parent' not in data and not data['title']:\n raise forms.ValidationError(_(\"This field is required\"))\n elif 'parent' in data and data['parent'] is None and not data['title']:\n raise forms.ValidationError(_(\"Parent products must have a title\"))\n # Calling the clean() method of BaseForm here is required to apply\n # checks for 'unique' field. This prevents e.g. 
the UPC field from\n # raising a DatabaseError.\n return super(ProductForm, self).clean()\n\n\nclass StockAlertSearchForm(forms.Form):\n status = forms.CharField(label=_('Status'))\n\n\nclass ProductCategoryForm(forms.ModelForm):\n\n class Meta:\n model = ProductCategory\n\n\nBaseProductCategoryFormSet = inlineformset_factory(\n Product, ProductCategory, form=ProductCategoryForm,\n fields=('category',), extra=1, can_delete=False)\n\n\nclass ProductCategoryFormSet(BaseProductCategoryFormSet):\n\n def __init__(self, product_class, user, *args, **kwargs):\n super(ProductCategoryFormSet, self).__init__(*args, **kwargs)\n\n def clean(self):\n if self.instance.is_top_level and self.get_num_categories() == 0:\n raise forms.ValidationError(\n _(\"A top-level product must have at least one category\"))\n if self.instance.is_variant and self.get_num_categories() > 0:\n raise forms.ValidationError(\n _(\"A variant product should not have categories\"))\n\n def get_num_categories(self):\n num_categories = 0\n for i in range(0, self.total_form_count()):\n form = self.forms[i]\n if (hasattr(form, 'cleaned_data')\n and form.cleaned_data.get('category', None)\n and not form.cleaned_data.get('DELETE', False)):\n num_categories += 1\n return num_categories\n\n\nclass ProductImageForm(forms.ModelForm):\n class Meta:\n model = ProductImage\n exclude = ('display_order',)\n # use ImageInput widget to create HTML displaying the\n # actual uploaded image and providing the upload dialog\n # when clicking on the actual image.\n widgets = {\n 'original': ImageInput(),\n }\n\n def save(self, *args, **kwargs):\n # We infer the display order of the image based on the order of the\n # image fields within the formset.\n kwargs['commit'] = False\n obj = super(ProductImageForm, self).save(*args, **kwargs)\n obj.display_order = self.get_display_order()\n obj.save()\n return obj\n\n def get_display_order(self):\n return self.prefix.split('-').pop()\n\n\nBaseProductImageFormSet = inlineformset_factory(\n Product, ProductImage, form=ProductImageForm, extra=2)\n\n\nclass ProductImageFormSet(BaseProductImageFormSet):\n def __init__(self, product_class, user, *args, **kwargs):\n super(ProductImageFormSet, self).__init__(*args, **kwargs)\n\n\nclass ProductRecommendationForm(forms.ModelForm):\n class Meta:\n model = ProductRecommendation\n widgets = {\n 'recommendation': ProductSelect,\n }\n\n\nBaseProductRecommendationFormSet = inlineformset_factory(\n Product, ProductRecommendation, form=ProductRecommendationForm,\n extra=5, fk_name=\"primary\")\n\n\nclass ProductRecommendationFormSet(BaseProductRecommendationFormSet):\n def __init__(self, product_class, user, *args, **kwargs):\n super(ProductRecommendationFormSet, self).__init__(*args, **kwargs)\n", "path": "oscar/apps/dashboard/catalogue/forms.py" } ]
diff --git a/oscar/apps/dashboard/catalogue/forms.py b/oscar/apps/dashboard/catalogue/forms.py index 4a5a26489ec..2683e216c14 100644 --- a/oscar/apps/dashboard/catalogue/forms.py +++ b/oscar/apps/dashboard/catalogue/forms.py @@ -379,7 +379,7 @@ def clean(self): class StockAlertSearchForm(forms.Form): - tatus = forms.CharField(label=_('Status')) + status = forms.CharField(label=_('Status')) class ProductCategoryForm(forms.ModelForm):
scikit-hep__awkward-2213
Example in merge_union_of_records is a no-op

### Which documentation?

Python docstrings

### What needs to be documented?

In the example given, the result type is the same as the input type:

<details>
<summary> Quote from the docstring </summary>

```python
Simplifies unions of records, e.g.

>>> array = ak.Array([{"a": 1}, {"b": 2}])

into records of options, i.e.

>>> ak.merge_union_of_records(array)
<Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>
```

</details>

```python
>>> import awkward as ak
>>> ak.Array([{"a": 1}, {"b": 2}]).type.show()
2 * {
    a: ?int64,
    b: ?int64
}
>>> ak.merge_union_of_records(ak.Array([{"a": 1}, {"b": 2}])).type.show()
2 * {
    a: ?int64,
    b: ?int64
}
```
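A brief sketch of the construction the fixed docstring switches to (see the diff at the end of this record); the commented type outputs are paraphrased, not verbatim interpreter output:

```python
import awkward as ak

# Concatenating two separately typed record arrays yields a genuine union of records.
# ak.Array([{"a": 1}, {"b": 2}]) by itself is already a single record type with
# optional fields, which is why the original docstring example was a no-op.
array = ak.concatenate(([{"a": 1}], [{"b": 2}]))
array.type.show()  # a union of the two record types {a: int64} and {b: int64}

# merge_union_of_records now has something to simplify:
ak.merge_union_of_records(array).type.show()  # 2 * {a: ?int64, b: ?int64}
```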
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport awkward as ak\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef merge_union_of_records(array, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n axis (int): The dimension at which this operation is applied.\n The outermost dimension is `0`, followed by `1`, etc., and negative\n values count backward from the innermost: `-1` is the innermost\n dimension, `-2` is the next level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Simplifies unions of records, e.g.\n\n >>> array = ak.Array([{\"a\": 1}, {\"b\": 2}])\n\n into records of options, i.e.\n\n >>> ak.merge_union_of_records(array)\n <Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.merge_union_of_records\",\n {\"array\": array, \"axis\": axis, \"highlevel\": highlevel, \"behavior\": behavior},\n ):\n return _impl(array, axis, highlevel, behavior)\n\n\ndef _impl(array, axis, highlevel, behavior):\n behavior = ak._util.behavior_of(array, behavior=behavior)\n layout = ak.to_layout(array, allow_record=False)\n\n def apply_displace_index(layout, backend, **kwargs):\n if layout.is_record:\n return layout\n elif layout.is_option and layout.content.is_record:\n raise ak._errors.wrap_error(\n TypeError(\n \"optional records cannot be merged by this function. First call `ak.merge_option_of_records` \"\n \"to convert these into records of options.\"\n )\n )\n elif layout.is_indexed and layout.content.is_record:\n record = layout.content\n # Transpose index-of-record to record-of-index\n return ak.contents.RecordArray(\n [\n ak.contents.IndexedArray.simplified(\n layout.index, c, parameters=layout._parameters\n )\n for c in record.contents\n ],\n record.fields,\n record.length,\n backend=backend,\n )\n else:\n raise ak._errors.wrap_error(TypeError(layout))\n\n def apply(layout, depth, backend, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if depth < posaxis + 1 and layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(f\"axis={axis} exceeds the depth of this array ({depth})\")\n )\n elif depth == posaxis + 1 and layout.is_union:\n if all(x.is_record for x in layout.contents):\n # First, find all ordered fields, regularising any index-of-record\n # such that we have record-of-index\n seen_fields = set()\n all_fields = []\n regularised_contents = []\n for content in layout.contents:\n # Ensure that we have record-of-index\n regularised_content = ak._do.recursively_apply(\n content, apply_displace_index\n )\n regularised_contents.append(regularised_content)\n\n # Find new fields\n for field in regularised_content.fields:\n if field not in seen_fields:\n seen_fields.add(field)\n all_fields.append(field)\n\n # Build unions for each field\n outer_field_contents = []\n for field in all_fields:\n field_tags = backend.index_nplike.asarray(layout.tags, copy=True)\n field_index = backend.index_nplike.asarray(layout.index, copy=True)\n\n # Build contents for union representing current field\n field_contents = [\n c.content(field)\n for c in regularised_contents\n if c.has_field(field)\n ]\n\n # Find the best 
location for option type.\n # We will potentially have fewer contents in this per-field union\n # than the original outer union-of-records, because some recordarrays\n # may not have the given field.\n tag_for_missing = 0\n for i, content in enumerate(field_contents):\n if content.is_option:\n tag_for_missing = i\n break\n\n # If at least one recordarray doesn't have this field, we add\n # a special option\n if len(field_contents) < len(regularised_contents):\n # Make the tagged content an option, growing by one to ensure we\n # have a known `None` value to index into\n tagged_content = field_contents[tag_for_missing]\n indexedoption_index = backend.index_nplike.arange(\n tagged_content.length + 1, dtype=np.int64\n )\n indexedoption_index[tagged_content.length] = -1\n field_contents[\n tag_for_missing\n ] = ak.contents.IndexedOptionArray.simplified(\n ak.index.Index64(indexedoption_index), tagged_content\n )\n\n # Now build contents for union, by looping over outermost index\n # Overwrite tags to adjust for new contents length\n # and use the tagged content for any missing values\n k = 0\n for j, content in enumerate(regularised_contents):\n tag_is_j = field_tags == j\n\n if content.has_field(field):\n # Rewrite tags to account for missing fields\n field_tags[tag_is_j] = k\n k += 1\n\n else:\n # Rewrite tags to point to option content\n field_tags[tag_is_j] = tag_for_missing\n # Point each value to missing value\n field_index[tag_is_j] = (\n field_contents[tag_for_missing].length - 1\n )\n\n outer_field_contents.append(\n ak.contents.UnionArray.simplified(\n ak.index.Index8(field_tags),\n ak.index.Index64(field_index),\n field_contents,\n )\n )\n return ak.contents.RecordArray(\n outer_field_contents, all_fields, backend=backend\n )\n\n out = ak._do.recursively_apply(layout, apply)\n return ak._util.wrap(out, highlevel=highlevel, behavior=behavior)\n", "path": "src/awkward/operations/ak_merge_union_of_records.py" } ]
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport awkward as ak\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef merge_union_of_records(array, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n axis (int): The dimension at which this operation is applied.\n The outermost dimension is `0`, followed by `1`, etc., and negative\n values count backward from the innermost: `-1` is the innermost\n dimension, `-2` is the next level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Simplifies unions of records, e.g.\n\n >>> array = ak.concatenate(([{\"a\": 1}], [{\"b\": 2}]))\n\n into records of options, i.e.\n\n >>> ak.merge_union_of_records(array)\n <Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.merge_union_of_records\",\n {\"array\": array, \"axis\": axis, \"highlevel\": highlevel, \"behavior\": behavior},\n ):\n return _impl(array, axis, highlevel, behavior)\n\n\ndef _impl(array, axis, highlevel, behavior):\n behavior = ak._util.behavior_of(array, behavior=behavior)\n layout = ak.to_layout(array, allow_record=False)\n\n def apply_displace_index(layout, backend, **kwargs):\n if layout.is_record:\n return layout\n elif layout.is_option and layout.content.is_record:\n raise ak._errors.wrap_error(\n TypeError(\n \"optional records cannot be merged by this function. First call `ak.merge_option_of_records` \"\n \"to convert these into records of options.\"\n )\n )\n elif layout.is_indexed and layout.content.is_record:\n record = layout.content\n # Transpose index-of-record to record-of-index\n return ak.contents.RecordArray(\n [\n ak.contents.IndexedArray.simplified(\n layout.index, c, parameters=layout._parameters\n )\n for c in record.contents\n ],\n record.fields,\n record.length,\n backend=backend,\n )\n else:\n raise ak._errors.wrap_error(TypeError(layout))\n\n def apply(layout, depth, backend, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if depth < posaxis + 1 and layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(f\"axis={axis} exceeds the depth of this array ({depth})\")\n )\n elif depth == posaxis + 1 and layout.is_union:\n if all(x.is_record for x in layout.contents):\n # First, find all ordered fields, regularising any index-of-record\n # such that we have record-of-index\n seen_fields = set()\n all_fields = []\n regularised_contents = []\n for content in layout.contents:\n # Ensure that we have record-of-index\n regularised_content = ak._do.recursively_apply(\n content, apply_displace_index\n )\n regularised_contents.append(regularised_content)\n\n # Find new fields\n for field in regularised_content.fields:\n if field not in seen_fields:\n seen_fields.add(field)\n all_fields.append(field)\n\n # Build unions for each field\n outer_field_contents = []\n for field in all_fields:\n field_tags = backend.index_nplike.asarray(layout.tags, copy=True)\n field_index = backend.index_nplike.asarray(layout.index, copy=True)\n\n # Build contents for union representing current field\n field_contents = [\n c.content(field)\n for c in regularised_contents\n if c.has_field(field)\n ]\n\n # Find the 
best location for option type.\n # We will potentially have fewer contents in this per-field union\n # than the original outer union-of-records, because some recordarrays\n # may not have the given field.\n tag_for_missing = 0\n for i, content in enumerate(field_contents):\n if content.is_option:\n tag_for_missing = i\n break\n\n # If at least one recordarray doesn't have this field, we add\n # a special option\n if len(field_contents) < len(regularised_contents):\n # Make the tagged content an option, growing by one to ensure we\n # have a known `None` value to index into\n tagged_content = field_contents[tag_for_missing]\n indexedoption_index = backend.index_nplike.arange(\n tagged_content.length + 1, dtype=np.int64\n )\n indexedoption_index[tagged_content.length] = -1\n field_contents[\n tag_for_missing\n ] = ak.contents.IndexedOptionArray.simplified(\n ak.index.Index64(indexedoption_index), tagged_content\n )\n\n # Now build contents for union, by looping over outermost index\n # Overwrite tags to adjust for new contents length\n # and use the tagged content for any missing values\n k = 0\n for j, content in enumerate(regularised_contents):\n tag_is_j = field_tags == j\n\n if content.has_field(field):\n # Rewrite tags to account for missing fields\n field_tags[tag_is_j] = k\n k += 1\n\n else:\n # Rewrite tags to point to option content\n field_tags[tag_is_j] = tag_for_missing\n # Point each value to missing value\n field_index[tag_is_j] = (\n field_contents[tag_for_missing].length - 1\n )\n\n outer_field_contents.append(\n ak.contents.UnionArray.simplified(\n ak.index.Index8(field_tags),\n ak.index.Index64(field_index),\n field_contents,\n )\n )\n return ak.contents.RecordArray(\n outer_field_contents, all_fields, backend=backend\n )\n\n out = ak._do.recursively_apply(layout, apply)\n return ak._util.wrap(out, highlevel=highlevel, behavior=behavior)\n", "path": "src/awkward/operations/ak_merge_union_of_records.py" } ]
diff --git a/src/awkward/operations/ak_merge_union_of_records.py b/src/awkward/operations/ak_merge_union_of_records.py index c389dccb83..3694909496 100644 --- a/src/awkward/operations/ak_merge_union_of_records.py +++ b/src/awkward/operations/ak_merge_union_of_records.py @@ -23,7 +23,7 @@ def merge_union_of_records(array, axis=-1, *, highlevel=True, behavior=None): Simplifies unions of records, e.g. - >>> array = ak.Array([{"a": 1}, {"b": 2}]) + >>> array = ak.concatenate(([{"a": 1}], [{"b": 2}])) into records of options, i.e.
zestedesavoir__zds-site-800
Error in /membres/?q=... via AJAX

Since the addition of the JSON response for `/membres/` (#677), a GET on this page with the `X-Requested-With: XMLHttpRequest` header crashes with a 500, because the json library is not imported in the file `zds/membres/view.py`:

``` bash
[07/Jun/2014 14:30:49] "GET /membres/?q=a HTTP/1.1" 500 15042
Internal Server Error: /membres/
Traceback (most recent call last):
  File "~/.local/lib/python2.7/site-packages/django/core/handlers/base.py", line 112, in get_response
    response = wrapped_callback(request, *callback_args, **callback_kwargs)
  File "zds/member/decorator.py", line 21, in _can_read_now
    return func(request, *args, **kwargs)
  File "zds/member/views.py", line 55, in index
    data = json.dumps(results)
NameError: global name 'json' is not defined
```
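The traceback points at a missing module-level import of `json` in the member view. A rough sketch of the AJAX branch with the import in place (simplified from the file below, not the actual patch; the response content type is an assumption):

```python
import json

from django.contrib.auth.models import User
from django.http import HttpResponse


def index(request):
    if request.is_ajax():
        q = request.GET.get('q', '')
        members = User.objects.filter(username__icontains=q)[:20]
        results = [{'id': member.pk, 'label': member.username, 'value': member.username}
                   for member in members]
        # Without a module-level `import json`, this call raises the NameError above.
        data = json.dumps(results)
        # content_type shown for illustration; the repository's view may pass it differently.
        return HttpResponse(data, content_type='application/json')
```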
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nimport os\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, SiteProfileNotAvailable\nfrom django.core.context_processors import csrf\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.core.urlresolvers import reverse\nfrom django.db import transaction\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect, get_object_or_404, render_to_response\nfrom django.template import Context, RequestContext\nfrom django.template.loader import get_template\nfrom django.views.decorators.http import require_POST\nimport json\nimport pygal\n\nfrom forms import LoginForm, MiniProfileForm, ProfileForm, RegisterForm, \\\n ChangePasswordForm, ChangeUserForm, ForgotPasswordForm, NewPasswordForm, \\\n OldTutoForm\nfrom models import Profile, TokenForgotPassword, Ban, TokenRegister, \\\n get_info_old_tuto, logout_user\nfrom zds.gallery.forms import ImageAsAvatarForm\nfrom zds.article.models import Article\nfrom zds.forum.models import Topic\nfrom zds.member.decorator import can_write_and_read_now\nfrom zds.tutorial.models import Tutorial\nfrom zds.utils import render_template\nfrom zds.utils.mps import send_mp\nfrom zds.utils.paginator import paginator_range\nfrom zds.utils.tokens import generate_token\n\n\n\ndef index(request):\n \"\"\"Displays the list of registered users.\"\"\"\n\n if request.is_ajax():\n q = request.GET.get('q', '')\n members = User.objects.filter(username__icontains=q)[:20]\n results = []\n for member in members:\n member_json = {}\n member_json['id'] = member.pk\n member_json['label'] = member.username\n member_json['value'] = member.username\n results.append(member_json)\n data = json.dumps(results)\n\n return HttpResponse(data, mimetype)\n\n else:\n members = User.objects.order_by(\"-date_joined\")\n # Paginator\n\n paginator = Paginator(members, settings.MEMBERS_PER_PAGE)\n page = request.GET.get(\"page\")\n try:\n shown_members = paginator.page(page)\n page = int(page)\n except PageNotAnInteger:\n shown_members = paginator.page(1)\n page = 1\n except EmptyPage:\n shown_members = paginator.page(paginator.num_pages)\n page = paginator.num_pages\n return render_template(\"member/index.html\", {\n \"members\": shown_members,\n \"count\": members.count(),\n \"pages\": paginator_range(page, paginator.num_pages),\n \"nb\": page,\n })\n\n\n\ndef details(request, user_name):\n \"\"\"Displays details about a profile.\"\"\"\n\n usr = get_object_or_404(User, username=user_name)\n try:\n profile = usr.profile\n bans = Ban.objects.filter(user=usr).order_by(\"-pubdate\")\n except SiteProfileNotAvailable:\n raise Http404\n\n # refresh moderation chart\n\n dot_chart = pygal.Dot(x_label_rotation=30)\n dot_chart.title = u\"Messages postés par période\"\n dot_chart.x_labels = [\n u\"Dimanche\",\n u\"Lundi\",\n u\"Mardi\",\n u\"Mercredi\",\n u\"Jeudi\",\n u\"Vendredi\",\n u\"Samedi\",\n ]\n dot_chart.show_legend = False\n dates = date_to_chart(profile.get_posts())\n for i in range(0, 24):\n dot_chart.add(str(i) + \" h\", dates[(i + 1) % 24])\n img_path = os.path.join(settings.MEDIA_ROOT, \"pygal\")\n if not os.path.isdir(img_path):\n 
os.makedirs(img_path, mode=0o777)\n fchart = os.path.join(img_path, \"mod-{}.svg\".format(str(usr.pk)))\n dot_chart.render_to_file(fchart)\n my_articles = Article.objects.filter(sha_public__isnull=False).order_by(\n \"-pubdate\").filter(authors__in=[usr]).all()\n my_tutorials = \\\n Tutorial.objects.filter(sha_public__isnull=False) \\\n .filter(authors__in=[usr]) \\\n .order_by(\"-pubdate\"\n ).all()\n my_topics = Topic.objects.filter(author__pk=usr.pk).order_by(\"-pubdate\"\n ).all()\n tops = []\n for top in my_topics:\n if not top.forum.can_read(request.user):\n continue\n else:\n tops.append(top)\n if len(tops) >= 5:\n break\n form = OldTutoForm(profile)\n oldtutos = []\n if profile.sdz_tutorial:\n olds = profile.sdz_tutorial.strip().split(\":\")\n else:\n olds = []\n for old in olds:\n oldtutos.append(get_info_old_tuto(old))\n return render_template(\"member/profile.html\", {\n \"usr\": usr,\n \"profile\": profile,\n \"bans\": bans,\n \"articles\": my_articles,\n \"tutorials\": my_tutorials,\n \"topics\": tops,\n \"form\": form,\n \"old_tutos\": oldtutos,\n })\n\n\n@can_write_and_read_now\n@login_required\[email protected]\ndef modify_profile(request, user_pk):\n \"\"\"Modifies sanction of a user if there is a POST request.\"\"\"\n\n profile = get_object_or_404(Profile, user__pk=user_pk)\n if request.method == \"POST\":\n ban = Ban()\n ban.moderator = request.user\n ban.user = profile.user\n ban.pubdate = datetime.now()\n if \"ls\" in request.POST:\n profile.can_write = False\n ban.type = u\"Lecture Seule\"\n ban.text = request.POST[\"ls-text\"]\n detail = (u'Vous ne pouvez plus poster dans les forums, ni dans les '\n u'commentaires d\\'articles et de tutoriels.')\n if \"ls-temp\" in request.POST:\n ban.type = u\"Lecture Seule Temporaire\"\n ban.text = request.POST[\"ls-temp-text\"]\n profile.can_write = False\n profile.end_ban_write = datetime.now() \\\n + timedelta(days=int(request.POST[\"ls-jrs\"]), hours=0,\n minutes=0, seconds=0)\n detail = (u'Vous ne pouvez plus poster dans les forums, ni dans les '\n u'commentaires d\\'articles et de tutoriels pendant {0} jours.'\n .format(request.POST[\"ls-jrs\"]))\n if \"ban-temp\" in request.POST:\n ban.type = u\"Ban Temporaire\"\n ban.text = request.POST[\"ban-temp-text\"]\n profile.can_read = False\n profile.end_ban_read = datetime.now() \\\n + timedelta(days=int(request.POST[\"ban-jrs\"]), hours=0,\n minutes=0, seconds=0)\n detail = (u'Vous ne pouvez plus vous connecter sur ZesteDeSavoir '\n u'pendant {0} jours.'.format(request.POST[\"ban-jrs\"]))\n logout_user(profile.user.username)\n\n if \"ban\" in request.POST:\n ban.type = u\"Ban définitif\"\n ban.text = request.POST[\"ban-text\"]\n profile.can_read = False\n detail = u\"vous ne pouvez plus vous connecter sur ZesteDeSavoir.\"\n logout_user(profile.user.username)\n if \"un-ls\" in request.POST:\n ban.type = u\"Autorisation d'écrire\"\n ban.text = request.POST[\"unls-text\"]\n profile.can_write = True\n detail = (u'Vous pouvez désormais poster sur les forums, dans les '\n u'commentaires d\\'articles et tutoriels.')\n if \"un-ban\" in request.POST:\n ban.type = u\"Autorisation de se connecter\"\n ban.text = request.POST[\"unban-text\"]\n profile.can_read = True\n detail = u\"vous pouvez désormais vous connecter sur le site.\"\n profile.save()\n ban.save()\n\n # send register message\n\n if \"un-ls\" in request.POST or \"un-ban\" in request.POST:\n msg = \\\n u\"\"\"Bonjour **{0}**,\n\n**Bonne Nouvelle**, la sanction qui pesait sur vous a été levée par **{1}**.\n\nCe qui signifie que 
{2}\n\nLe motif de votre sanction est :\n\n`{3}`\n\nCordialement, L'équipe ZesteDeSavoir.\n\n\"\"\".format(ban.user,\n ban.moderator, detail, ban.text)\n else:\n msg = \\\n u\"\"\"Bonjour **{0}**,\n\nVous avez été santionné par **{1}**.\n\nLa sanction est de type *{2}*, ce qui signifie que {3}\n\nLe motif de votre sanction est :\n\n`{4}`\n\nCordialement, L'équipe ZesteDeSavoir.\n\n\"\"\".format(ban.user,\n ban.moderator, ban.type, detail, ban.text)\n bot = get_object_or_404(User, username=settings.BOT_ACCOUNT)\n send_mp(\n bot,\n [ban.user],\n ban.type,\n \"Sanction\",\n msg,\n True,\n direct=True,\n )\n return redirect(profile.get_absolute_url())\n\n\n\n@login_required\ndef tutorials(request):\n \"\"\"Returns all tutorials of the authenticated user.\"\"\"\n\n # The type indicate what the user would like to display. We can display\n # public, draft or all user's tutorials.\n\n try:\n type = request.GET[\"type\"]\n except KeyError:\n type = None\n\n # Retrieves all tutorials of the current user.\n\n profile = request.user.profile\n if type == \"draft\":\n user_tutorials = profile.get_draft_tutos()\n elif type == \"public\":\n user_tutorials = profile.get_public_tutos()\n else:\n user_tutorials = profile.get_tutos()\n\n return render_template(\"tutorial/member/index.html\",\n {\"tutorials\": user_tutorials, \"type\": type})\n\n\n\n@login_required\ndef articles(request):\n \"\"\"Returns all articles of the authenticated user.\"\"\"\n\n # The type indicate what the user would like to display. We can display\n # public, draft or all user's articles.\n\n try:\n type = request.GET[\"type\"]\n except KeyError:\n type = None\n\n # Retrieves all articles of the current user.\n\n profile = request.user.profile\n if type == \"draft\":\n user_articles = profile.get_draft_articles()\n elif type == \"public\":\n user_articles = profile.get_public_articles()\n else:\n user_articles = profile.get_articles()\n\n return render_template(\"article/member/index.html\",\n {\"articles\": user_articles, \"type\": type})\n\n\n\n@login_required\ndef actions(request):\n \"\"\"Show avaible actions for current user, like a customized homepage.\n\n This may be very temporary.\n\n \"\"\"\n\n # TODO: Seriously improve this page, and see if cannot be merged in\n # zds.pages.views.home since it will be more coherent to give an enhanced\n # homepage for registered users\n\n return render_template(\"member/actions.html\")\n\n\n# settings for public profile\n\n@can_write_and_read_now\n@login_required\ndef settings_mini_profile(request, user_name):\n \"\"\"Minimal settings of users for staff.\"\"\"\n\n # extra information about the current user\n\n profile = Profile.objects.get(user__username=user_name)\n if request.method == \"POST\":\n form = MiniProfileForm(request.POST)\n c = {\"form\": form, \"profile\": profile}\n if form.is_valid():\n profile.biography = form.data[\"biography\"]\n profile.site = form.data[\"site\"]\n profile.avatar_url = form.data[\"avatar_url\"]\n profile.sign = form.data[\"sign\"]\n\n # Save the profile and redirect the user to the configuration space\n # with message indicate the state of the operation\n\n try:\n profile.save()\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_mini_profil\"\n \"e\"))\n messages.success(request,\n \"Le profil a correctement été mis à jour.\")\n return redirect(reverse(\"zds.member.views.details\",\n args=[profile.user.username]))\n else:\n return render_to_response(\"member/settings/profile.html\", 
c,\n RequestContext(request))\n else:\n form = MiniProfileForm(initial={\n \"biography\": profile.biography,\n \"site\": profile.site,\n \"avatar_url\": profile.avatar_url,\n \"sign\": profile.sign,\n })\n c = {\"form\": form, \"profile\": profile}\n return render_to_response(\"member/settings/profile.html\", c,\n RequestContext(request))\n\n\n@can_write_and_read_now\n@login_required\ndef settings_profile(request):\n \"\"\"User's settings about his personal information.\"\"\"\n\n # extra information about the current user\n\n profile = request.user.profile\n if request.method == \"POST\":\n form = ProfileForm(request.POST)\n c = {\"form\": form}\n if form.is_valid():\n profile.biography = form.data[\"biography\"]\n profile.site = form.data[\"site\"]\n profile.show_email = \"show_email\" \\\n in form.cleaned_data.get(\"options\")\n profile.show_sign = \"show_sign\" in form.cleaned_data.get(\"options\")\n profile.hover_or_click = \"hover_or_click\" \\\n in form.cleaned_data.get(\"options\")\n profile.email_for_answer = \"email_for_answer\" \\\n in form.cleaned_data.get(\"options\")\n profile.avatar_url = form.data[\"avatar_url\"]\n profile.sign = form.data[\"sign\"]\n\n # Save the profile and redirect the user to the configuration space\n # with message indicate the state of the operation\n\n try:\n profile.save()\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n messages.success(request,\n \"Le profil a correctement été mis à jour.\")\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n else:\n return render_to_response(\"member/settings/profile.html\", c,\n RequestContext(request))\n else:\n form = ProfileForm(initial={\n \"biography\": profile.biography,\n \"site\": profile.site,\n \"avatar_url\": profile.avatar_url,\n \"show_email\": profile.show_email,\n \"show_sign\": profile.show_sign,\n \"hover_or_click\": profile.hover_or_click,\n \"email_for_answer\": profile.email_for_answer,\n \"sign\": profile.sign,\n })\n c = {\"form\": form}\n return render_to_response(\"member/settings/profile.html\", c,\n RequestContext(request))\n\n\n@can_write_and_read_now\n@login_required\n@require_POST\ndef update_avatar(request):\n \"\"\"\n Update avatar from gallery.\n Specific method instead using settings_profile() to avoid to handle all required fields.\n \"\"\"\n profile = request.user.profile\n form = ImageAsAvatarForm(request.POST)\n if form.is_valid():\n profile.avatar_url = form.data[\"avatar_url\"]\n try:\n profile.save()\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n messages.success(request, \"L'avatar a correctement été mis à jour.\")\n\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n\n\n@can_write_and_read_now\n@login_required\ndef settings_account(request):\n \"\"\"User's settings about his account.\"\"\"\n\n if request.method == \"POST\":\n form = ChangePasswordForm(request.user, request.POST)\n c = {\"form\": form}\n if form.is_valid():\n try:\n request.user.set_password(form.data[\"password_new\"])\n request.user.save()\n messages.success(request, \"Le mot de passe a bien été modifié.\"\n )\n return redirect(reverse(\"zds.member.views.settings_account\"))\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_account\"))\n else:\n return render_to_response(\"member/settings/account.html\", c,\n RequestContext(request))\n 
else:\n form = ChangePasswordForm(request.user)\n c = {\"form\": form}\n return render_to_response(\"member/settings/account.html\", c,\n RequestContext(request))\n\n\n@can_write_and_read_now\n@login_required\ndef settings_user(request):\n \"\"\"User's settings about his email.\"\"\"\n\n profile = request.user.profile\n if request.method == \"POST\":\n form = ChangeUserForm(request.POST)\n c = {\"form\": form}\n if form.is_valid():\n email_exist = User.objects.filter(email=form.data[\"username_new\"\n ]).count()\n username_exist = \\\n User.objects.filter(username=form.data[\"username_new\"]).count()\n old = User.objects.filter(pk=request.user.pk).all()[0]\n if form.data[\"username_new\"] and username_exist > 0:\n raise Http404\n elif form.data[\"username_new\"]:\n if form.data[\"username_new\"].strip() != \"\":\n old.username = form.data[\"username_new\"]\n if form.data[\"email_new\"] and email_exist > 0:\n raise Http404\n elif form.data[\"email_new\"]:\n if form.data[\"email_new\"].strip() != \"\":\n old.email = form.data[\"email_new\"]\n old.save()\n return redirect(profile.get_absolute_url())\n else:\n return render_to_response(\"member/settings/user.html\", c,\n RequestContext(request))\n else:\n form = ChangeUserForm()\n c = {\"form\": form}\n return render_to_response(\"member/settings/user.html\", c,\n RequestContext(request))\n\n\n\ndef login_view(request):\n \"\"\"Log in user.\"\"\"\n\n csrf_tk = {}\n csrf_tk.update(csrf(request))\n error = False\n\n # Redirecting user once logged in?\n\n if \"next\" in request.GET:\n next_page = request.GET[\"next\"]\n else:\n next_page = None\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(username=username, password=password)\n if user is not None:\n profile = get_object_or_404(Profile, user=user)\n if user.is_active:\n if profile.can_read_now():\n login(request, user)\n request.session[\"get_token\"] = generate_token()\n if \"remember\" not in request.POST:\n request.session.set_expiry(0)\n\n # redirect the user if needed\n\n try:\n return redirect(next_page)\n except:\n return redirect(reverse(\"zds.pages.views.home\"))\n else:\n messages.error(request,\n \"Vous n'êtes pas autorisé à vous connecter \"\n \"sur le site, vous avez été banni par un \"\n \"modérateur\")\n else:\n messages.error(request,\n \"Vous n'avez pas encore activé votre compte, \"\n \"vous devez le faire pour pouvoir vous \"\n \"connecter sur le site. 
Regardez dans vos \"\n \"mails : \" + str(user.email))\n else:\n messages.error(request,\n \"Les identifiants fournis ne sont pas valides\")\n form = LoginForm()\n form.helper.form_action = reverse(\"zds.member.views.login_view\") \\\n + \"?next=\" + str(next_page)\n csrf_tk[\"error\"] = error\n csrf_tk[\"form\"] = form\n csrf_tk[\"next_page\"] = next_page\n return render_template(\"member/login.html\",\n {\"form\": form,\n \"csrf_tk\": csrf_tk,\n \"next_page\": next_page})\n\n\n@login_required\n@require_POST\ndef logout_view(request):\n \"\"\"Log out user.\"\"\"\n\n logout(request)\n request.session.clear()\n return redirect(reverse(\"zds.pages.views.home\"))\n\n\ndef register_view(request):\n \"\"\"Register a new user.\"\"\"\n\n if request.method == \"POST\":\n form = RegisterForm(request.POST)\n if form.is_valid():\n data = form.data\n user = User.objects.create_user(data[\"username\"], data[\"email\"],\n data[\"password\"])\n user.is_active = False\n user.save()\n profile = Profile(user=user, show_email=False, show_sign=True,\n hover_or_click=True, email_for_answer=False)\n profile.last_ip_address = get_client_ip(request)\n profile.save()\n user.backend = \"django.contrib.auth.backends.ModelBackend\"\n\n # Generate a valid token during one hour.\n\n uuidToken = str(uuid.uuid4())\n date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0,\n seconds=0)\n token = TokenRegister(user=user, token=uuidToken,\n date_end=date_end)\n token.save()\n\n # send email\n\n subject = \"ZDS - Confirmation d'inscription\"\n from_email = \"Zeste de Savoir <{0}>\".format(settings.MAIL_NOREPLY)\n message_html = get_template(\"email/register/confirm.html\").render(Context(\n {\"username\": user.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n message_txt = get_template(\"email/register/confirm.txt\") .render(Context(\n {\"username\": user.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n msg = EmailMultiAlternatives(subject, message_txt, from_email,\n [user.email])\n msg.attach_alternative(message_html, \"text/html\")\n try:\n msg.send()\n except:\n msg = None\n return render_template(\"member/register/success.html\", {})\n else:\n return render_template(\"member/register/index.html\", {\"form\": form})\n form = RegisterForm()\n return render_template(\"member/register/index.html\", {\"form\": form})\n\n\n\ndef forgot_password(request):\n \"\"\"If the user forgot his password, he can have a new one.\"\"\"\n\n if request.method == \"POST\":\n form = ForgotPasswordForm(request.POST)\n if form.is_valid():\n data = form.data\n username = data[\"username\"]\n usr = get_object_or_404(User, username=username)\n\n # Generate a valid token during one hour.\n\n uuidToken = str(uuid.uuid4())\n date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0,\n seconds=0)\n token = TokenForgotPassword(user=usr, token=uuidToken,\n date_end=date_end)\n token.save()\n\n # send email\n\n subject = \"ZDS - Mot de passe oublié\"\n from_email = \"ZesteDeSavoir <{0}>\".format(settings.MAIL_NOREPLY)\n message_html = get_template(\"email/forgot_password/confirm.html\").render(Context(\n {\"username\": usr.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n message_txt = get_template(\"email/forgot_password/confirm.txt\") .render(Context(\n {\"username\": usr.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n msg = EmailMultiAlternatives(subject, message_txt, from_email,\n [usr.email])\n msg.attach_alternative(message_html, \"text/html\")\n 
msg.send()\n return render_template(\"member/forgot_password/success.html\")\n else:\n return render_template(\"member/forgot_password/index.html\",\n {\"form\": form})\n form = ForgotPasswordForm()\n return render_template(\"member/forgot_password/index.html\", {\"form\": form})\n\n\n\ndef new_password(request):\n \"\"\"Create a new password for a user.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"zds.pages.views.home\"))\n token = get_object_or_404(TokenForgotPassword, token=token)\n if request.method == \"POST\":\n form = NewPasswordForm(token.user.username, request.POST)\n if form.is_valid():\n data = form.data\n password = data[\"password\"]\n # User can't confirm his request if it is too late.\n\n if datetime.now() > token.date_end:\n return render_template(\"member/new_password/failed.html\")\n token.user.set_password(password)\n token.user.save()\n token.delete()\n return render_template(\"member/new_password/success.html\")\n else:\n return render_template(\"member/new_password.html\", {\"form\": form})\n form = NewPasswordForm(identifier=token.user.username)\n return render_template(\"member/new_password/index.html\", {\"form\": form})\n\n\ndef active_account(request):\n \"\"\"Active token for a user.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"zds.pages.views.home\"))\n token = get_object_or_404(TokenRegister, token=token)\n usr = token.user\n\n # User can't confirm his request if it is too late.\n\n if datetime.now() > token.date_end:\n return render_template(\"member/register/token_failed.html\",\n {\"token\": token})\n usr.is_active = True\n usr.save()\n\n # send register message\n\n bot = get_object_or_404(User, username=settings.BOT_ACCOUNT)\n msg = (\n u'Bonjour **{0}**,'\n u'\\n\\n'\n u'Ton compte a été activé, et tu es donc officiellement '\n u'membre de la communauté de ZesteDeSavoir.'\n u'\\n\\n'\n u'ZesteDeSavoir est une communauté dont le but est de diffuser des '\n u'connaissances au plus grand nombre.'\n u'\\n\\n'\n u'Sur ce site, tu trouveras un ensemble de [tutoriels]({1}) dans '\n u'plusieurs domaines et plus particulièrement autour de l\\'informatique '\n u'et des sciences. Tu y retrouveras aussi des [articles]({2}) '\n u'traitant de sujets d\\'actualités ou non, qui, tout comme les tutoriels, s'\n u'ont écrits par des [membres]({3}) de la communauté. 
'\n u'Pendant tes lectures et ton apprentissage, si jamais tu as des '\n u'questions à poser, tu retrouveras sur les [forums]({4}) des personnes '\n u'prêtes à te filer un coup de main et ainsi t\\'éviter de passer '\n u'plusieurs heures sur un problème.'\n u'\\n\\n'\n u'L\\'ensemble du contenu disponible sur le site est et sera toujours gratuit, '\n u'car la communauté de ZesteDeSavoir est attachée aux valeurs du libre '\n u'partage et désire apporter le savoir à tout le monde quelques soit ses moyens.'\n u'\\n\\n'\n u'En espérant que tu te plaira ici, '\n u'je te laisse maintenant faire le tour'\n .format(usr.username,\n settings.SITE_URL + reverse(\"zds.tutorial.views.index\"),\n settings.SITE_URL + reverse(\"zds.article.views.index\"),\n settings.SITE_URL + reverse(\"zds.member.views.index\"),\n settings.SITE_URL + reverse(\"zds.forum.views.index\")))\n send_mp(\n bot,\n [usr],\n u\"Bienvenue sur ZesteDeSavoir\",\n u\"Le manuel du nouveau membre\",\n msg,\n True,\n True,\n False,\n )\n return render_template(\"member/register/token_success.html\", {\"usr\": usr})\n token.delete()\n\n\ndef generate_token_account(request):\n \"\"\"Generate token for account.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"zds.pages.views.home\"))\n token = get_object_or_404(TokenRegister, token=token)\n\n # push date\n\n date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0,\n seconds=0)\n token.date_end = date_end\n token.save()\n\n # send email\n\n subject = \"ZDS - Confirmation d'inscription\"\n from_email = \"ZesteDeSavoir <{0}>\".format(settings.MAIL_NOREPLY)\n message_html = get_template(\"email/confirm_register.html\"\n ) \\\n .render(Context({\"username\": token.user.username,\n \"url\": settings.SITE_URL + token.get_absolute_url()}))\n message_txt = get_template(\"email/confirm_register.txt\"\n ) \\\n .render(Context({\"username\": token.user.username,\n \"url\": settings.SITE_URL + token.get_absolute_url()}))\n msg = EmailMultiAlternatives(subject, message_txt, from_email,\n [token.user.email])\n msg.attach_alternative(message_html, \"text/html\")\n try:\n msg.send()\n except:\n msg = None\n return render_template('member/register/token_success.html', {})\n\n\ndef get_client_ip(request):\n \"\"\"Retrieve the real IP address of the client.\"\"\"\n\n if \"HTTP_X_REAL_IP\" in request.META: # nginx\n return request.META.get(\"HTTP_X_REAL_IP\")\n elif \"REMOTE_ADDR\" in request.META:\n # other\n return request.META.get(\"REMOTE_ADDR\")\n else:\n # should never happend\n return \"0.0.0.0\"\n\n\ndef date_to_chart(posts):\n lst = 24 * [0]\n for i in range(len(lst)):\n lst[i] = 7 * [0]\n for post in posts:\n t = post.pubdate.timetuple()\n lst[t.tm_hour][(t.tm_wday + 1) % 7] = lst[t.tm_hour][(t.tm_wday + 1)\n % 7] + 1\n return lst\n\n\n\n@login_required\n@require_POST\ndef add_oldtuto(request):\n id = request.POST[\"id\"]\n profile_pk = request.POST[\"profile_pk\"]\n profile = get_object_or_404(Profile, pk=profile_pk)\n if profile.sdz_tutorial:\n olds = profile.sdz_tutorial.strip().split(\":\")\n else:\n olds = []\n last = str(id)\n for old in olds:\n last += \":{0}\".format(old)\n profile.sdz_tutorial = last\n profile.save()\n messages.success(request,\n u'Le tutoriel a bien été lié au '\n u'membre {0}'.format(profile.user.username))\n return redirect(reverse(\"zds.member.views.details\",\n args=[profile.user.username]))\n\n\n\n@login_required\ndef remove_oldtuto(request):\n if \"id\" in request.GET:\n id = request.GET[\"id\"]\n else:\n raise 
Http404\n if \"profile\" in request.GET:\n profile_pk = request.GET[\"profile\"]\n else:\n raise Http404\n profile = get_object_or_404(Profile, pk=profile_pk)\n if profile.sdz_tutorial \\\n or not request.user.has_perm(\"member.change_profile\"):\n olds = profile.sdz_tutorial.strip().split(\":\")\n olds.remove(str(id))\n else:\n raise PermissionDenied\n last = \"\"\n for i in range(len(olds)):\n if i > 0:\n last += \":\"\n last += \"{0}\".format(str(olds[i]))\n profile.sdz_tutorial = last\n profile.save()\n\n messages.success(request,\n u'Le tutoriel a bien été retiré '\n u'au membre {0}'.format(profile.user.username))\n return redirect(reverse(\"zds.member.views.details\",\n args=[profile.user.username]))\n", "path": "zds/member/views.py" } ]
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nimport os\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, SiteProfileNotAvailable\nfrom django.core.context_processors import csrf\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.core.urlresolvers import reverse\nfrom django.db import transaction\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect, get_object_or_404, render_to_response\nfrom django.template import Context, RequestContext\nfrom django.template.loader import get_template\nfrom django.views.decorators.http import require_POST\nimport json\nimport pygal\n\nfrom forms import LoginForm, MiniProfileForm, ProfileForm, RegisterForm, \\\n ChangePasswordForm, ChangeUserForm, ForgotPasswordForm, NewPasswordForm, \\\n OldTutoForm\nfrom models import Profile, TokenForgotPassword, Ban, TokenRegister, \\\n get_info_old_tuto, logout_user\nfrom zds.gallery.forms import ImageAsAvatarForm\nfrom zds.article.models import Article\nfrom zds.forum.models import Topic\nfrom zds.member.decorator import can_write_and_read_now\nfrom zds.tutorial.models import Tutorial\nfrom zds.utils import render_template\nfrom zds.utils.mps import send_mp\nfrom zds.utils.paginator import paginator_range\nfrom zds.utils.tokens import generate_token\n\n\n\ndef index(request):\n \"\"\"Displays the list of registered users.\"\"\"\n\n if request.is_ajax():\n q = request.GET.get('q', '')\n members = User.objects.filter(username__icontains=q)[:20]\n results = []\n for member in members:\n member_json = {}\n member_json['id'] = member.pk\n member_json['label'] = member.username\n member_json['value'] = member.username\n results.append(member_json)\n data = json.dumps(results)\n\n mimetype = \"application/json\"\n\n return HttpResponse(data, mimetype)\n\n else:\n members = User.objects.order_by(\"-date_joined\")\n # Paginator\n\n paginator = Paginator(members, settings.MEMBERS_PER_PAGE)\n page = request.GET.get(\"page\")\n try:\n shown_members = paginator.page(page)\n page = int(page)\n except PageNotAnInteger:\n shown_members = paginator.page(1)\n page = 1\n except EmptyPage:\n shown_members = paginator.page(paginator.num_pages)\n page = paginator.num_pages\n return render_template(\"member/index.html\", {\n \"members\": shown_members,\n \"count\": members.count(),\n \"pages\": paginator_range(page, paginator.num_pages),\n \"nb\": page,\n })\n\n\n\ndef details(request, user_name):\n \"\"\"Displays details about a profile.\"\"\"\n\n usr = get_object_or_404(User, username=user_name)\n try:\n profile = usr.profile\n bans = Ban.objects.filter(user=usr).order_by(\"-pubdate\")\n except SiteProfileNotAvailable:\n raise Http404\n\n # refresh moderation chart\n\n dot_chart = pygal.Dot(x_label_rotation=30)\n dot_chart.title = u\"Messages postés par période\"\n dot_chart.x_labels = [\n u\"Dimanche\",\n u\"Lundi\",\n u\"Mardi\",\n u\"Mercredi\",\n u\"Jeudi\",\n u\"Vendredi\",\n u\"Samedi\",\n ]\n dot_chart.show_legend = False\n dates = date_to_chart(profile.get_posts())\n for i in range(0, 24):\n dot_chart.add(str(i) + \" h\", dates[(i + 1) % 24])\n img_path = os.path.join(settings.MEDIA_ROOT, \"pygal\")\n if 
not os.path.isdir(img_path):\n os.makedirs(img_path, mode=0o777)\n fchart = os.path.join(img_path, \"mod-{}.svg\".format(str(usr.pk)))\n dot_chart.render_to_file(fchart)\n my_articles = Article.objects.filter(sha_public__isnull=False).order_by(\n \"-pubdate\").filter(authors__in=[usr]).all()\n my_tutorials = \\\n Tutorial.objects.filter(sha_public__isnull=False) \\\n .filter(authors__in=[usr]) \\\n .order_by(\"-pubdate\"\n ).all()\n my_topics = Topic.objects.filter(author__pk=usr.pk).order_by(\"-pubdate\"\n ).all()\n tops = []\n for top in my_topics:\n if not top.forum.can_read(request.user):\n continue\n else:\n tops.append(top)\n if len(tops) >= 5:\n break\n form = OldTutoForm(profile)\n oldtutos = []\n if profile.sdz_tutorial:\n olds = profile.sdz_tutorial.strip().split(\":\")\n else:\n olds = []\n for old in olds:\n oldtutos.append(get_info_old_tuto(old))\n return render_template(\"member/profile.html\", {\n \"usr\": usr,\n \"profile\": profile,\n \"bans\": bans,\n \"articles\": my_articles,\n \"tutorials\": my_tutorials,\n \"topics\": tops,\n \"form\": form,\n \"old_tutos\": oldtutos,\n })\n\n\n@can_write_and_read_now\n@login_required\[email protected]\ndef modify_profile(request, user_pk):\n \"\"\"Modifies sanction of a user if there is a POST request.\"\"\"\n\n profile = get_object_or_404(Profile, user__pk=user_pk)\n if request.method == \"POST\":\n ban = Ban()\n ban.moderator = request.user\n ban.user = profile.user\n ban.pubdate = datetime.now()\n if \"ls\" in request.POST:\n profile.can_write = False\n ban.type = u\"Lecture Seule\"\n ban.text = request.POST[\"ls-text\"]\n detail = (u'Vous ne pouvez plus poster dans les forums, ni dans les '\n u'commentaires d\\'articles et de tutoriels.')\n if \"ls-temp\" in request.POST:\n ban.type = u\"Lecture Seule Temporaire\"\n ban.text = request.POST[\"ls-temp-text\"]\n profile.can_write = False\n profile.end_ban_write = datetime.now() \\\n + timedelta(days=int(request.POST[\"ls-jrs\"]), hours=0,\n minutes=0, seconds=0)\n detail = (u'Vous ne pouvez plus poster dans les forums, ni dans les '\n u'commentaires d\\'articles et de tutoriels pendant {0} jours.'\n .format(request.POST[\"ls-jrs\"]))\n if \"ban-temp\" in request.POST:\n ban.type = u\"Ban Temporaire\"\n ban.text = request.POST[\"ban-temp-text\"]\n profile.can_read = False\n profile.end_ban_read = datetime.now() \\\n + timedelta(days=int(request.POST[\"ban-jrs\"]), hours=0,\n minutes=0, seconds=0)\n detail = (u'Vous ne pouvez plus vous connecter sur ZesteDeSavoir '\n u'pendant {0} jours.'.format(request.POST[\"ban-jrs\"]))\n logout_user(profile.user.username)\n\n if \"ban\" in request.POST:\n ban.type = u\"Ban définitif\"\n ban.text = request.POST[\"ban-text\"]\n profile.can_read = False\n detail = u\"vous ne pouvez plus vous connecter sur ZesteDeSavoir.\"\n logout_user(profile.user.username)\n if \"un-ls\" in request.POST:\n ban.type = u\"Autorisation d'écrire\"\n ban.text = request.POST[\"unls-text\"]\n profile.can_write = True\n detail = (u'Vous pouvez désormais poster sur les forums, dans les '\n u'commentaires d\\'articles et tutoriels.')\n if \"un-ban\" in request.POST:\n ban.type = u\"Autorisation de se connecter\"\n ban.text = request.POST[\"unban-text\"]\n profile.can_read = True\n detail = u\"vous pouvez désormais vous connecter sur le site.\"\n profile.save()\n ban.save()\n\n # send register message\n\n if \"un-ls\" in request.POST or \"un-ban\" in request.POST:\n msg = \\\n u\"\"\"Bonjour **{0}**,\n\n**Bonne Nouvelle**, la sanction qui pesait sur vous a été levée par 
**{1}**.\n\nCe qui signifie que {2}\n\nLe motif de votre sanction est :\n\n`{3}`\n\nCordialement, L'équipe ZesteDeSavoir.\n\n\"\"\".format(ban.user,\n ban.moderator, detail, ban.text)\n else:\n msg = \\\n u\"\"\"Bonjour **{0}**,\n\nVous avez été santionné par **{1}**.\n\nLa sanction est de type *{2}*, ce qui signifie que {3}\n\nLe motif de votre sanction est :\n\n`{4}`\n\nCordialement, L'équipe ZesteDeSavoir.\n\n\"\"\".format(ban.user,\n ban.moderator, ban.type, detail, ban.text)\n bot = get_object_or_404(User, username=settings.BOT_ACCOUNT)\n send_mp(\n bot,\n [ban.user],\n ban.type,\n \"Sanction\",\n msg,\n True,\n direct=True,\n )\n return redirect(profile.get_absolute_url())\n\n\n\n@login_required\ndef tutorials(request):\n \"\"\"Returns all tutorials of the authenticated user.\"\"\"\n\n # The type indicate what the user would like to display. We can display\n # public, draft or all user's tutorials.\n\n try:\n type = request.GET[\"type\"]\n except KeyError:\n type = None\n\n # Retrieves all tutorials of the current user.\n\n profile = request.user.profile\n if type == \"draft\":\n user_tutorials = profile.get_draft_tutos()\n elif type == \"public\":\n user_tutorials = profile.get_public_tutos()\n else:\n user_tutorials = profile.get_tutos()\n\n return render_template(\"tutorial/member/index.html\",\n {\"tutorials\": user_tutorials, \"type\": type})\n\n\n\n@login_required\ndef articles(request):\n \"\"\"Returns all articles of the authenticated user.\"\"\"\n\n # The type indicate what the user would like to display. We can display\n # public, draft or all user's articles.\n\n try:\n type = request.GET[\"type\"]\n except KeyError:\n type = None\n\n # Retrieves all articles of the current user.\n\n profile = request.user.profile\n if type == \"draft\":\n user_articles = profile.get_draft_articles()\n elif type == \"public\":\n user_articles = profile.get_public_articles()\n else:\n user_articles = profile.get_articles()\n\n return render_template(\"article/member/index.html\",\n {\"articles\": user_articles, \"type\": type})\n\n\n\n@login_required\ndef actions(request):\n \"\"\"Show avaible actions for current user, like a customized homepage.\n\n This may be very temporary.\n\n \"\"\"\n\n # TODO: Seriously improve this page, and see if cannot be merged in\n # zds.pages.views.home since it will be more coherent to give an enhanced\n # homepage for registered users\n\n return render_template(\"member/actions.html\")\n\n\n# settings for public profile\n\n@can_write_and_read_now\n@login_required\ndef settings_mini_profile(request, user_name):\n \"\"\"Minimal settings of users for staff.\"\"\"\n\n # extra information about the current user\n\n profile = Profile.objects.get(user__username=user_name)\n if request.method == \"POST\":\n form = MiniProfileForm(request.POST)\n c = {\"form\": form, \"profile\": profile}\n if form.is_valid():\n profile.biography = form.data[\"biography\"]\n profile.site = form.data[\"site\"]\n profile.avatar_url = form.data[\"avatar_url\"]\n profile.sign = form.data[\"sign\"]\n\n # Save the profile and redirect the user to the configuration space\n # with message indicate the state of the operation\n\n try:\n profile.save()\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_mini_profil\"\n \"e\"))\n messages.success(request,\n \"Le profil a correctement été mis à jour.\")\n return redirect(reverse(\"zds.member.views.details\",\n args=[profile.user.username]))\n else:\n return 
render_to_response(\"member/settings/profile.html\", c,\n RequestContext(request))\n else:\n form = MiniProfileForm(initial={\n \"biography\": profile.biography,\n \"site\": profile.site,\n \"avatar_url\": profile.avatar_url,\n \"sign\": profile.sign,\n })\n c = {\"form\": form, \"profile\": profile}\n return render_to_response(\"member/settings/profile.html\", c,\n RequestContext(request))\n\n\n@can_write_and_read_now\n@login_required\ndef settings_profile(request):\n \"\"\"User's settings about his personal information.\"\"\"\n\n # extra information about the current user\n\n profile = request.user.profile\n if request.method == \"POST\":\n form = ProfileForm(request.POST)\n c = {\"form\": form}\n if form.is_valid():\n profile.biography = form.data[\"biography\"]\n profile.site = form.data[\"site\"]\n profile.show_email = \"show_email\" \\\n in form.cleaned_data.get(\"options\")\n profile.show_sign = \"show_sign\" in form.cleaned_data.get(\"options\")\n profile.hover_or_click = \"hover_or_click\" \\\n in form.cleaned_data.get(\"options\")\n profile.email_for_answer = \"email_for_answer\" \\\n in form.cleaned_data.get(\"options\")\n profile.avatar_url = form.data[\"avatar_url\"]\n profile.sign = form.data[\"sign\"]\n\n # Save the profile and redirect the user to the configuration space\n # with message indicate the state of the operation\n\n try:\n profile.save()\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n messages.success(request,\n \"Le profil a correctement été mis à jour.\")\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n else:\n return render_to_response(\"member/settings/profile.html\", c,\n RequestContext(request))\n else:\n form = ProfileForm(initial={\n \"biography\": profile.biography,\n \"site\": profile.site,\n \"avatar_url\": profile.avatar_url,\n \"show_email\": profile.show_email,\n \"show_sign\": profile.show_sign,\n \"hover_or_click\": profile.hover_or_click,\n \"email_for_answer\": profile.email_for_answer,\n \"sign\": profile.sign,\n })\n c = {\"form\": form}\n return render_to_response(\"member/settings/profile.html\", c,\n RequestContext(request))\n\n\n@can_write_and_read_now\n@login_required\n@require_POST\ndef update_avatar(request):\n \"\"\"\n Update avatar from gallery.\n Specific method instead using settings_profile() to avoid to handle all required fields.\n \"\"\"\n profile = request.user.profile\n form = ImageAsAvatarForm(request.POST)\n if form.is_valid():\n profile.avatar_url = form.data[\"avatar_url\"]\n try:\n profile.save()\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n messages.success(request, \"L'avatar a correctement été mis à jour.\")\n\n return redirect(reverse(\"zds.member.views.settings_profile\"))\n\n\n@can_write_and_read_now\n@login_required\ndef settings_account(request):\n \"\"\"User's settings about his account.\"\"\"\n\n if request.method == \"POST\":\n form = ChangePasswordForm(request.user, request.POST)\n c = {\"form\": form}\n if form.is_valid():\n try:\n request.user.set_password(form.data[\"password_new\"])\n request.user.save()\n messages.success(request, \"Le mot de passe a bien été modifié.\"\n )\n return redirect(reverse(\"zds.member.views.settings_account\"))\n except:\n messages.error(request, \"Une erreur est survenue.\")\n return redirect(reverse(\"zds.member.views.settings_account\"))\n else:\n return 
render_to_response(\"member/settings/account.html\", c,\n RequestContext(request))\n else:\n form = ChangePasswordForm(request.user)\n c = {\"form\": form}\n return render_to_response(\"member/settings/account.html\", c,\n RequestContext(request))\n\n\n@can_write_and_read_now\n@login_required\ndef settings_user(request):\n \"\"\"User's settings about his email.\"\"\"\n\n profile = request.user.profile\n if request.method == \"POST\":\n form = ChangeUserForm(request.POST)\n c = {\"form\": form}\n if form.is_valid():\n email_exist = User.objects.filter(email=form.data[\"username_new\"\n ]).count()\n username_exist = \\\n User.objects.filter(username=form.data[\"username_new\"]).count()\n old = User.objects.filter(pk=request.user.pk).all()[0]\n if form.data[\"username_new\"] and username_exist > 0:\n raise Http404\n elif form.data[\"username_new\"]:\n if form.data[\"username_new\"].strip() != \"\":\n old.username = form.data[\"username_new\"]\n if form.data[\"email_new\"] and email_exist > 0:\n raise Http404\n elif form.data[\"email_new\"]:\n if form.data[\"email_new\"].strip() != \"\":\n old.email = form.data[\"email_new\"]\n old.save()\n return redirect(profile.get_absolute_url())\n else:\n return render_to_response(\"member/settings/user.html\", c,\n RequestContext(request))\n else:\n form = ChangeUserForm()\n c = {\"form\": form}\n return render_to_response(\"member/settings/user.html\", c,\n RequestContext(request))\n\n\n\ndef login_view(request):\n \"\"\"Log in user.\"\"\"\n\n csrf_tk = {}\n csrf_tk.update(csrf(request))\n error = False\n\n # Redirecting user once logged in?\n\n if \"next\" in request.GET:\n next_page = request.GET[\"next\"]\n else:\n next_page = None\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(username=username, password=password)\n if user is not None:\n profile = get_object_or_404(Profile, user=user)\n if user.is_active:\n if profile.can_read_now():\n login(request, user)\n request.session[\"get_token\"] = generate_token()\n if \"remember\" not in request.POST:\n request.session.set_expiry(0)\n\n # redirect the user if needed\n\n try:\n return redirect(next_page)\n except:\n return redirect(reverse(\"zds.pages.views.home\"))\n else:\n messages.error(request,\n \"Vous n'êtes pas autorisé à vous connecter \"\n \"sur le site, vous avez été banni par un \"\n \"modérateur\")\n else:\n messages.error(request,\n \"Vous n'avez pas encore activé votre compte, \"\n \"vous devez le faire pour pouvoir vous \"\n \"connecter sur le site. 
Regardez dans vos \"\n \"mails : \" + str(user.email))\n else:\n messages.error(request,\n \"Les identifiants fournis ne sont pas valides\")\n form = LoginForm()\n form.helper.form_action = reverse(\"zds.member.views.login_view\") \\\n + \"?next=\" + str(next_page)\n csrf_tk[\"error\"] = error\n csrf_tk[\"form\"] = form\n csrf_tk[\"next_page\"] = next_page\n return render_template(\"member/login.html\",\n {\"form\": form,\n \"csrf_tk\": csrf_tk,\n \"next_page\": next_page})\n\n\n@login_required\n@require_POST\ndef logout_view(request):\n \"\"\"Log out user.\"\"\"\n\n logout(request)\n request.session.clear()\n return redirect(reverse(\"zds.pages.views.home\"))\n\n\ndef register_view(request):\n \"\"\"Register a new user.\"\"\"\n\n if request.method == \"POST\":\n form = RegisterForm(request.POST)\n if form.is_valid():\n data = form.data\n user = User.objects.create_user(data[\"username\"], data[\"email\"],\n data[\"password\"])\n user.is_active = False\n user.save()\n profile = Profile(user=user, show_email=False, show_sign=True,\n hover_or_click=True, email_for_answer=False)\n profile.last_ip_address = get_client_ip(request)\n profile.save()\n user.backend = \"django.contrib.auth.backends.ModelBackend\"\n\n # Generate a valid token during one hour.\n\n uuidToken = str(uuid.uuid4())\n date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0,\n seconds=0)\n token = TokenRegister(user=user, token=uuidToken,\n date_end=date_end)\n token.save()\n\n # send email\n\n subject = \"ZDS - Confirmation d'inscription\"\n from_email = \"Zeste de Savoir <{0}>\".format(settings.MAIL_NOREPLY)\n message_html = get_template(\"email/register/confirm.html\").render(Context(\n {\"username\": user.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n message_txt = get_template(\"email/register/confirm.txt\") .render(Context(\n {\"username\": user.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n msg = EmailMultiAlternatives(subject, message_txt, from_email,\n [user.email])\n msg.attach_alternative(message_html, \"text/html\")\n try:\n msg.send()\n except:\n msg = None\n return render_template(\"member/register/success.html\", {})\n else:\n return render_template(\"member/register/index.html\", {\"form\": form})\n form = RegisterForm()\n return render_template(\"member/register/index.html\", {\"form\": form})\n\n\n\ndef forgot_password(request):\n \"\"\"If the user forgot his password, he can have a new one.\"\"\"\n\n if request.method == \"POST\":\n form = ForgotPasswordForm(request.POST)\n if form.is_valid():\n data = form.data\n username = data[\"username\"]\n usr = get_object_or_404(User, username=username)\n\n # Generate a valid token during one hour.\n\n uuidToken = str(uuid.uuid4())\n date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0,\n seconds=0)\n token = TokenForgotPassword(user=usr, token=uuidToken,\n date_end=date_end)\n token.save()\n\n # send email\n\n subject = \"ZDS - Mot de passe oublié\"\n from_email = \"ZesteDeSavoir <{0}>\".format(settings.MAIL_NOREPLY)\n message_html = get_template(\"email/forgot_password/confirm.html\").render(Context(\n {\"username\": usr.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n message_txt = get_template(\"email/forgot_password/confirm.txt\") .render(Context(\n {\"username\": usr.username, \"url\": settings.SITE_URL + token.get_absolute_url()}))\n msg = EmailMultiAlternatives(subject, message_txt, from_email,\n [usr.email])\n msg.attach_alternative(message_html, \"text/html\")\n 
msg.send()\n return render_template(\"member/forgot_password/success.html\")\n else:\n return render_template(\"member/forgot_password/index.html\",\n {\"form\": form})\n form = ForgotPasswordForm()\n return render_template(\"member/forgot_password/index.html\", {\"form\": form})\n\n\n\ndef new_password(request):\n \"\"\"Create a new password for a user.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"zds.pages.views.home\"))\n token = get_object_or_404(TokenForgotPassword, token=token)\n if request.method == \"POST\":\n form = NewPasswordForm(token.user.username, request.POST)\n if form.is_valid():\n data = form.data\n password = data[\"password\"]\n # User can't confirm his request if it is too late.\n\n if datetime.now() > token.date_end:\n return render_template(\"member/new_password/failed.html\")\n token.user.set_password(password)\n token.user.save()\n token.delete()\n return render_template(\"member/new_password/success.html\")\n else:\n return render_template(\"member/new_password.html\", {\"form\": form})\n form = NewPasswordForm(identifier=token.user.username)\n return render_template(\"member/new_password/index.html\", {\"form\": form})\n\n\ndef active_account(request):\n \"\"\"Active token for a user.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"zds.pages.views.home\"))\n token = get_object_or_404(TokenRegister, token=token)\n usr = token.user\n\n # User can't confirm his request if it is too late.\n\n if datetime.now() > token.date_end:\n return render_template(\"member/register/token_failed.html\",\n {\"token\": token})\n usr.is_active = True\n usr.save()\n\n # send register message\n\n bot = get_object_or_404(User, username=settings.BOT_ACCOUNT)\n msg = (\n u'Bonjour **{0}**,'\n u'\\n\\n'\n u'Ton compte a été activé, et tu es donc officiellement '\n u'membre de la communauté de ZesteDeSavoir.'\n u'\\n\\n'\n u'ZesteDeSavoir est une communauté dont le but est de diffuser des '\n u'connaissances au plus grand nombre.'\n u'\\n\\n'\n u'Sur ce site, tu trouveras un ensemble de [tutoriels]({1}) dans '\n u'plusieurs domaines et plus particulièrement autour de l\\'informatique '\n u'et des sciences. Tu y retrouveras aussi des [articles]({2}) '\n u'traitant de sujets d\\'actualités ou non, qui, tout comme les tutoriels, s'\n u'ont écrits par des [membres]({3}) de la communauté. 
'\n u'Pendant tes lectures et ton apprentissage, si jamais tu as des '\n u'questions à poser, tu retrouveras sur les [forums]({4}) des personnes '\n u'prêtes à te filer un coup de main et ainsi t\\'éviter de passer '\n u'plusieurs heures sur un problème.'\n u'\\n\\n'\n u'L\\'ensemble du contenu disponible sur le site est et sera toujours gratuit, '\n u'car la communauté de ZesteDeSavoir est attachée aux valeurs du libre '\n u'partage et désire apporter le savoir à tout le monde quelques soit ses moyens.'\n u'\\n\\n'\n u'En espérant que tu te plaira ici, '\n u'je te laisse maintenant faire le tour'\n .format(usr.username,\n settings.SITE_URL + reverse(\"zds.tutorial.views.index\"),\n settings.SITE_URL + reverse(\"zds.article.views.index\"),\n settings.SITE_URL + reverse(\"zds.member.views.index\"),\n settings.SITE_URL + reverse(\"zds.forum.views.index\")))\n send_mp(\n bot,\n [usr],\n u\"Bienvenue sur ZesteDeSavoir\",\n u\"Le manuel du nouveau membre\",\n msg,\n True,\n True,\n False,\n )\n return render_template(\"member/register/token_success.html\", {\"usr\": usr})\n token.delete()\n\n\ndef generate_token_account(request):\n \"\"\"Generate token for account.\"\"\"\n\n try:\n token = request.GET[\"token\"]\n except KeyError:\n return redirect(reverse(\"zds.pages.views.home\"))\n token = get_object_or_404(TokenRegister, token=token)\n\n # push date\n\n date_end = datetime.now() + timedelta(days=0, hours=1, minutes=0,\n seconds=0)\n token.date_end = date_end\n token.save()\n\n # send email\n\n subject = \"ZDS - Confirmation d'inscription\"\n from_email = \"ZesteDeSavoir <{0}>\".format(settings.MAIL_NOREPLY)\n message_html = get_template(\"email/confirm_register.html\"\n ) \\\n .render(Context({\"username\": token.user.username,\n \"url\": settings.SITE_URL + token.get_absolute_url()}))\n message_txt = get_template(\"email/confirm_register.txt\"\n ) \\\n .render(Context({\"username\": token.user.username,\n \"url\": settings.SITE_URL + token.get_absolute_url()}))\n msg = EmailMultiAlternatives(subject, message_txt, from_email,\n [token.user.email])\n msg.attach_alternative(message_html, \"text/html\")\n try:\n msg.send()\n except:\n msg = None\n return render_template('member/register/token_success.html', {})\n\n\ndef get_client_ip(request):\n \"\"\"Retrieve the real IP address of the client.\"\"\"\n\n if \"HTTP_X_REAL_IP\" in request.META: # nginx\n return request.META.get(\"HTTP_X_REAL_IP\")\n elif \"REMOTE_ADDR\" in request.META:\n # other\n return request.META.get(\"REMOTE_ADDR\")\n else:\n # should never happend\n return \"0.0.0.0\"\n\n\ndef date_to_chart(posts):\n lst = 24 * [0]\n for i in range(len(lst)):\n lst[i] = 7 * [0]\n for post in posts:\n t = post.pubdate.timetuple()\n lst[t.tm_hour][(t.tm_wday + 1) % 7] = lst[t.tm_hour][(t.tm_wday + 1)\n % 7] + 1\n return lst\n\n\n\n@login_required\n@require_POST\ndef add_oldtuto(request):\n id = request.POST[\"id\"]\n profile_pk = request.POST[\"profile_pk\"]\n profile = get_object_or_404(Profile, pk=profile_pk)\n if profile.sdz_tutorial:\n olds = profile.sdz_tutorial.strip().split(\":\")\n else:\n olds = []\n last = str(id)\n for old in olds:\n last += \":{0}\".format(old)\n profile.sdz_tutorial = last\n profile.save()\n messages.success(request,\n u'Le tutoriel a bien été lié au '\n u'membre {0}'.format(profile.user.username))\n return redirect(reverse(\"zds.member.views.details\",\n args=[profile.user.username]))\n\n\n\n@login_required\ndef remove_oldtuto(request):\n if \"id\" in request.GET:\n id = request.GET[\"id\"]\n else:\n raise 
Http404\n if \"profile\" in request.GET:\n profile_pk = request.GET[\"profile\"]\n else:\n raise Http404\n profile = get_object_or_404(Profile, pk=profile_pk)\n if profile.sdz_tutorial \\\n or not request.user.has_perm(\"member.change_profile\"):\n olds = profile.sdz_tutorial.strip().split(\":\")\n olds.remove(str(id))\n else:\n raise PermissionDenied\n last = \"\"\n for i in range(len(olds)):\n if i > 0:\n last += \":\"\n last += \"{0}\".format(str(olds[i]))\n profile.sdz_tutorial = last\n profile.save()\n\n messages.success(request,\n u'Le tutoriel a bien été retiré '\n u'au membre {0}'.format(profile.user.username))\n return redirect(reverse(\"zds.member.views.details\",\n args=[profile.user.username]))\n", "path": "zds/member/views.py" } ]
diff --git a/zds/member/views.py b/zds/member/views.py index 532ac726ae..b1f1502b79 100644 --- a/zds/member/views.py +++ b/zds/member/views.py @@ -55,6 +55,8 @@ def index(request): results.append(member_json) data = json.dumps(results) + mimetype = "application/json" + return HttpResponse(data, mimetype) else:
pytorch__TensorRT-74
Create some sort of serialization / deserialization functionality
With INT8 about to land, it would be a pain to have to calibrate from scratch every time. There should be some mechanism to save and load modules with the TRT engine included.
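For orientation before the file snapshots and diff below: the approach the accompanying PR takes is to embed the serialized TRT engine as an attribute of the TorchScript module, so the ordinary `torch.jit.save` / `torch.jit.load` path carries it along. A minimal sketch of that workflow, assuming the `trtorch.compile` call and `torch.jit.save` usage shown in the README snippet further down; the toy model, the `input_shapes` compile-setting key, and the shapes are illustrative assumptions, not a definitive API reference:

```python
import torch
import torch.nn as nn
import trtorch  # assumed built with the changes from this PR

class Toy(nn.Module):
    def forward(self, x):
        return torch.relu(x)

scripted = torch.jit.script(Toy().eval().cuda())

# Compile once -- any INT8 calibration cost is paid here.
compile_settings = {"input_shapes": [[1, 3, 224, 224]]}
trt_ts_module = trtorch.compile(scripted, compile_settings)

# Saving keeps the serialized TRT engine inside the TorchScript module ...
torch.jit.save(trt_ts_module, "trt_torchscript_module.ts")

# ... so reloading later needs no recompilation or recalibration.
reloaded = torch.jit.load("trt_torchscript_module.ts")
out = reloaded(torch.randn(1, 3, 224, 224).cuda())
```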
[ { "content": "import os\nimport sys\n\nif sys.version_info < (3,):\n raise Exception(\"Python 2 has reached end-of-life and is not supported by TRTorch\")\n\nimport ctypes\nimport torch\n\nfrom trtorch._version import __version__\nfrom trtorch._compiler import *\nfrom trtorch._types import *\nfrom trtorch import logging\n", "path": "py/trtorch/__init__.py" } ]
[ { "content": "import os\nimport sys\n\nif sys.version_info < (3,):\n raise Exception(\"Python 2 has reached end-of-life and is not supported by TRTorch\")\n\nimport ctypes\nimport torch\n\nfrom trtorch._version import __version__\nfrom trtorch._compiler import *\nfrom trtorch._types import *\nfrom trtorch import logging\n\ndef _register_with_torch():\n trtorch_dir = os.path.dirname(__file__)\n torch.ops.load_library(trtorch_dir + '/lib/trtorch.so')", "path": "py/trtorch/__init__.py" } ]
diff --git a/BUILD b/BUILD index 48f2ae5338..02829531b2 100644 --- a/BUILD +++ b/BUILD @@ -8,6 +8,8 @@ pkg_tar( "//core/conversion:include", "//core/conversion/conversionctx:include", "//core/conversion/converters:include", + "//core/conversion/var:include", + "//core/conversion/tensorcontainer:include", "//core/conversion/evaluators:include", "//core/execution:include", "//core/lowering:include", @@ -35,6 +37,15 @@ pkg_tar( ) +pkg_tar( + name = "bin", + package_dir = "bin/", + srcs = [ + "//cpp/trtorchc:trtorchc", + ], + mode = "0755", +) + pkg_tar( @@ -46,6 +57,7 @@ pkg_tar( ], deps = [ ":lib", + ":bin", ":include", ":include_core", ], diff --git a/README.md b/README.md index befe86e8fe..60cfe55e94 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,8 @@ compile_settings.op_precision = torch::kFloat; auto trt_mod = trtorch::CompileGraph(ts_mod, compile_settings); // Run like normal auto results = trt_mod.forward({in_tensor}); +// Save module for later +trt_mod.save("trt_torchscript_module.ts"); ... ``` @@ -46,6 +48,7 @@ trt_ts_module = trtorch.compile(torch_script_module, compile_settings) input_data = input_data.half() result = trt_ts_module(input_data) +torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") ``` > Notes on running in lower precisions: diff --git a/core/compiler.cpp b/core/compiler.cpp index 2f94ba8ead..be0dc895d8 100644 --- a/core/compiler.cpp +++ b/core/compiler.cpp @@ -6,7 +6,9 @@ #include "NvInfer.h" #include "ATen/core/function_schema.h" +#include "ATen/core/jit_type.h" +#include "torch/custom_class.h" #include "torch/csrc/jit/frontend/function_schema_parser.h" #include "torch/csrc/jit/ir/ir.h" #include "torch/csrc/jit/passes/pass_manager.h" @@ -40,32 +42,70 @@ c10::FunctionSchema GenerateGraphSchema(torch::jit::script::Module mod, std::str void AddEngineToGraph(torch::jit::script::Module mod, std::shared_ptr<torch::jit::Graph>& g, std::string& serialized_engine) { - execution::EngineID uid = execution::RegisterEngineFromSerializedEngine(serialized_engine); - auto num_io = execution::GetEngineIO(uid); - - auto self = g->addInput("self.1"); + auto engine = execution::TRTEngine(mod._ivalue()->name(), serialized_engine); + // Get required metadata about the engine out + auto num_io = engine.num_io; + auto name = engine.name; + + // Add the engine as an attribute of the module, this will let the engine be serialized and deserialized + auto engine_ptr = c10::make_intrusive<execution::TRTEngine>(engine); + mod.register_attribute( + name, + c10::getCustomClassType<c10::intrusive_ptr<execution::TRTEngine>>(), + c10::IValue(std::move(engine_ptr)), + false + ); + + // Add the module as an input into the graph + auto self = g->addInput("self_1"); self->setType(mod.type()); - auto id_val = g->insertConstant(uid); + // Start by retriveing the engine from the module attribute list + auto engine_node = g->createGetAttr(self, name); + g->block()->appendNode(engine_node); + // Add inputs to the graph corresponding to the number of input tensors expected by the engine + // Also store those inputs in a vector so that they can be coalesced into a single list at runtime std::vector<torch::jit::Value*> engine_inputs; - engine_inputs.push_back(id_val); - for (uint64_t i = 0; i < num_io.first; i++) { - auto in_val = g->addInput(""); + auto in_val = g->addInput(std::string("input_") + std::to_string(i)); in_val->setType(c10::TensorType::get()); engine_inputs.push_back(in_val); } - auto engine_node = g->create(c10::Symbol::fromQualString("trt::execute_engine"), 
torch::jit::ArrayRef<torch::jit::Value*>(engine_inputs), num_io.second); - g->block()->appendNode(engine_node); - - if (engine_node->outputs().size() > 1) { - auto return_tuple_node = g->createTuple(engine_node->outputs()); + // Create a node that will merge all of the input tensors into a single list argument to the trt::execute_engine op + // Creates: prim::ListConstruct(<input tensors>) + auto input_list_node = g->createList(c10::TensorType::get(), torch::jit::ArrayRef<torch::jit::Value*>(engine_inputs)); + g->block()->appendNode(input_list_node); + + // Make a list of inputs to the actual trt::execute_engine op + // Note: Ordering of list and then engine is because we can pop off the engine first which contains all the metadata + // needed for execution + std::vector<torch::jit::Value*> execute_node_inputs; + execute_node_inputs.push_back(input_list_node->outputs()[0]); + execute_node_inputs.push_back(engine_node->outputs()[0]); + + // Create the actual execution node trt::execute_engine using the assembled inputs + auto execute_node = g->create(c10::Symbol::fromQualString("trt::execute_engine"), torch::jit::ArrayRef<torch::jit::Value*>(execute_node_inputs), 1); + g->block()->appendNode(execute_node); + execute_node->outputs()[0]->setType(c10::ListType::ofTensors()); + + // Create a node to unpack the list into seperate tensors, in the case of there being only one tensor, the tensor will be returned, + // otherwise they are returned as a tuple of tensors. + // Creates: prim::ListUnpack(<engine output>) + auto unpack_node = g->createListUnpack(execute_node->outputs()[0], num_io.second); + g->block()->appendNode(unpack_node); + + // If there are multiple output tensors from TensorRT we wrap them in a tuple to return + if (unpack_node->outputs().size() > 1) { + // Creates prim::TupleConstruct(<output tensors>) using outputs of the unpack node + auto return_tuple_node = g->createTuple(unpack_node->outputs()); g->block()->appendNode(return_tuple_node); + // Set the output as the produced tuple g->registerOutput(return_tuple_node->outputs()[0]); } else { - g->registerOutput(engine_node->outputs()[0]); + // Set the output as the sole output tensor + g->registerOutput(unpack_node->outputs()[0]); } LOG_DEBUG(*g << "(AddEngineToGraph)\n"); diff --git a/core/conversion/InterfaceTypes.cpp b/core/conversion/InterfaceTypes.cpp index ac90085583..3ec3d93178 100644 --- a/core/conversion/InterfaceTypes.cpp +++ b/core/conversion/InterfaceTypes.cpp @@ -34,7 +34,7 @@ InputRange::InputRange(std::vector<int64_t> d) { min = util::toDims(d); max = util::toDims(d); input_shape = util::toDims(d); - + input_is_dynamic = false; } @@ -67,6 +67,7 @@ InputRange::InputRange(std::vector<int64_t> min_shape, std::vector<int64_t> opt_ dim.insert(max_shape[i]); if (dim.size() != 1) { dyn_shape.push_back(-1); + input_is_dynamic = true; } else { dyn_shape.push_back(opt_shape[i]); } diff --git a/core/conversion/conversion.cpp b/core/conversion/conversion.cpp index 911e58e039..fc4e75ca88 100644 --- a/core/conversion/conversion.cpp +++ b/core/conversion/conversion.cpp @@ -155,6 +155,10 @@ void AddInputs(ConversionCtx* ctx, profile->setDimensions(trt_in->getName(), nvinfer1::OptProfileSelector::kOPT, dims.opt); profile->setDimensions(trt_in->getName(), nvinfer1::OptProfileSelector::kMAX, dims.max); + if (dims.input_is_dynamic) { + ctx->input_is_dynamic = true; + } + ctx->value_tensor_map[in] = trt_in; } diff --git a/core/conversion/conversion.h b/core/conversion/conversion.h index 529d04f6b6..1c7a790025 100644 --- 
a/core/conversion/conversion.h +++ b/core/conversion/conversion.h @@ -15,6 +15,7 @@ struct InputRange { nvinfer1::Dims max; nvinfer1::Dims opt; nvinfer1::Dims input_shape; + bool input_is_dynamic = false; // Should we restrict to unsigned? InputRange(std::vector<int64_t> d); InputRange(std::vector<int64_t> min_shape, diff --git a/core/conversion/conversionctx/ConversionCtx.h b/core/conversion/conversionctx/ConversionCtx.h index 76653037a9..abd49cf22e 100644 --- a/core/conversion/conversionctx/ConversionCtx.h +++ b/core/conversion/conversionctx/ConversionCtx.h @@ -42,6 +42,7 @@ struct ConversionCtx { ~ConversionCtx(); + bool input_is_dynamic = false; nvinfer1::IBuilder* builder; nvinfer1::INetworkDefinition* net; nvinfer1::IBuilderConfig* cfg; diff --git a/core/conversion/converters/impl/batch_norm.cpp b/core/conversion/converters/impl/batch_norm.cpp index bd923310a0..a7b6045737 100644 --- a/core/conversion/converters/impl/batch_norm.cpp +++ b/core/conversion/converters/impl/batch_norm.cpp @@ -19,12 +19,24 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() auto orig_shape = input->getDimensions(); auto shape = util::toVec(orig_shape); auto options = torch::TensorOptions().dtype(torch::kFloat32); - auto gamma = args[1].unwrapToTensor(at::full({shape}, 1, {options})); - auto beta = args[2].unwrapToTensor(at::full({shape}, 1, {options})); - auto mean = args[3].unwrapToTensor(at::full({shape}, 0, {options})); - auto var = args[4].unwrapToTensor(at::full({shape}, 0, {options})); + + torch::Tensor gamma, beta, mean, var; + + if (ctx->input_is_dynamic) { + gamma = args[1].unwrapToTensor(); + beta = args[2].unwrapToTensor(); + mean = args[3].unwrapToTensor(); + var = args[4].unwrapToTensor(); + } else { + gamma = args[1].unwrapToTensor(at::full({shape}, 1, {options})); + beta = args[2].unwrapToTensor(at::full({shape}, 1, {options})); + mean = args[3].unwrapToTensor(at::full({shape}, 0, {options})); + var = args[4].unwrapToTensor(at::full({shape}, 0, {options})); + } + auto eps = args[7].unwrapToDouble(1e-5f); + LOG_DEBUG("momentum disregarded"); LOG_DEBUG("training disregarded"); LOG_DEBUG("cudnn disregarded"); diff --git a/core/conversion/converters/impl/concat.cpp b/core/conversion/converters/impl/concat.cpp index da3853291c..2063d8921f 100644 --- a/core/conversion/converters/impl/concat.cpp +++ b/core/conversion/converters/impl/concat.cpp @@ -8,7 +8,7 @@ namespace conversion { namespace converters { namespace impl { namespace { -auto cat_registrations = RegisterNodeConversionPatterns() +auto cat_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::cat(Tensor[] tensors, int dim=0) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/constant.cpp b/core/conversion/converters/impl/constant.cpp index 432eb6bf85..1c23cb6a8b 100644 --- a/core/conversion/converters/impl/constant.cpp +++ b/core/conversion/converters/impl/constant.cpp @@ -7,7 +7,7 @@ namespace conversion { namespace converters { namespace impl { namespace { -auto constant_registrations = RegisterNodeConversionPatterns() +auto constant_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "trt::const(Tensor self) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/conv_deconv.cpp b/core/conversion/converters/impl/conv_deconv.cpp index 37cf3ff3ad..3388a26741 100644 --- 
a/core/conversion/converters/impl/conv_deconv.cpp +++ b/core/conversion/converters/impl/conv_deconv.cpp @@ -9,7 +9,7 @@ namespace conversion { namespace converters { namespace impl { namespace { -auto conv_registrations = RegisterNodeConversionPatterns() +auto conv_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ R"SIG(aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, diff --git a/core/conversion/converters/impl/element_wise.cpp b/core/conversion/converters/impl/element_wise.cpp index 375e7a2d8f..4cb2e03a19 100644 --- a/core/conversion/converters/impl/element_wise.cpp +++ b/core/conversion/converters/impl/element_wise.cpp @@ -68,7 +68,7 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera } -auto element_wise_registrations = RegisterNodeConversionPatterns() +auto element_wise_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::add.Tensor(Tensor self, Tensor other, Scalar alpha=1) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/linear.cpp b/core/conversion/converters/impl/linear.cpp index f4c49ec020..e22664afe0 100644 --- a/core/conversion/converters/impl/linear.cpp +++ b/core/conversion/converters/impl/linear.cpp @@ -8,7 +8,7 @@ namespace converters { namespace impl { namespace { -auto linear_registrations = RegisterNodeConversionPatterns() +auto linear_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::linear(Tensor input, Tensor weight, Tensor? bias = None) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/matrix_multiply.cpp b/core/conversion/converters/impl/matrix_multiply.cpp index c6d2d99f1e..cbebdc13b2 100644 --- a/core/conversion/converters/impl/matrix_multiply.cpp +++ b/core/conversion/converters/impl/matrix_multiply.cpp @@ -8,7 +8,7 @@ namespace converters { namespace impl { namespace { -auto mm_registrations = RegisterNodeConversionPatterns() +auto mm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::matmul(Tensor self, Tensor other) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/pooling.cpp b/core/conversion/converters/impl/pooling.cpp index 04472ce5fc..e18c78c1ed 100644 --- a/core/conversion/converters/impl/pooling.cpp +++ b/core/conversion/converters/impl/pooling.cpp @@ -8,7 +8,7 @@ namespace converters { namespace impl { namespace { -auto pooling_registrations = RegisterNodeConversionPatterns() +auto pooling_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], int[2] dilation=[1, 1], bool ceil_mode=False) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/reduce.cpp b/core/conversion/converters/impl/reduce.cpp index 0127f83285..16e0d9dd83 100644 --- a/core/conversion/converters/impl/reduce.cpp +++ b/core/conversion/converters/impl/reduce.cpp @@ -11,7 +11,7 @@ namespace { -auto reduce_registrations = RegisterNodeConversionPatterns() +auto reduce_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::mean(Tensor self, *, ScalarType? 
dtype=None) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/converters/impl/shape.cpp b/core/conversion/converters/impl/shape.cpp index d5b3577a34..613ce43fe9 100644 --- a/core/conversion/converters/impl/shape.cpp +++ b/core/conversion/converters/impl/shape.cpp @@ -9,7 +9,7 @@ namespace converters { namespace impl { namespace { -static auto shape_registrations = RegisterNodeConversionPatterns() +static auto shape_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ // To use in static input size cases (explicit batch) "aten::size.int(Tensor self, int dim) -> (Tensor)", diff --git a/core/conversion/converters/impl/shuffle.cpp b/core/conversion/converters/impl/shuffle.cpp index ceda35a5d9..951635a8fc 100644 --- a/core/conversion/converters/impl/shuffle.cpp +++ b/core/conversion/converters/impl/shuffle.cpp @@ -9,7 +9,7 @@ namespace converters { namespace impl { namespace { -static auto shuffle_registrations = RegisterNodeConversionPatterns() +static auto shuffle_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::flatten.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { @@ -50,12 +50,10 @@ static auto shuffle_registrations = RegisterNodeConversionPatterns() [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { auto in = args[0].ITensor(); auto in_shape = util::toVec(in->getDimensions()); - auto ex_tensor = torch::rand(in_shape); - auto new_shape = ex_tensor.view(args[1].unwrapToIntList().vec()).sizes(); auto shuffle = ctx->net->addShuffle(*in); TRTORCH_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n); - shuffle->setReshapeDimensions(util::toDims(new_shape)); + shuffle->setReshapeDimensions(util::toDims(args[1].unwrapToIntList().vec())); shuffle->setName(util::node_info(n).c_str()); auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0)); diff --git a/core/conversion/converters/impl/softmax.cpp b/core/conversion/converters/impl/softmax.cpp index 35f6f04ef1..6a81b974a2 100644 --- a/core/conversion/converters/impl/softmax.cpp +++ b/core/conversion/converters/impl/softmax.cpp @@ -7,7 +7,7 @@ namespace converters { namespace impl { namespace { -static auto softmax_registrations = RegisterNodeConversionPatterns() +static auto softmax_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns() .pattern({ "aten::softmax.int(Tensor self, int dim, int? 
dtype=None) -> (Tensor)", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool { diff --git a/core/conversion/tensorcontainer/TensorContainer.cpp b/core/conversion/tensorcontainer/TensorContainer.cpp index 536d578eae..6fad66335d 100644 --- a/core/conversion/tensorcontainer/TensorContainer.cpp +++ b/core/conversion/tensorcontainer/TensorContainer.cpp @@ -6,7 +6,7 @@ namespace conversion { namespace { static auto tensor_container = - torch::class_<TensorContainer>("_eval_ivalue_types", "TensorContainer") + torch::class_<TensorContainer>("_trtorch_eval_ivalue_types", "TensorContainer") .def(torch::init<>()); } // namespace } // conversion diff --git a/core/conversion/var/BUILD b/core/conversion/var/BUILD index e1c92efb12..247f939e48 100644 --- a/core/conversion/var/BUILD +++ b/core/conversion/var/BUILD @@ -30,7 +30,7 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar") pkg_tar( name = "include", - package_dir = "core/conversion/arg/", + package_dir = "core/conversion/var/", srcs = [ "Var.h", "Var_inl.h" diff --git a/core/execution/BUILD b/core/execution/BUILD index 009092d3e6..1741249624 100644 --- a/core/execution/BUILD +++ b/core/execution/BUILD @@ -14,7 +14,6 @@ cc_library( ], srcs = [ "TRTEngine.cpp", - "TRTEngineManager.cpp", "register_trt_op.cpp", ], deps = [ diff --git a/core/execution/TRTEngine.cpp b/core/execution/TRTEngine.cpp index 3370ea6f5b..3d4dbc8033 100644 --- a/core/execution/TRTEngine.cpp +++ b/core/execution/TRTEngine.cpp @@ -10,12 +10,32 @@ namespace trtorch { namespace core { namespace execution { -TRTEngine::TRTEngine(nvinfer1::ILogger& logger, std::string& serialized_engine) { +std::string slugify(std::string s) { + std::replace(s.begin(), s.end(), '.', '_'); + return s; +} + +TRTEngine::TRTEngine(std::string serialized_engine) + : logger(std::string("[] - "), + util::logging::get_logger().get_reportable_severity(), + util::logging::get_logger().get_is_colored_output_on()) { + std::string _name = "deserialized_trt"; + new (this) TRTEngine(_name, serialized_engine); +} + +TRTEngine::TRTEngine(std::string mod_name, std::string serialized_engine) + : logger(std::string("[") + mod_name + std::string("_engine] - "), + util::logging::get_logger().get_reportable_severity(), + util::logging::get_logger().get_is_colored_output_on()) { + rt = nvinfer1::createInferRuntime(logger); + name = slugify(mod_name) + "_engine"; + cuda_engine = rt->deserializeCudaEngine(serialized_engine.c_str(), serialized_engine.size()); // Easy way to get a unique name for each engine, maybe there is a more descriptive way (using something associated with the graph maybe) id = reinterpret_cast<EngineID>(cuda_engine); + exec_ctx = cuda_engine->createExecutionContext(); uint64_t inputs = 0; @@ -40,7 +60,28 @@ TRTEngine& TRTEngine::operator=(const TRTEngine& other) { return (*this); } +// TODO: Implement a call method +// c10::List<at::Tensor> TRTEngine::Run(c10::List<at::Tensor> inputs) { +// auto input_vec = inputs.vec(); +// auto output_vec = RunCudaEngine(exec_ctx, num_io, input_vec); +// +// return c10::List<at::Tensor>(output_vec); +// } + +static auto TRTORCH_UNUSED TRTEngineTSRegistrtion = torch::class_<TRTEngine>("tensorrt", "Engine") + .def(torch::init<std::string>()) + // TODO: .def("__call__", &TRTEngine::Run) + // TODO: .def("run", &TRTEngine::Run) + .def_pickle( + [](const c10::intrusive_ptr<TRTEngine>& self) -> std::string { + auto serialized_engine = self->cuda_engine->serialize(); + return std::string((const char*)serialized_engine->data(), serialized_engine->size()); + }, + 
[](std::string seralized_engine) -> c10::intrusive_ptr<TRTEngine> { + return c10::make_intrusive<TRTEngine>(std::move(seralized_engine)); + } + ); + } // namespace execution } // namespace core } // namespace trtorch - diff --git a/core/execution/TRTEngineManager.cpp b/core/execution/TRTEngineManager.cpp deleted file mode 100644 index 27a6aeff28..0000000000 --- a/core/execution/TRTEngineManager.cpp +++ /dev/null @@ -1,82 +0,0 @@ -#include "core/util/prelude.h" -#include "core/execution/execution.h" - -namespace trtorch { -namespace core { -namespace execution { -namespace { -class TRTEngineManager { -public: - TRTEngineManager() - : logger_("[TRTorch Execution Manager] - ", - util::logging::get_logger().get_reportable_severity(), - util::logging::get_logger().get_is_colored_output_on()) { - } - - TRTEngine* get_engine(EngineID uid) { - auto iter = engine_registry_.find(uid); - - TRTORCH_ASSERT(iter != engine_registry_.end(), "Unabled to find requested engine (ID: " << uid << ") in TensorRT Execution Manager"); - - return &(iter->second); - } - - // TODO: Should we have standing engines ready to run or should we be creating execution contexts JIT? - EngineID register_engine(std::string& serialized_engine) { - auto engine = TRTEngine(logger_, serialized_engine); - EngineID uid = engine.id; - engine_registry_[uid] = std::move(engine); - LOG_DEBUG(logger_, "Registering new engine (ID: " << std::hex << uid << ") in TensorRT Execution Manager"); - return uid; - } - - void deregister_engine(EngineID uid) { - auto iter = engine_registry_.find(uid); - TRTORCH_ASSERT(iter != engine_registry_.end(), "Unabled to find requested engine (ID: " << uid << ") in TensorRT Execution Manager"); - - auto engine = iter->second; - // Doing this here since for some reason the destructor causes segfaults - engine.exec_ctx->destroy(); - engine.cuda_engine->destroy(); - engine_registry_.erase(uid); - } - -private: - util::logging::TRTorchLogger logger_; - std::unordered_map<EngineID, TRTEngine> engine_registry_; -}; - -TRTEngineManager& get_engine_manager() { - static TRTEngineManager engine_man; - return engine_man; -} -} // namespace - -uint64_t RegisterEngineFromSerializedEngine(std::string& serialized_engine) { - return get_engine_manager().register_engine(serialized_engine); -} - -nvinfer1::ICudaEngine* GetCudaEngine(EngineID id) { - // Assuming exception will be thrown inside the manager if there is no corresponding engine - return get_engine_manager().get_engine(id)->cuda_engine; -} - -nvinfer1::IExecutionContext* GetExecCtx(EngineID id) { - // Assuming exception will be thrown inside the manager if there is no corresponding engine - return get_engine_manager().get_engine(id)->exec_ctx; -} - -std::pair<uint64_t, uint64_t> GetEngineIO(EngineID id) { - // Assuming exception will be thrown inside the manager if there is no corresponding engine - return get_engine_manager().get_engine(id)->num_io; -} - -void DeregisterEngine(EngineID id) { - get_engine_manager().deregister_engine(id); -} - -} // namespace execution -} // namespace core -} // namespace trtorch - - diff --git a/core/execution/execution.h b/core/execution/execution.h index 8c50dd4207..9b0ca41cb4 100644 --- a/core/execution/execution.h +++ b/core/execution/execution.h @@ -2,6 +2,9 @@ #include <utility> #include "NvInfer.h" #include "ATen/core/function_schema.h" +#include "torch/custom_class.h" +#include "core/util/prelude.h" + namespace trtorch { namespace core { @@ -9,25 +12,25 @@ namespace execution { using EngineID = int64_t; -struct TRTEngine { 
+struct TRTEngine : torch::CustomClassHolder { // Each engine needs it's own runtime object nvinfer1::IRuntime* rt; nvinfer1::ICudaEngine* cuda_engine; nvinfer1::IExecutionContext* exec_ctx; std::pair<uint64_t, uint64_t> num_io; EngineID id; + std::string name; + util::logging::TRTorchLogger logger; TRTEngine() = default; - TRTEngine(nvinfer1::ILogger& logger, std::string& serialized_engine); + TRTEngine(std::string serialized_engine); + TRTEngine(std::string mod_name, std::string serialized_engine); TRTEngine& operator=(const TRTEngine& other); + // TODO: Implement a call method + //c10::List<at::Tensor> Run(c10::List<at::Tensor> inputs); }; -void RegisterEngineOp(TRTEngine& engine); -uint64_t RegisterEngineFromSerializedEngine(std::string& serialized_engine); -nvinfer1::ICudaEngine* GetCudaEngine(EngineID id); -nvinfer1::IExecutionContext* GetExecCtx(EngineID id); -std::pair<uint64_t, uint64_t> GetEngineIO(EngineID id); -void DeregisterEngine(EngineID id); +std::vector<at::Tensor> RunCudaEngine(nvinfer1::IExecutionContext* ctx, std::pair<uint64_t, uint64_t> io, std::vector<at::Tensor>& inputs); } // namespace execution } // namespace core diff --git a/core/execution/register_trt_op.cpp b/core/execution/register_trt_op.cpp index d9f57452dc..b7c10912be 100644 --- a/core/execution/register_trt_op.cpp +++ b/core/execution/register_trt_op.cpp @@ -9,7 +9,6 @@ namespace trtorch { namespace core { namespace execution { -namespace { std::vector<at::Tensor> RunCudaEngine(nvinfer1::IExecutionContext* ctx, std::pair<uint64_t, uint64_t> io, std::vector<at::Tensor>& inputs) { std::vector<void*> gpu_handles; @@ -47,6 +46,7 @@ std::vector<at::Tensor> RunCudaEngine(nvinfer1::IExecutionContext* ctx, std::pai return outputs; } +namespace { c10::AliasAnalysisKind aliasAnalysisFromSchema() { return c10::AliasAnalysisKind::FROM_SCHEMA; } @@ -54,27 +54,19 @@ c10::AliasAnalysisKind aliasAnalysisFromSchema() { // Switched to a global operator because op implementations need to be non-capturing lambdas in PYT 1.5.0+ torch::jit::RegisterOperators jit_registry({ torch::jit::Operator( - "trt::execute_engine(int id, ...) 
-> ...", + "trt::execute_engine(Tensor[] inputs, __torch__.torch.classes.tensorrt.Engine engine) -> Tensor[]", [](torch::jit::Stack& stack) -> int { - size_t num_inputs = torch::jit::pop(stack).toInt(); // Verify calling convention (right to left or left to right) - std::vector<at::Tensor> inputs; - for (uint64_t i = 0; i < num_inputs - 1; i++) { - at::Tensor in; - torch::jit::pop(stack, in); - inputs.insert(inputs.begin(), std::move(in)); - } + auto engine = torch::jit::pop(stack).toCustomClass<TRTEngine>(); + LOG_DEBUG("Attempting to run engine (ID: " << std::hex << engine->name << ")"); + + auto inputs = torch::jit::pop(stack).toTensorVector(); - int64_t id = torch::jit::pop(stack).toInt(); - LOG_DEBUG("Attempting to run engine (ID: " << std::hex << id << ")"); - auto io = GetEngineIO(id); - auto num_out = io.second; + auto io = engine->num_io; - auto ctx = GetExecCtx(id); + auto ctx = engine->exec_ctx; auto outputs = RunCudaEngine(ctx, io, inputs); - for (uint64_t o = 0; o < num_out; o++) { - torch::jit::push(stack, std::move(outputs[o])); - } + torch::jit::push(stack, std::move(outputs)); return 0; }, aliasAnalysisFromSchema()) diff --git a/cpp/api/include/trtorch/ptq.h b/cpp/api/include/trtorch/ptq.h index afae26a85c..ce59395b4c 100644 --- a/cpp/api/include/trtorch/ptq.h +++ b/cpp/api/include/trtorch/ptq.h @@ -104,18 +104,17 @@ class Int8Calibrator : Algorithm { std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file(cache_file_path_, std::ios::binary); - cache_file >> std::noskipws; - if (cache_file.good()) { - std::copy(std::istream_iterator<char>(cache_file), - std::istream_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? cache_.data() : nullptr; } return nullptr; } @@ -220,23 +219,17 @@ class Int8CacheCalibrator : Algorithm { std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file; - cache_file.open(cache_file_path_, std::ios::in | std::ios::binary); - cache_file.unsetf(std::ios::skipws); - cache_file.seekg(0, std::ios::beg); - cache_.reserve(cache_file.tellg()); - cache_file.seekg(0, std::ios::beg); - if (cache_file.good()) { - std::cout << "Trying to read cache" << std::endl; - std::copy(std::istreambuf_iterator<char>(cache_file), - std::istreambuf_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? 
cache_.data() : nullptr; } diff --git a/cpp/api/include/trtorch/trtorch.h b/cpp/api/include/trtorch/trtorch.h index 9b3f98e355..8f26e0bd8f 100644 --- a/cpp/api/include/trtorch/trtorch.h +++ b/cpp/api/include/trtorch/trtorch.h @@ -142,6 +142,14 @@ struct TRTORCH_API ExtraInfo { * @return false */ constexpr bool operator==(DataType other) const { return value == other.value; } + /** + * @brief Comparision operator for DataType + * + * @param other + * @return true + * @return false + */ + constexpr bool operator==(DataType::Value other) const { return value == other; } /** * @brief Comparision operator for DataType * * @param other * @return true * @return false */ constexpr bool operator!=(DataType other) const { return value != other.value; } + /** + * @brief Comparision operator for DataType + * + * @param other + * @return true + * @return false + */ + constexpr bool operator!=(DataType::Value other) const { return value != other; } private: Value value; }; diff --git a/cpp/trtorchc/BUILD b/cpp/trtorchc/BUILD new file mode 100644 index 0000000000..7fa89836f5 --- /dev/null +++ b/cpp/trtorchc/BUILD @@ -0,0 +1,14 @@ +package(default_visibility = ["//visibility:public"]) + +cc_binary( + name = "trtorchc", + srcs = [ + "main.cpp" + ], + deps = [ + "@libtorch//:libtorch", + "@libtorch//:caffe2", + "//third_party/args", + "//cpp/api:trtorch" + ], +) diff --git a/cpp/trtorchc/README.md b/cpp/trtorchc/README.md new file mode 100644 index 0000000000..25a59efb27 --- /dev/null +++ b/cpp/trtorchc/README.md @@ -0,0 +1,87 @@ +# trtorchc + +trtorchc is a compiler CLI application using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use it). The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + +All that is required to run the program after compilation is for C++ linking against libtrtorch.so +or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with `torch.jit.load()` and run like you would run any other module. + + +``` +trtorchc [input_file_path] [output_file_path] + [input_shapes...]
{OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + + OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be >= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options +``` + +e.g. 
+``` +trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16 +``` \ No newline at end of file diff --git a/cpp/trtorchc/main.cpp b/cpp/trtorchc/main.cpp new file mode 100644 index 0000000000..5dab59a4ea --- /dev/null +++ b/cpp/trtorchc/main.cpp @@ -0,0 +1,366 @@ +#include <iostream> +#include <sstream> +#include <stdlib.h> +#include <unistd.h> + +#ifdef linux +#include <linux/limits.h> +#else +#define PATH_MAX 260 +#endif + +#include "NvInfer.h" +#include "third_party/args/args.hpp" +#include "torch/torch.h" +#include "torch/script.h" +#include "trtorch/trtorch.h" + +bool checkRtol(const at::Tensor& diff, const std::vector<at::Tensor> inputs, float threshold) { + double maxValue = 0.0; + for (auto& tensor : inputs) { + maxValue = fmax(tensor.abs().max().item<float>(), maxValue); + } + trtorch::logging::log(trtorch::logging::Level::kDEBUG, std::string("Max Difference: ") + std::to_string(diff.abs().max().item<float>())); + trtorch::logging::log(trtorch::logging::Level::kDEBUG, std::string("Acceptable Threshold: ") + std::to_string(threshold)); + return diff.abs().max().item<float>() <= threshold * maxValue; +} + +bool almostEqual(const at::Tensor& a, const at::Tensor& b, float threshold) { + return checkRtol(a - b, {a, b}, threshold); +} + +std::vector<int64_t> parseSingleDim(std::string shape_str) { + std::vector<int64_t> shape; + std::stringstream ss; + for (auto c : shape_str) { + if (c == '(' || c == ' ') { + continue; + } else if (c == ',') { + int64_t dim; + ss >> dim; + shape.push_back(dim); + ss.clear(); + } else if (c == ')') { + int64_t dim; + ss >> dim; + shape.push_back(dim); + ss.clear(); + return shape; + } else { + ss << c; + } + } + + trtorch::logging::log(trtorch::logging::Level::kERROR, "Shapes need dimensions delimited by comma in parentheses, \"(N,..,C,H,W)\"\n e.g \"(3,3,200,200)\""); + exit(1); + return {}; +} + +trtorch::ExtraInfo::InputRange parseDynamicDim(std::string shape_str) { + shape_str = shape_str.substr(1, shape_str.size() - 2); + std::vector<std::vector<int64_t>> shape; + std::stringstream ss; + + std::string delimiter = ";"; + + size_t pos = 0; + while ((pos = shape_str.find(delimiter)) != std::string::npos) { + auto token = shape_str.substr(0, pos); + auto range = parseSingleDim(token); + shape_str.erase(0, pos + delimiter.length()); + shape.push_back(range); + } + + auto range = parseSingleDim(shape_str); + shape.push_back(range); + + if (shape.size() != 3) { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Dynamic shapes need three sets of dimensions delimited by semi-colons, \"[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]\"\n e.g \"[(3,3,100,100);(3,3,200,200);(3,3,300,300)]\""); + exit(1); + } + + return trtorch::ExtraInfo::InputRange(shape[0], shape[1], shape[2]); +} + +std::string get_cwd() { + char buff[FILENAME_MAX]; //create string buffer to hold path + if (getcwd(buff, FILENAME_MAX)) { + std::string current_working_dir(buff); + return current_working_dir; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Unable to get current directory"); + exit(1); + } +} + +std::string real_path(std::string path) { + auto abs_path = path; + char real_path_c[PATH_MAX]; + char* res = realpath(abs_path.c_str(), real_path_c); + if (res) { + return std::string(real_path_c); + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, std::string("Unable to find file ") + abs_path); + exit(1); + } +} + +std::string 
resolve_path(std::string path) { + auto rpath = path; + if (!(rpath.rfind("/", 0) == 0)) { + rpath = get_cwd() + '/' + rpath; + } + return rpath; +} + +int main(int argc, char** argv) { + trtorch::logging::set_is_colored_output_on(true); + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kWARNING); + trtorch::logging::set_logging_prefix(""); + + + args::ArgumentParser parser("TRTorch is a compiler for TorchScript, it will compile and optimize TorchScript programs to run on NVIDIA GPUs using TensorRT", ""); + args::HelpFlag help(parser, "help", "Display this help menu", {'h', "help"}); + + args::Group group(parser, "Verbiosity of the compiler", args::Group::Validators::AtMostOne); + args::Flag verbose(group, "verbose", "Dumps debugging information about the compilation process onto the console", {'v', "verbose"}); + args::Flag warning(group, "warning", "Disables warnings generated during compilation onto the console (warnings are on by default)", {'w', "warnings"}); + args::Flag info(group, "info", "Dumps info messages generated during compilation onto the console", {"i", "info"}); + + args::Flag build_debuggable_engine(parser, "build-debuggable-engine", "Creates a debuggable engine", {"build-debuggable-engine"}); + args::Flag use_strict_types(parser, "use-strict-types", "Restrict operating type to only use set default operation precision (op_precision)", {"use-strict-types"}); + args::Flag allow_gpu_fallback(parser, "allow-gpu-fallback", "(Only used when targeting DLA (device-type)) Lets engine run layers on GPU if they are not supported on DLA", {"allow-gpu-fallback"}); + + args::ValueFlag<std::string> op_precision(parser, "precision", "Default operating precision for the engine (Int8 requires a calibration-cache argument) [ float | float32 | f32 | half | float16 | f16 | int8 | i8 ] (default: float)", {'p', "default-op-precision"}); + args::ValueFlag<std::string> device_type(parser, "type", "The type of device the engine should be built for [ gpu | dla ] (default: gpu)", {'d', "device-type"}); + args::ValueFlag<std::string> engine_capability(parser, "capability", "The type of device the engine should be built for [ default | safe_gpu | safe_dla ]", {"engine-capability"}); + + args::ValueFlag<std::string> calibration_cache_file(parser, "file_path", "Path to calibration cache file to use for post training quantization", {"calibration-cache-file"}); + args::ValueFlag<int> num_min_timing_iters(parser, "num_iters", "Number of minimization timing iterations used to select kernels", {"num-min-timing-iter"}); + args::ValueFlag<int> num_avg_timing_iters(parser, "num_iters", "Number of averaging timing iterations used to select kernels", {"num-avg-timing-iters"}); + args::ValueFlag<int> workspace_size(parser, "workspace_size", "Maximum size of workspace given to TensorRT", {"workspace-size"}); + args::ValueFlag<int> max_batch_size(parser, "max_batch_size", "Maximum batch size (must be >= 1 to be set, 0 means not set)", {"max-batch-size"}); + args::ValueFlag<double> threshold(parser, "threshold", "Maximum acceptable numerical deviation from standard torchscript output (default 2e-5)", {'t', "threshold"}); + + + args::Flag save_engine(parser, "save_engine", "Instead of compiling a full a TorchScript program, save the created engine to the path specified as the output path", {"save-engine"}); + args::Positional<std::string> input_path(parser, "input_file_path", "Path to input TorchScript file"); + args::Positional<std::string> output_path(parser, "output_file_path", "Path for 
compiled TorchScript (or TensorRT engine) file"); + args::PositionalList<std::string> input_shapes(parser, "input_shapes", "Sizes for inputs to engine, can either be a single size or a range defined by Min, Optimal, Max sizes, e.g. \"(N,..,C,H,W)\" \"[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]\""); + + + try + { + parser.ParseCLI(argc, argv); + } + catch (args::Help) + { + std::cout << parser; + return 0; + } + catch (args::ParseError e) + { + std::cerr << e.what() << std::endl; + std::cerr << parser; + return 1; + } + catch (args::ValidationError e) + { + std::cerr << e.what() << std::endl; + std::cerr << parser; + return 1; + } + + if (verbose) { + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kDEBUG); + } else if (info) { + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kINFO); + } else if (warning) { + trtorch::logging::set_reportable_log_level(trtorch::logging::Level::kERROR); + } + + + std::vector<trtorch::ExtraInfo::InputRange> ranges; + for (const auto shapes : args::get(input_shapes)) { + if (shapes.rfind("(", 0) == 0) { + ranges.push_back(trtorch::ExtraInfo::InputRange(parseSingleDim(shapes))); + } else if (shapes.rfind("[", 0) == 0) { + ranges.push_back(parseDynamicDim(shapes)); + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Dimensions should be specified in one of these types \"(N,..,C,H,W)\" \"[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]\"\n e.g \"(3,3,300,300)\" \"[(3,3,100,100);(3,3,200,200);(3,3,300,300)]\""); + std::cerr << parser; + exit(1); + } + } + + auto compile_settings = trtorch::ExtraInfo(ranges); + + if (build_debuggable_engine) { + compile_settings.debug = true; + } + + if (use_strict_types) { + compile_settings.strict_types = true; + } + + if (allow_gpu_fallback) { + compile_settings.allow_gpu_fallback = true; + } + + std::string calibration_cache_file_path = ""; + if (calibration_cache_file) { + calibration_cache_file_path = resolve_path(args::get(calibration_cache_file)); + } + + auto calibrator = trtorch::ptq::make_int8_cache_calibrator(calibration_cache_file_path); + + if (op_precision) { + auto precision = args::get(op_precision); + std::transform(precision.begin(), precision.end(), precision.begin(), [](unsigned char c){ return std::tolower(c); }); + if (precision == "float" || precision == "float32" || precision == "f32") { + compile_settings.op_precision = torch::kF32; + } else if (precision == "half" || precision == "float16" || precision == "f16") { + compile_settings.op_precision = torch::kF16; + } else if (precision == "int8" || precision == "i8") { + compile_settings.op_precision = torch::kI8; + if (calibration_cache_file) { + compile_settings.ptq_calibrator = calibrator; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "If targeting INT8 default operating precision with trtorchc, a calibration cache file must be provided"); + std::cerr << parser; + return 1; + } + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Invalid default operating precision, options are [ float | float32 | f32 | half | float16 | f16 | int8 | i8 ]"); + std::cerr << parser; + return 1; + } + } + + if (device_type) { + auto device = args::get(device_type); + std::transform(device.begin(), device.end(), device.begin(), [](unsigned char c){ return std::tolower(c); }); + if (device == "gpu") { + compile_settings.device = trtorch::ExtraInfo::DeviceType::kGPU; + } else if (device == "dla") { + 
compile_settings.device = trtorch::ExtraInfo::DeviceType::kDLA; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Invalid device type, options are [ gpu | dla ]"); + std::cerr << parser; + return 1; + } + } + + if (engine_capability) { + auto capability = args::get(engine_capability); + std::transform(capability.begin(), capability.end(), capability.begin(), [](unsigned char c){ return std::tolower(c); }); + if (capability == "default") { + compile_settings.capability = trtorch::ExtraInfo::EngineCapability::kDEFAULT; + } else if (capability == "safe_gpu") { + compile_settings.capability = trtorch::ExtraInfo::EngineCapability::kSAFE_GPU; + } else if (capability == "safe_dla") { + compile_settings.capability = trtorch::ExtraInfo::EngineCapability::kSAFE_DLA; + } else { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Invalid engine capability, options are [ default | safe_gpu | safe_dla ]"); + std::cerr << parser; + return 1; + } + } + + if (num_min_timing_iters) { + compile_settings.num_min_timing_iters = args::get(num_min_timing_iters); + } + + if (num_avg_timing_iters) { + compile_settings.num_avg_timing_iters = args::get(num_avg_timing_iters); + } + + if (workspace_size) { + compile_settings.workspace_size = args::get(workspace_size); + } + + if (max_batch_size) { + compile_settings.max_batch_size = args::get(max_batch_size); + } + + auto real_input_path = resolve_path(args::get(input_path)); + auto real_output_path = resolve_path(args::get(output_path)); + + torch::jit::Module mod; + try { + // Deserialize the ScriptModule from a file using torch::jit::load(). + mod = torch::jit::load(real_input_path); + } + catch (const c10::Error& e) { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Error loading the model (path may be incorrect)"); + std::cerr << parser; + return 1; + } + + if (!trtorch::CheckMethodOperatorSupport(mod, "forward")) { + trtorch::logging::log(trtorch::logging::Level::kERROR, "Module is not currently supported by TRTorch"); + return 1; + } + + if (save_engine) { + auto engine = trtorch::ConvertGraphToTRTEngine(mod, "forward", compile_settings); + std::ofstream out(real_output_path); + out << engine; + out.close(); + } else { + auto trt_mod = trtorch::CompileGraph(mod, compile_settings); + + if (compile_settings.op_precision == trtorch::ExtraInfo::DataType::kFloat) { + double threshold_val = 2e-5; + if (threshold) { + threshold_val = args::get(threshold); + } + + std::vector<torch::jit::IValue> jit_inputs_ivalues; + std::vector<torch::jit::IValue> trt_inputs_ivalues; + + for (auto i : ranges) { + auto in = at::randn(i.opt, {at::kCUDA}); + jit_inputs_ivalues.push_back(in.clone()); + trt_inputs_ivalues.push_back(in.clone()); + } + + torch::jit::IValue jit_results_ivalues = mod.forward(jit_inputs_ivalues); + std::vector<at::Tensor> jit_results; + if (jit_results_ivalues.isTensor()) { + jit_results.push_back(jit_results_ivalues.toTensor()); + } else { + auto results = jit_results_ivalues.toTuple()->elements(); + for (auto r : results) { + jit_results.push_back(r.toTensor()); + } + } + + + torch::jit::IValue trt_results_ivalues = trt_mod.forward(trt_inputs_ivalues); + std::vector<at::Tensor> trt_results; + if (trt_results_ivalues.isTensor()) { + trt_results.push_back(trt_results_ivalues.toTensor()); + } else { + auto results = trt_results_ivalues.toTuple()->elements(); + for (auto r : results) { + trt_results.push_back(r.toTensor()); + } + } + + for (size_t i = 0; i < trt_results.size(); i++) { + if (!almostEqual(jit_results[i], 
trt_results[i].reshape_as(jit_results[i]), threshold_val)) { + std::ostringstream threshold_ss; + threshold_ss << threshold_val; + trtorch::logging::log(trtorch::logging::Level::kWARNING, std::string("Maximum numerical deviation for output exceeds set threshold (") + threshold_ss.str() + std::string(")")); + } + } + } else { + trtorch::logging::log(trtorch::logging::Level::kWARNING, "Due to change in operating data type, numerical precision is not checked"); + } + + trt_mod.save(real_output_path); + } + + return 0; +} \ No newline at end of file diff --git a/cpp/trtorchexec/main.cpp b/cpp/trtorchexec/main.cpp index 2085928b6f..8b3e114e62 100644 --- a/cpp/trtorchexec/main.cpp +++ b/cpp/trtorchexec/main.cpp @@ -38,6 +38,7 @@ int main(int argc, const char* argv[]) { } mod.to(at::kCUDA); + mod.eval(); std::vector<std::vector<int64_t>> dims; for (int i = 2; i < argc; i++) { @@ -92,7 +93,7 @@ int main(int argc, const char* argv[]) { std::cout << "Running TRT module" << std::endl; torch::jit::IValue trt_results_ivalues = trt_mod.forward(trt_inputs_ivalues); std::vector<at::Tensor> trt_results; - if (trt_results_ivalues.isTensor()) { + if (trt_results_ivalues.isTensor()) { trt_results.push_back(trt_results_ivalues.toTensor()); } else { auto results = trt_results_ivalues.toTuple()->elements(); @@ -106,5 +107,8 @@ int main(int argc, const char* argv[]) { } std::cout << "Converted Engine saved to /tmp/engine_converted_from_jit.trt" << std::endl; + + trt_mod.save("/tmp/ts_trt.ts"); + std::cout << "Compiled TorchScript program saved to /tmp/ts_trt.ts" << std::endl; std::cout << "ok\n"; }

[The remainder of the patch is regenerated HTML documentation under docs/ and is summarized here rather than reproduced: docs/._index.html is added as a binary file; every generated page gains a navigation entry linking to the new tutorials/trtorchc.html page; docs/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html picks up reference entries for the new DataType::Value operator== and operator!= overloads; and the logging.h and ptq.h file pages are refreshed to reflect the new trtorch/logging.h include and the reworked calibrator declarations in ptq.h.]
class="p">()),</span> <span class="n">cache_file_path_</span><span class="p">(</span><span class="n">cache_file_path</span><span class="p">),</span> <span class="n">use_cache_</span><span class="p">(</span><span class="n">use_cache</span><span class="p">)</span> <span class="p">{</span> + <span class="k">for</span> <span class="p">(</span><span class="k">auto</span> <span class="nl">batch</span> <span class="p">:</span> <span class="o">*</span><span class="n">dataloader_</span><span class="p">)</span> <span class="p">{</span> + <span class="n">batched_data_</span><span class="p">.</span><span class="n">push_back</span><span class="p">(</span><span class="n">batch</span><span class="p">.</span><span class="n">data</span><span class="p">);</span> + <span class="p">}</span> + <span class="n">it_</span> <span class="o">=</span> <span class="n">batched_data_</span><span class="p">.</span><span class="n">begin</span><span class="p">();</span> + <span class="p">}</span> <span class="kt">int</span> <span class="n">getBatchSize</span><span class="p">()</span> <span class="k">const</span> <span class="k">override</span> <span class="p">{</span> <span class="c1">// HACK: TRTorch only uses explict batch sizing, INT8 Calibrator does not</span> @@ -546,26 +561,15 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="p">}</span> <span class="kt">bool</span> <span class="n">getBatch</span><span class="p">(</span><span class="kt">void</span><span class="o">*</span> <span class="n">bindings</span><span class="p">[],</span> <span class="k">const</span> <span class="kt">char</span><span class="o">*</span> <span class="n">names</span><span class="p">[],</span> <span class="kt">int</span> <span class="n">nbBindings</span><span class="p">)</span> <span class="k">override</span> <span class="p">{</span> - <span class="c1">// HACK: doesnt seem like the first try in the initializer list works</span> - <span class="k">if</span> <span class="p">(</span><span class="o">!</span> <span class="n">it_created_</span><span class="p">)</span> <span class="p">{</span> - <span class="n">it_</span> <span class="o">=</span> <span class="n">dataloader_</span><span class="o">-&gt;</span><span class="n">begin</span><span class="p">();</span> - <span class="n">it_created_</span> <span class="o">=</span> <span class="nb">true</span><span class="p">;</span> - <span class="p">}</span> - - <span class="k">if</span> <span class="p">(</span><span class="n">it_</span> <span class="o">==</span> <span class="n">dataloader_</span><span class="o">-&gt;</span><span class="n">end</span><span class="p">())</span> <span class="p">{</span> + <span class="k">if</span> <span class="p">(</span><span class="n">it_</span> <span class="o">!=</span> <span class="n">batched_data_</span><span class="p">.</span><span class="n">end</span><span class="p">())</span> <span class="p">{</span> + <span class="k">auto</span> <span class="n">status</span> <span class="o">=</span> <span class="n">get_batch_impl</span><span class="p">(</span><span class="n">bindings</span><span class="p">,</span> <span class="n">names</span><span class="p">,</span> <span class="n">nbBindings</span><span class="p">,</span> <span class="o">*</span><span class="n">it_</span><span class="p">);</span> + <span class="n">it_</span> <span class="o">=</span> <span class="o">++</span><span class="n">it_</span><span class="p">;</span> + <span class="k">return</span> <span class="n">status</span><span class="p">;</span> + <span class="p">}</span> <span 
class="k">else</span> <span class="p">{</span> + <span class="c1">// Reset iterator if incase calibrator is going to be used again</span> + <span class="n">it_</span> <span class="o">=</span> <span class="n">batched_data_</span><span class="p">.</span><span class="n">begin</span><span class="p">();</span> <span class="k">return</span> <span class="nb">false</span><span class="p">;</span> <span class="p">}</span> - - <span class="k">auto</span> <span class="n">batch</span> <span class="o">=</span> <span class="o">*</span><span class="n">it_</span><span class="p">;</span> - - <span class="k">for</span> <span class="p">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">nbBindings</span><span class="p">;</span> <span class="n">i</span><span class="o">++</span><span class="p">)</span> <span class="p">{</span> - <span class="k">auto</span> <span class="n">data</span> <span class="o">=</span> <span class="n">batch</span><span class="p">.</span><span class="n">data</span><span class="p">;</span> - <span class="n">data</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="n">to</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">kCUDA</span><span class="p">).</span><span class="n">contiguous</span><span class="p">();</span> - <span class="n">bindings</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="n">data_ptr</span><span class="p">();</span> - <span class="p">}</span> - - <span class="n">it_</span> <span class="o">=</span> <span class="o">++</span><span class="n">it_</span><span class="p">;</span> - <span class="k">return</span> <span class="nb">true</span><span class="p">;</span> <span class="p">}</span> <span class="k">const</span> <span class="kt">void</span><span class="o">*</span> <span class="n">readCalibrationCache</span><span class="p">(</span><span class="kt">size_t</span><span class="o">&amp;</span> <span class="n">length</span><span class="p">)</span> <span class="k">override</span> <span class="p">{</span> @@ -573,18 +577,17 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="n">std</span><span class="o">::</span><span class="n">stringstream</span> <span class="n">ss</span><span class="p">;</span> <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Reading Calibration Cache from "</span> <span class="o">&lt;&lt;</span> <span class="n">cache_file_path_</span><span class="p">;</span> <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kINFO</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">cache_</span><span class="p">.</span><span class="n">clear</span><span class="p">();</span> - <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">cache_file</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span 
class="n">binary</span><span class="p">);</span> - <span class="n">cache_file</span> <span class="o">&gt;&gt;</span> <span class="n">std</span><span class="o">::</span><span class="n">noskipws</span><span class="p">;</span> - <span class="k">if</span> <span class="p">(</span><span class="n">cache_file</span><span class="p">.</span><span class="n">good</span><span class="p">())</span> <span class="p">{</span> - <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">cache_file</span><span class="p">),</span> - <span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> - <span class="n">std</span><span class="o">::</span><span class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> - <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Cache read"</span><span class="p">;</span> - <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">input</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">binary</span><span class="p">);</span> + <span class="n">input</span> <span class="o">&gt;&gt;</span> <span class="n">std</span><span class="o">::</span><span class="n">noskipws</span><span class="p">;</span> + <span class="k">if</span> <span class="p">(</span><span class="n">input</span><span class="p">.</span><span class="n">good</span><span class="p">())</span> <span class="p">{</span> + <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">input</span><span class="p">),</span> <span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> + <span class="n">std</span><span class="o">::</span><span class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> + <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="s">"Cache read"</span><span class="p">);</span> <span class="p">}</span> - <span class="n">cache_size_</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span 
class="n">size</span><span class="p">();</span> - <span class="k">return</span> <span class="n">cache_size_</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span class="p">;</span> + <span class="n">length</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span class="n">size</span><span class="p">();</span> + <span class="k">return</span> <span class="n">length</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span class="p">;</span> <span class="p">}</span> <span class="k">return</span> <span class="k">nullptr</span><span class="p">;</span> <span class="p">}</span> @@ -603,12 +606,13 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="k">private</span><span class="o">:</span> <span class="n">DataLoader</span><span class="o">*</span> <span class="n">dataloader_</span><span class="p">;</span> - <span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">Iterator</span><span class="o">&lt;</span><span class="n">Batch</span><span class="o">&gt;</span> <span class="n">it_</span><span class="p">;</span> <span class="k">const</span> <span class="n">std</span><span class="o">::</span><span class="n">string</span><span class="o">&amp;</span> <span class="n">cache_file_path_</span><span class="p">;</span> <span class="kt">size_t</span> <span class="n">cache_size_</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="kt">bool</span> <span class="n">use_cache_</span><span class="p">;</span> <span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span> <span class="n">cache_</span><span class="p">;</span> - <span class="kt">bool</span> <span class="n">it_created_</span> <span class="o">=</span> <span class="nb">false</span><span class="p">;</span> + <span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="o">&gt;</span> <span class="n">batched_data_</span><span class="p">;</span> + <span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="o">&gt;::</span><span class="n">iterator</span> <span class="n">it_</span><span class="p">;</span> + <span class="p">};</span> <span class="k">template</span><span class="o">&lt;</span><span class="k">typename</span> <span class="n">Algorithm</span><span class="o">&gt;</span> @@ -632,23 +636,17 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-ptq-h--page-root"> <span class="n">std</span><span class="o">::</span><span class="n">stringstream</span> <span class="n">ss</span><span class="p">;</span> <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Reading Calibration Cache from "</span> <span class="o">&lt;&lt;</span> <span class="n">cache_file_path_</span><span class="p">;</span> <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span 
class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kINFO</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">cache_</span><span class="p">.</span><span class="n">clear</span><span class="p">();</span> - <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">cache_file</span><span class="p">;</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">open</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">in</span> <span class="o">|</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">binary</span><span class="p">);</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">unsetf</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">skipws</span><span class="p">);</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">seekg</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">beg</span><span class="p">);</span> - <span class="n">cache_</span><span class="p">.</span><span class="n">reserve</span><span class="p">(</span><span class="n">cache_file</span><span class="p">.</span><span class="n">tellg</span><span class="p">());</span> - <span class="n">cache_file</span><span class="p">.</span><span class="n">seekg</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">beg</span><span class="p">);</span> - <span class="k">if</span> <span class="p">(</span><span class="n">cache_file</span><span class="p">.</span><span class="n">good</span><span class="p">())</span> <span class="p">{</span> - <span class="n">std</span><span class="o">::</span><span class="n">cout</span> <span class="o">&lt;&lt;</span> <span class="s">"Trying to read cache"</span> <span class="o">&lt;&lt;</span> <span class="n">std</span><span class="o">::</span><span class="n">endl</span><span class="p">;</span> - <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istreambuf_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">cache_file</span><span class="p">),</span> - <span class="n">std</span><span class="o">::</span><span class="n">istreambuf_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> - <span class="n">std</span><span class="o">::</span><span class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> - <span class="n">ss</span> <span class="o">&lt;&lt;</span> <span class="s">"Cache read"</span><span class="p">;</span> - <span class="n">logging</span><span class="o">::</span><span 
class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="n">ss</span><span class="p">.</span><span class="n">str</span><span class="p">());</span> + <span class="n">std</span><span class="o">::</span><span class="n">ifstream</span> <span class="n">input</span><span class="p">(</span><span class="n">cache_file_path_</span><span class="p">,</span> <span class="n">std</span><span class="o">::</span><span class="n">ios</span><span class="o">::</span><span class="n">binary</span><span class="p">);</span> + <span class="n">input</span> <span class="o">&gt;&gt;</span> <span class="n">std</span><span class="o">::</span><span class="n">noskipws</span><span class="p">;</span> + <span class="k">if</span> <span class="p">(</span><span class="n">input</span><span class="p">.</span><span class="n">good</span><span class="p">())</span> <span class="p">{</span> + <span class="n">std</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(</span><span class="n">input</span><span class="p">),</span> <span class="n">std</span><span class="o">::</span><span class="n">istream_iterator</span><span class="o">&lt;</span><span class="kt">char</span><span class="o">&gt;</span><span class="p">(),</span> + <span class="n">std</span><span class="o">::</span><span class="n">back_inserter</span><span class="p">(</span><span class="n">cache_</span><span class="p">));</span> + <span class="n">logging</span><span class="o">::</span><span class="n">log</span><span class="p">(</span><span class="n">logging</span><span class="o">::</span><span class="n">Level</span><span class="o">::</span><span class="n">kDEBUG</span><span class="p">,</span> <span class="s">"Cache read"</span><span class="p">);</span> <span class="p">}</span> - <span class="n">cache_size_</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span class="n">size</span><span class="p">();</span> - <span class="k">return</span> <span class="n">cache_size_</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span class="p">;</span> + <span class="n">length</span> <span class="o">=</span> <span class="n">cache_</span><span class="p">.</span><span class="n">size</span><span class="p">();</span> + <span class="k">return</span> <span class="n">length</span> <span class="o">?</span> <span class="n">cache_</span><span class="p">.</span><span class="n">data</span><span class="p">()</span> <span class="o">:</span> <span class="k">nullptr</span><span class="p">;</span> <span class="p">}</span> diff --git a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html index 1184e3258d..c85c92f209 100644 --- a/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html +++ b/docs/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span 
class="md-nav__link caption"> <span class="caption-text"> @@ -568,7 +573,9 @@ <h1 id="cpp-api-program-listing-file-cpp-api-include-trtorch-trtorch-h--page-roo <span class="k">operator</span> <span class="nf">Value</span><span class="p">()</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span><span class="p">;</span> <span class="p">}</span> <span class="k">explicit</span> <span class="k">operator</span> <span class="nf">bool</span><span class="p">()</span> <span class="o">=</span> <span class="k">delete</span><span class="p">;</span> <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">==</span><span class="p">(</span><span class="n">DataType</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">==</span> <span class="n">other</span><span class="p">.</span><span class="n">value</span><span class="p">;</span> <span class="p">}</span> + <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">==</span><span class="p">(</span><span class="n">DataType</span><span class="o">::</span><span class="n">Value</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">==</span> <span class="n">other</span><span class="p">;</span> <span class="p">}</span> <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">!=</span><span class="p">(</span><span class="n">DataType</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">!=</span> <span class="n">other</span><span class="p">.</span><span class="n">value</span><span class="p">;</span> <span class="p">}</span> + <span class="k">constexpr</span> <span class="kt">bool</span> <span class="k">operator</span><span class="o">!=</span><span class="p">(</span><span class="n">DataType</span><span class="o">::</span><span class="n">Value</span> <span class="n">other</span><span class="p">)</span> <span class="k">const</span> <span class="p">{</span> <span class="k">return</span> <span class="n">value</span> <span class="o">!=</span> <span class="n">other</span><span class="p">;</span> <span class="p">}</span> <span class="k">private</span><span class="o">:</span> <span class="n">Value</span> <span class="n">value</span><span class="p">;</span> <span class="p">};</span> diff --git a/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html b/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html index e5719a16e4..e98a7cedbd 100644 --- a/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html +++ b/docs/_cpp_api/structtrtorch_1_1ExtraInfo.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -955,7 +960,10 @@ <h2 id="struct-documentation"> <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> DataType </a> - ::kFloat + :: + <a class="reference internal" 
href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataType5Value6kFloatE" title="trtorch::ExtraInfo::DataType::kFloat"> + kFloat + </a> <a class="headerlink" href="#_CPPv4N7trtorch9ExtraInfo12op_precisionE" title="Permalink to this definition"> ¶ </a> @@ -1091,7 +1099,10 @@ <h2 id="struct-documentation"> <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DeviceType.html#_CPPv4N7trtorch9ExtraInfo10DeviceTypeE" title="trtorch::ExtraInfo::DeviceType"> DeviceType </a> - ::kGPU + :: + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DeviceType.html#_CPPv4N7trtorch9ExtraInfo10DeviceType5Value4kGPUE" title="trtorch::ExtraInfo::DeviceType::kGPU"> + kGPU + </a> <a class="headerlink" href="#_CPPv4N7trtorch9ExtraInfo6deviceE" title="Permalink to this definition"> ¶ </a> @@ -1707,6 +1718,99 @@ <h2 id="struct-documentation"> </dl> </dd> </dl> + <dl class="cpp function"> + <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + </span> + <span id="_CPPv2NK7trtorch9ExtraInfo8DataTypeeqEN8DataType5ValueE"> + </span> + <span id="trtorch::ExtraInfo::DataType::eq-operator__DataType::ValueCCE"> + </span> + <span class="target" id="classtrtorch_1_1ExtraInfo_1_1DataType_1a61d6f3e6a3929edec1d3659330b8297d"> + </span> + <em class="property"> + constexpr + </em> + bool + <code class="sig-name descname"> + operator== + </code> + <span class="sig-paren"> + ( + </span> + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> + DataType + </a> + :: + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataType5ValueE" title="trtorch::ExtraInfo::DataType::Value"> + Value + </a> + <em> + other + </em> + <span class="sig-paren"> + ) + </span> + <em class="property"> + const + </em> + <br/> + </dt> + <dd> + <p> + Comparision operator for + <a class="reference internal" href="#classtrtorch_1_1ExtraInfo_1_1DataType"> + <span class="std std-ref"> + DataType + </span> + </a> + . 
+ </p> + <p> + </p> + <dl class="simple"> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + true + </p> + </dd> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + false + </p> + </dd> + <dt> + <strong> + Parameters + </strong> + </dt> + <dd> + <ul class="breatheparameterlist simple"> + <li> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + other + </span> + </code> + : + </p> + </li> + </ul> + </dd> + </dl> + </dd> + </dl> <dl class="cpp function"> <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeneE8DataType"> <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeneE8DataType"> @@ -1796,6 +1900,99 @@ <h2 id="struct-documentation"> </dl> </dd> </dl> + <dl class="cpp function"> + <dt id="_CPPv4NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + <span id="_CPPv3NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + </span> + <span id="_CPPv2NK7trtorch9ExtraInfo8DataTypeneEN8DataType5ValueE"> + </span> + <span id="trtorch::ExtraInfo::DataType::neq-operator__DataType::ValueCCE"> + </span> + <span class="target" id="classtrtorch_1_1ExtraInfo_1_1DataType_1afac08806f8da094821031a3bf0ee5fa7"> + </span> + <em class="property"> + constexpr + </em> + bool + <code class="sig-name descname"> + operator!= + </code> + <span class="sig-paren"> + ( + </span> + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataTypeE" title="trtorch::ExtraInfo::DataType"> + DataType + </a> + :: + <a class="reference internal" href="classtrtorch_1_1ExtraInfo_1_1DataType.html#_CPPv4N7trtorch9ExtraInfo8DataType5ValueE" title="trtorch::ExtraInfo::DataType::Value"> + Value + </a> + <em> + other + </em> + <span class="sig-paren"> + ) + </span> + <em class="property"> + const + </em> + <br/> + </dt> + <dd> + <p> + Comparision operator for + <a class="reference internal" href="#classtrtorch_1_1ExtraInfo_1_1DataType"> + <span class="std std-ref"> + DataType + </span> + </a> + . 
+ </p> + <p> + </p> + <dl class="simple"> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + true + </p> + </dd> + <dt> + <strong> + Return + </strong> + </dt> + <dd> + <p> + false + </p> + </dd> + <dt> + <strong> + Parameters + </strong> + </dt> + <dd> + <ul class="breatheparameterlist simple"> + <li> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + other + </span> + </code> + : + </p> + </li> + </ul> + </dd> + </dl> + </dd> + </dl> </div> </dd> </dl> diff --git a/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html b/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html index 8b58f44947..569b00db96 100644 --- a/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html +++ b/docs/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html @@ -301,6 +301,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/trtorch_cpp.html b/docs/_cpp_api/trtorch_cpp.html index f23f54243e..44e285ede8 100644 --- a/docs/_cpp_api/trtorch_cpp.html +++ b/docs/_cpp_api/trtorch_cpp.html @@ -296,6 +296,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/unabridged_api.html b/docs/_cpp_api/unabridged_api.html index f862c086d1..96a93493a0 100644 --- a/docs/_cpp_api/unabridged_api.html +++ b/docs/_cpp_api/unabridged_api.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_cpp_api/unabridged_orphan.html b/docs/_cpp_api/unabridged_orphan.html index cc96ebf58c..554cd70624 100644 --- a/docs/_cpp_api/unabridged_orphan.html +++ b/docs/_cpp_api/unabridged_orphan.html @@ -294,6 +294,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../tutorials/trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt index bec2619937..1ca71e13ce 100644 --- a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt +++ b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_logging.h.rst.txt @@ -39,6 +39,8 @@ Included By ----------- +- :ref:`file_cpp_api_include_trtorch_ptq.h` + - :ref:`file_cpp_api_include_trtorch_trtorch.h` diff --git a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt index ff8e4dacc1..a5f33139f7 100644 --- a/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt +++ b/docs/_sources/_cpp_api/file_cpp_api_include_trtorch_ptq.h.rst.txt @@ -37,6 +37,8 @@ Includes - ``string`` +- ``trtorch/logging.h`` (:ref:`file_cpp_api_include_trtorch_logging.h`) + - ``vector`` diff --git a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt index 93cefc6c66..6d02e502f4 100644 --- 
a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt +++ b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.rst.txt @@ -18,6 +18,8 @@ Program Listing for File ptq.h #include <iostream> #include <sstream> + #include "trtorch/logging.h" + #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace nvinfer1 { class IInt8Calibrator; @@ -25,9 +27,12 @@ Program Listing for File ptq.h } namespace torch { - namespace data { - template<typename Example> - class Iterator; + class Tensor; + } + + namespace trtorch { + namespace ptq { + bool get_batch_impl(void* bindings[], const char* names[], int nbBindings, torch::Tensor& data); } } #endif //DOXYGEN_SHOULD_SKIP_THIS @@ -41,7 +46,12 @@ Program Listing for File ptq.h using Batch = typename DataLoader::super::BatchType; public: Int8Calibrator(DataLoaderUniquePtr dataloader, const std::string& cache_file_path, bool use_cache) - : dataloader_(dataloader.get()), it_(dataloader_->end()), cache_file_path_(cache_file_path), use_cache_(use_cache) {} + : dataloader_(dataloader.get()), cache_file_path_(cache_file_path), use_cache_(use_cache) { + for (auto batch : *dataloader_) { + batched_data_.push_back(batch.data); + } + it_ = batched_data_.begin(); + } int getBatchSize() const override { // HACK: TRTorch only uses explict batch sizing, INT8 Calibrator does not @@ -52,26 +62,15 @@ Program Listing for File ptq.h } bool getBatch(void* bindings[], const char* names[], int nbBindings) override { - // HACK: doesnt seem like the first try in the initializer list works - if (! it_created_) { - it_ = dataloader_->begin(); - it_created_ = true; - } - - if (it_ == dataloader_->end()) { + if (it_ != batched_data_.end()) { + auto status = get_batch_impl(bindings, names, nbBindings, *it_); + it_ = ++it_; + return status; + } else { + // Reset iterator if incase calibrator is going to be used again + it_ = batched_data_.begin(); return false; } - - auto batch = *it_; - - for (int i = 0; i < nbBindings; i++) { - auto data = batch.data; - data = data.to(at::kCUDA).contiguous(); - bindings[i] = data.data_ptr(); - } - - it_ = ++it_; - return true; } const void* readCalibrationCache(size_t& length) override { @@ -79,18 +78,17 @@ Program Listing for File ptq.h std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file(cache_file_path_, std::ios::binary); - cache_file >> std::noskipws; - if (cache_file.good()) { - std::copy(std::istream_iterator<char>(cache_file), - std::istream_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? 
cache_.data() : nullptr; } return nullptr; } @@ -109,12 +107,13 @@ Program Listing for File ptq.h private: DataLoader* dataloader_; - torch::data::Iterator<Batch> it_; const std::string& cache_file_path_; size_t cache_size_ = 0; bool use_cache_; std::vector<char> cache_; - bool it_created_ = false; + std::vector<torch::Tensor> batched_data_; + std::vector<torch::Tensor>::iterator it_; + }; template<typename Algorithm> @@ -138,23 +137,17 @@ Program Listing for File ptq.h std::stringstream ss; ss << "Reading Calibration Cache from " << cache_file_path_; logging::log(logging::Level::kINFO, ss.str()); + cache_.clear(); - std::ifstream cache_file; - cache_file.open(cache_file_path_, std::ios::in | std::ios::binary); - cache_file.unsetf(std::ios::skipws); - cache_file.seekg(0, std::ios::beg); - cache_.reserve(cache_file.tellg()); - cache_file.seekg(0, std::ios::beg); - if (cache_file.good()) { - std::cout << "Trying to read cache" << std::endl; - std::copy(std::istreambuf_iterator<char>(cache_file), - std::istreambuf_iterator<char>(), - std::back_inserter(cache_)); - ss << "Cache read"; - logging::log(logging::Level::kDEBUG, ss.str()); + std::ifstream input(cache_file_path_, std::ios::binary); + input >> std::noskipws; + if (input.good()) { + std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), + std::back_inserter(cache_)); + logging::log(logging::Level::kDEBUG, "Cache read"); } - cache_size_ = cache_.size(); - return cache_size_ ? cache_.data() : nullptr; + length = cache_.size(); + return length ? cache_.data() : nullptr; } diff --git a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt index 14d26d87b9..fa356d80b0 100644 --- a/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt +++ b/docs/_sources/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.rst.txt @@ -74,7 +74,9 @@ Program Listing for File trtorch.h operator Value() const { return value; } explicit operator bool() = delete; constexpr bool operator==(DataType other) const { return value == other.value; } + constexpr bool operator==(DataType::Value other) const { return value == other; } constexpr bool operator!=(DataType other) const { return value != other.value; } + constexpr bool operator!=(DataType::Value other) const { return value != other; } private: Value value; }; diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt index 5255135f58..45a1610b49 100644 --- a/docs/_sources/index.rst.txt +++ b/docs/_sources/index.rst.txt @@ -23,15 +23,18 @@ Getting Started * :ref:`installation` * :ref:`getting_started` * :ref:`ptq` +* :ref:`trtorchc` + .. toctree:: :caption: Getting Started - :maxdepth: 2 + :maxdepth: 1 :hidden: tutorials/installation tutorials/getting_started tutorials/ptq + tutorials/trtorchc Contributor Documentation -------------------------------- diff --git a/docs/_sources/tutorials/getting_started.rst.txt b/docs/_sources/tutorials/getting_started.rst.txt index 0d133a7eab..45c08b8637 100644 --- a/docs/_sources/tutorials/getting_started.rst.txt +++ b/docs/_sources/tutorials/getting_started.rst.txt @@ -130,7 +130,8 @@ To compile your TorchScript module with TRTorch, all you need to do is provide t to TRTorch and you will be returned an optimized TorchScript module to run or add into another PyTorch module. 
The only required setting is the input size or input range which is defined as a list of either list types like ``lists``, ``tuples`` or PyTorch ``size`` objects or dictionaries of minimum, optimial and maximum sizes. You can also specify settings such as -operating precision for the engine or target device. +operating precision for the engine or target device. After compilation you can save the module just like any other module +to load in a deployment application. In order to load a TensorRT/TorchScript module, make sure you first import ``trtorch``. .. code-block:: python @@ -152,6 +153,17 @@ operating precision for the engine or target device. input_data = input_data.half() result = trt_ts_module(input_data) + torch.jit.save(trt_ts_module, "trt_ts_module.ts") + +.. code-block:: python + + # Deployment application + import torch + import trtorch + + trt_ts_module = torch.jit.load("trt_ts_module.ts") + input_data = input_data.half() + result = trt_ts_module(input_data) .. _ts_in_cc: @@ -251,7 +263,35 @@ We can also set settings like operating precision to run in FP16. auto trt_mod = trtorch::CompileGraph(mod, info); auto out = trt_mod.forward({in}); -And now we are running the module in FP16 precision. +And now we are running the module in FP16 precision. You can then save the module to load later. + +.. code-block:: c++ + + trt_mod.save("<PATH TO SAVED TRT/TS MOD>") + +TRTorch compiled TorchScript modules are loaded in the same way as normal TorchScript module. Make sure your deployment application is linked against ``libtrtorch.so`` + +.. code-block:: c++ + + #include "torch/script.h" + #include "trtorch/trtorch.h" + + int main(int argc, const char* argv[]) { + torch::jit::Module module; + try { + // Deserialize the ScriptModule from a file using torch::jit::load(). + module = torch::jit::load("<PATH TO SAVED TRT/TS MOD>"); + } + catch (const c10::Error& e) { + std::cerr << "error loading the model\n"; + return -1; + } + + torch::Tensor in = torch::randn({1, 1, 32, 32}, torch::kCUDA); + auto out = mod.forward(in); + + std::cout << "ok\n"; + } If you want to save the engine produced by TRTorch to use in a TensorRT application you can use the ``ConvertGraphToTRTEngine`` API. diff --git a/docs/_sources/tutorials/trtorchc.rst.txt b/docs/_sources/tutorials/trtorchc.rst.txt new file mode 100644 index 0000000000..5561ee86ed --- /dev/null +++ b/docs/_sources/tutorials/trtorchc.rst.txt @@ -0,0 +1,91 @@ +.. _trtorchc: + +trtorchc +================================= + +``trtorchc`` is a CLI application for using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use the PTQ feature). The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + +All that is required to run the program after compilation is for C++ linking against ``libtrtorch.so`` +or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with ``torch.jit.load()`` and run like you would run any other module. + +.. code-block:: txt + + trtorchc [input_file_path] [output_file_path] + [input_shapes...] 
{OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + + OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be >= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options + + +e.g. + +.. 
code-block:: txt + + trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16
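For context, a minimal sketch of loading and running the compiled module from Python, as the tutorial text above describes (``torch.jit.load`` plus importing the ``trtorch`` package); the file name ``ssd_trt.ts``, the CUDA device, and the half-precision input matching ``-p f16`` are assumptions taken from the example command, not requirements of the tool.

.. code-block:: python

    # Sketch only: assumes "ssd_trt.ts" was produced by the trtorchc command above
    # and that a CUDA device is available.
    import torch
    import trtorch  # the tutorial notes importing the trtorch package is required to run the compiled module

    trt_ts_module = torch.jit.load("ssd_trt.ts")  # load like any other TorchScript module
    input_data = torch.randn(1, 3, 300, 300, device="cuda").half()  # min size from the example, half for -p f16
    result = trt_ts_module(input_data)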
The generated Sphinx site under docs/ is rebuilt in the same change; the hunks are repetitive and amount to the following:

- Every page in the site navigation gains a link to the new tutorials/trtorchc.html page: docs/contributors/conversion.html, execution.html, lowering.html, phases.html, system_overview.html, useful_links.html and writing_converters.html, plus docs/genindex.html, docs/index.html, docs/py-modindex.html, docs/py_api/logging.html, docs/py_api/trtorch.html and docs/search.html.
- docs/contributors/system_overview.html also updates its "previous page" link (in the page head and the footer navigation) from tutorials/ptq.html ("Post Training Quantization (PTQ)") to tutorials/trtorchc.html ("trtorchc").
- docs/genindex.html picks up additional overload entries for trtorch::ExtraInfo::DataType::operator!= and operator==.
- docs/index.html adds "trtorchc" to the Getting Started list.
- docs/objects.inv (binary) and docs/searchindex.js are regenerated so that the new tutorials/trtorchc page is indexed; the minified searchindex.js contents are not reproduced here.
tEENSt6vectorI7int64_tEE"],"trtorch::ExtraInfo::InputRange::max":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3maxE"],"trtorch::ExtraInfo::InputRange::min":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3minE"],"trtorch::ExtraInfo::InputRange::opt":[45,7,1,"_CPPv4N7trtorch9ExtraInfo10InputRange3optE"],"trtorch::ExtraInfo::allow_gpu_fallback":[44,7,1,"_CPPv4N7trtorch9ExtraInfo18allow_gpu_fallbackE"],"trtorch::ExtraInfo::capability":[44,7,1,"_CPPv4N7trtorch9ExtraInfo10capabilityE"],"trtorch::ExtraInfo::debug":[44,7,1,"_CPPv4N7trtorch9ExtraInfo5debugE"],"trtorch::ExtraInfo::device":[44,7,1,"_CPPv4N7trtorch9ExtraInfo6deviceE"],"trtorch::ExtraInfo::input_ranges":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12input_rangesE"],"trtorch::ExtraInfo::kDEFAULT":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability8kDEFAULTE"],"trtorch::ExtraInfo::kSAFE_DLA":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_DLAE"],"trtorch::ExtraInfo::kSAFE_GPU":[44,2,1,"_CPPv4N7trtorch9ExtraInfo16EngineCapability9kSAFE_GPUE"],"trtorch::ExtraInfo::max_batch_size":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14max_batch_sizeE"],"trtorch::ExtraInfo::num_avg_timing_iters":[44,7,1,"_CPPv4N7trtorch9ExtraInfo20num_avg_timing_itersE"],"trtorch::ExtraInfo::num_min_timing_iters":[44,7,1,"_CPPv4N7trtorch9ExtraInfo20num_min_timing_itersE"],"trtorch::ExtraInfo::op_precision":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12op_precisionE"],"trtorch::ExtraInfo::ptq_calibrator":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14ptq_calibratorE"],"trtorch::ExtraInfo::refit":[44,7,1,"_CPPv4N7trtorch9ExtraInfo5refitE"],"trtorch::ExtraInfo::strict_types":[44,7,1,"_CPPv4N7trtorch9ExtraInfo12strict_typesE"],"trtorch::ExtraInfo::workspace_size":[44,7,1,"_CPPv4N7trtorch9ExtraInfo14workspace_sizeE"],"trtorch::dump_build_info":[34,3,1,"_CPPv4N7trtorch15dump_build_infoEv"],"trtorch::get_build_info":[30,3,1,"_CPPv4N7trtorch14get_build_infoEv"],"trtorch::ptq::Int8CacheCalibrator":[3,6,1,"_CPPv4I0EN7trtorch3ptq19Int8CacheCalibratorE"],"trtorch::ptq::Int8CacheCalibrator::Algorithm":[3,5,1,"_CPPv4I0EN7trtorch3ptq19Int8CacheCalibratorE"],"trtorch::ptq::Int8CacheCalibrator::Int8CacheCalibrator":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE"],"trtorch::ptq::Int8CacheCalibrator::Int8CacheCalibrator::cache_file_path":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator19Int8CacheCalibratorERKNSt6stringE"],"trtorch::ptq::Int8CacheCalibrator::getBatch":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::bindings":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::names":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatch::nbBindings":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8CacheCalibrator::getBatchSize":[3,3,1,"_CPPv4NK7trtorch3ptq19Int8CacheCalibrator12getBatchSizeEv"],"trtorch::ptq::Int8CacheCalibrator::operator 
nvinfer1::IInt8Calibrator*":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibratorcvPN8nvinfer115IInt8CalibratorEEv"],"trtorch::ptq::Int8CacheCalibrator::readCalibrationCache":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8CacheCalibrator::readCalibrationCache::length":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache":[3,3,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache::cache":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8CacheCalibrator::writeCalibrationCache::length":[3,4,1,"_CPPv4N7trtorch3ptq19Int8CacheCalibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator":[4,6,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::Algorithm":[4,5,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::DataLoaderUniquePtr":[4,5,1,"_CPPv4I00EN7trtorch3ptq14Int8CalibratorE"],"trtorch::ptq::Int8Calibrator::Int8Calibrator":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::cache_file_path":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::dataloader":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::Int8Calibrator::use_cache":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator14Int8CalibratorE19DataLoaderUniquePtrRKNSt6stringEb"],"trtorch::ptq::Int8Calibrator::getBatch":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::bindings":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::names":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatch::nbBindings":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator8getBatchEA_PvA_PKci"],"trtorch::ptq::Int8Calibrator::getBatchSize":[4,3,1,"_CPPv4NK7trtorch3ptq14Int8Calibrator12getBatchSizeEv"],"trtorch::ptq::Int8Calibrator::operator 
nvinfer1::IInt8Calibrator*":[4,3,1,"_CPPv4N7trtorch3ptq14Int8CalibratorcvPN8nvinfer115IInt8CalibratorEEv"],"trtorch::ptq::Int8Calibrator::readCalibrationCache":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8Calibrator::readCalibrationCache::length":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator20readCalibrationCacheER6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache":[4,3,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache::cache":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],"trtorch::ptq::Int8Calibrator::writeCalibrationCache::length":[4,4,1,"_CPPv4N7trtorch3ptq14Int8Calibrator21writeCalibrationCacheEPKv6size_t"],STR:[5,0,1,"c.STR"],TRTORCH_API:[6,0,1,"c.TRTORCH_API"],TRTORCH_HIDDEN:[7,0,1,"c.TRTORCH_HIDDEN"],TRTORCH_MAJOR_VERSION:[8,0,1,"c.TRTORCH_MAJOR_VERSION"],TRTORCH_MINOR_VERSION:[12,0,1,"c.TRTORCH_MINOR_VERSION"],TRTORCH_PATCH_VERSION:[9,0,1,"c.TRTORCH_PATCH_VERSION"],TRTORCH_VERSION:[10,0,1,"c.TRTORCH_VERSION"],XSTR:[11,0,1,"c.XSTR"],trtorch:[58,8,0,"-"]},"trtorch.logging":{Level:[57,9,1,""],get_is_colored_output_on:[57,10,1,""],get_logging_prefix:[57,10,1,""],get_reportable_log_level:[57,10,1,""],log:[57,10,1,""],set_is_colored_output_on:[57,10,1,""],set_logging_prefix:[57,10,1,""],set_reportable_log_level:[57,10,1,""]},"trtorch.logging.Level":{Debug:[57,11,1,""],Error:[57,11,1,""],Info:[57,11,1,""],InternalError:[57,11,1,""],Warning:[57,11,1,""]},trtorch:{DeviceType:[58,9,1,""],EngineCapability:[58,9,1,""],check_method_op_support:[58,10,1,""],compile:[58,10,1,""],convert_method_to_trt_engine:[58,10,1,""],dtype:[58,9,1,""],dump_build_info:[58,10,1,""],get_build_info:[58,10,1,""],logging:[57,8,0,"-"]}},objnames:{"0":["c","macro","C macro"],"1":["cpp","enum","C++ enum"],"10":["py","function","Python function"],"11":["py","attribute","Python attribute"],"2":["cpp","enumerator","C++ enumerator"],"3":["cpp","function","C++ function"],"4":["cpp","functionParam","functionParam"],"5":["cpp","templateParam","templateParam"],"6":["cpp","class","C++ class"],"7":["cpp","member","C++ member"],"8":["py","module","Python module"],"9":["py","class","Python 
class"]},objtypes:{"0":"c:macro","1":"cpp:enum","10":"py:function","11":"py:attribute","2":"cpp:enumerator","3":"cpp:function","4":"cpp:functionParam","5":"cpp:templateParam","6":"cpp:class","7":"cpp:member","8":"py:module","9":"py:class"},terms:{"abstract":[50,55],"byte":58,"case":[1,2,44,49,55,61],"catch":59,"char":[3,4,42,59],"class":[31,33,42,43,44,48,55,57,58,59,61],"const":[1,2,3,4,31,32,33,35,36,42,43,44,51,55,59,61],"default":[1,2,3,4,17,31,33,41,43,44,58,61,62],"enum":[1,2,40,43,44,48,57,61],"final":49,"float":[58,59,62],"function":[1,2,3,4,44,45,48,50,51,55,59],"import":[59,62],"int":[3,4,42,50,59],"long":49,"new":[1,2,3,4,36,44,45,50,53,55,57,59],"public":[1,2,3,4,42,43,44,45,61],"return":[1,2,3,4,23,25,30,31,32,33,35,36,40,41,42,43,44,50,51,52,53,55,57,58,59,61],"static":[44,45,49,55,58,59],"super":[42,59],"throw":[51,59],"true":[1,2,4,43,44,51,55,58,59,61],"try":[53,59],"void":[3,4,24,26,27,28,34,40,42,43],"while":61,And:59,Are:40,But:59,For:[49,59],Its:55,Not:3,One:[58,59],PRs:59,Thats:59,The:[2,44,49,50,51,52,53,55,57,60,61,62],Then:[60,61],There:[4,49,55,59,61],These:[49,50],Use:[44,55,58,61],Useful:56,Using:4,Will:35,With:[59,61],___torch_mangle_10:[50,59],___torch_mangle_5:59,___torch_mangle_9:59,__attribute__:41,__gnuc__:41,__init__:59,__torch__:[50,59],__visibility__:41,_all_:51,_convolut:59,aarch64:[53,60],abl:[49,51,55,56,61],about:[49,50,55,58,62],abov:[28,59],accept:[44,45,50,55,62],access:[51,55,56,59],accord:55,accuraci:61,across:51,act:50,acthardtanh:55,activ:[59,61],activationtyp:55,actual:[50,51,55,57,59],add:[27,49,51,55,57,59],add_:[51,59],addactiv:55,added:[28,49],addenginetograph:[50,59],addit:[51,59],addlay:59,addshuffl:59,advanc:61,after:[49,56,59,62],again:[42,55],against:[59,62],ahead:59,aim:51,algorithm:[3,4,31,33,42,43,61],all:[17,43,50,51,58,59,61,62],alloc:55,allow:[44,45,49,51,58,62],allow_gpu_fallback:[43,44,58],alreadi:[49,51,59,62],also:[33,49,55,56,59,60,61],alwai:[3,4,26],analogu:55,ani:[49,55,58,59,60,62],annot:[55,59],anoth:59,aot:[56,59],api:[13,15,16,40,41,42,43,53,55,58,59,61],apidirectori:[22,46],appli:61,applic:[2,33,44,51,59,62],aquir:59,architectur:56,archiv:60,aren:59,arg:[49,59],argc:59,argument:[50,51,55,59,62],argv:59,around:[50,55,59],arrai:[3,4,49],arrayref:[43,44,45],arxiv:61,aspect:62,assembl:[49,59],assign:[3,4,50],associ:[49,55,59],associatevalueandivalu:55,associatevalueandtensor:[55,59],aten:[51,54,55,59],attribut:51,auto:[42,55,59,61],avail:55,averag:[44,58,62],avg:62,back:[50,51,53,59],back_insert:42,background:59,base:[34,46,47,50,57,59,60,61],basic:62,batch:[3,4,42,44,55,58,61,62],batch_norm:55,batch_siz:[42,61],batched_data_:42,batchnorm:51,batchtyp:42,bazel:[53,60],bdist_wheel:60,becaus:[55,59],becom:55,been:[49,55,59],befor:[51,53,55,56,59,60],begin:[42,60],beginn:59,behavior:58,being:59,below:[55,59],benefit:[55,59],best:[44,45],better:[59,61],between:[55,61],bia:[51,59],bin:60,binari:[42,61],bind:[3,4,42],bit:[55,58,59],blob:54,block0:51,block1:51,block:49,bool:[1,2,3,4,25,26,31,35,40,42,43,44,51,55,57,58,59,61],both:59,briefli:59,bsd:43,buffer:[3,4],bug:60,build:[30,31,33,44,49,52,53,55,58,59,61,62],build_fil:60,builderconfig:43,built:[60,62],c10:[1,2,43,44,45,59,61],c_api:54,c_str:[55,59],cach:[3,4,31,33,42,61,62],cache_:42,cache_fil:42,cache_file_path:[3,4,31,33,42,43],cache_file_path_:42,cache_size_:42,calcul:[49,59],calibr:[3,4,31,33,42,44,61,62],calibration_cache_fil:[31,33,61],calibration_dataload:[31,61],calibration_dataset:61,call:[31,33,36,44,50,51,55,58,59],callmethod:59,can:[1,2,4,31,32,33,44,45,49,50,5
1,52,53,55,58,59,60,61,62],cannot:[51,59],capabl:[43,44,58,62],cast:[3,4],caus:[55,60],cdll:59,cerr:59,chain:55,chanc:55,chang:[33,53,61],check:[1,2,35,44,51,55,58,59,60,62],check_method_op_support:58,checkmethodoperatorsupport:[21,37,43,46,47,59],choos:59,cifar10:61,cifar:61,classif:59,clear:42,cli:62,close:59,closer:51,code:[53,56,59],collect:59,color:[25,26,57],colored_output_on:[26,40,57],com:[54,59,60,61],command:62,comment:60,common:[49,51],commun:59,comparis:[1,44],comparison:[2,44],compat:[1,2,44],compil:[32,35,36,44,50,51,55,58,61,62],compile_set:59,compilegraph:[21,37,43,46,47,59,61],complet:59,complex:59,compliat:61,compon:[52,53,59],compos:59,composit:59,comput:61,config:60,configur:[32,36,56,58,59,61],connect:51,consid:59,consol:62,consolid:59,constant:[49,50,51,59],constexpr:[1,2,43,44],construct:[1,2,3,4,44,45,49,52,53,55,59],constructor:[1,44,59],consum:[4,49,59],contain:[31,35,49,51,55,58,59,60,61],content:61,context:[49,50,52,53],contributor:59,control:59,conv1:59,conv2:59,conv2d:59,conv:[55,59],convers:[50,51,58,59],conversionctx:[55,59],convert:[3,4,32,35,36,51,52,53,56,58],convert_method_to_trt_engin:58,convertgraphtotrtengin:[21,37,43,46,47,59],convien:44,convienc:[3,4],convolut:61,coordin:53,copi:[42,55],copyright:43,core:[51,53,59],corespond:50,corpor:43,correct:60,correspond:55,coupl:[49,53],cout:59,cp35:60,cp35m:60,cp36:60,cp36m:60,cp37:60,cp37m:60,cp38:60,cpp:[14,15,16,40,41,42,43,48,51,53,59,61],cpp_frontend:61,cppdirectori:[22,46],cppdoc:59,creat:[31,33,49,55,62],csrc:[51,54],cstddef:61,ctx:[55,59],ctype:59,cuda:[44,58,59,60],cudafloattyp:59,current:[23,55],data:[1,3,4,31,33,42,44,49,52,53,55,61],data_dir:61,dataflow:[55,59],dataload:[4,31,33,42,43,44,61],dataloader_:42,dataloaderopt:61,dataloaderuniqueptr:[4,42],dataset:[33,61],datatyp:[2,21,37,43,44,46,47,58],datatypeclass:[0,46],dbg:60,dead_code_elimin:51,deal:55,debug:[17,26,43,44,55,57,58,62],debugg:[58,62],dedic:51,deep:[55,56,61],deeplearn:54,def:59,defin:[1,2,3,4,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,41,44,45,48,59,61,62],definit:[48,55],delet:[1,2,43,44,51],demo:61,depend:[30,33,49,53,59],deploi:[56,59,61],deploy:[59,61,62],describ:[44,55,58,59],deseri:[58,59],destroi:55,destructor:55,detail:59,determin:51,develop:[56,59,60],deviat:62,devic:[2,43,44,58,59,62],devicetyp:[0,21,37,43,44,46,47,58],dict:58,dictionari:[58,59],differ:[33,56,59],dimens:51,directli:[55,61],directori:[18,19,20,21,22,43,46,60,61],disabl:[57,62],disclos:60,displai:62,distdir:60,distribut:[58,59,61],dla:[2,44,58,62],doc:[53,54,60],docsrc:53,document:[40,41,42,43,46,47,53,59,61],doe:[41,42,51,55,61],doesn:59,doing:[49,51,59,61],domain:61,don:[55,61],done:[49,53],dont:40,down:60,download:60,doxygen_should_skip_thi:[42,43],driver:60,dtype:58,due:[3,4],dump:[34,60,62],dump_build_info:[21,37,43,46,47,58],dure:[55,61,62],dynam:[44,45,58],each:[3,4,44,49,50,51,55,59],easi:[49,62],easier:[52,53,55,59,61],easili:[3,4],edu:61,effect:[51,59,61],effici:55,either:[44,45,55,58,59,60,62],element:50,element_typ:42,els:[41,42,58],embed:62,emit:49,empti:59,emum:[17,44],enabl:[3,4,25,57,58],encount:60,end:[42,55,59],end_dim:59,endif:[41,42,43],enforc:59,engin:[1,2,32,36,44,45,49,52,53,56,58,59,61,62],engine_converted_from_jit:59,enginecap:[43,44,58],ensur:[33,51],enter:49,entri:[44,55],entropi:[31,33,61],enumer:[1,2,17,44],equival:[36,52,53,55,58,59],equivil:32,error:[17,49,51,53,57,59,60],etc:58,eval:59,evalu:[50,52,53],evaluated_value_map:[49,55],even:59,everi:59,everyth:17,exampl:[50,55,59,61],exception_elimin:51,execpt:51,execut:[51,58,59,6
1],execute_engin:[50,59],exhaust:59,exist:[4,32,35,36,58,60,61],expect:[51,55,59],explic:42,explicit:[3,4,43,51,56,61],explicitli:61,explict:42,explictli:[1,44],extend:55,extent:[56,59],extra:44,extra_info:[58,61],extrainfo:[0,3,4,21,32,36,37,43,46,47,58,59,61],extrainfostruct:[0,46],f16:62,f32:62,factori:[4,31,33,61],fail:59,fallback:[55,62],fals:[1,2,3,4,42,43,44,58,59],fashion:59,fc1:59,fc2:59,fc3:59,feat:59,featur:[61,62],fed:[3,4,59],feed:[31,33,59],feel:56,field:[3,4,61],file:[1,2,3,4,5,6,7,8,9,10,11,12,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,44,45,53,58,59,60,61,62],file_path:62,find:[4,59],first:[49,51,59,61],fix:44,fixed_s:[43,44],flag:62,flatten:59,flatten_convert:59,float16:[58,62],float32:[58,62],flow:[55,59],fly:59,follow:[59,61,62],forc:62,form:49,format:62,forward:[31,33,36,50,55,58,59,61],found:[43,59,60,61],fp16:[1,44,56,58,59],fp32:[1,44,56,61],freed:55,freeze_modul:51,from:[1,2,3,4,31,33,42,44,45,49,50,51,52,53,55,59,61,62],full:[55,59,61,62],fulli:[35,51,58,59,61],fuse_flatten_linear:51,fuse_linear:51,fusion:55,gaurd:41,gcc:53,gear:61,gener:[3,4,33,50,51,53,55,59,61,62],get:[1,2,3,4,23,30,42,44,45,55,57,60,61],get_batch_impl:42,get_build_info:[21,37,43,46,47,58],get_is_colored_output_on:[18,38,40,46,47,57],get_logging_prefix:[18,38,40,46,47,57],get_reportable_log_level:[18,38,40,46,47,57],getattr:[51,59],getbatch:[3,4,42],getbatchs:[3,4,42],getdimens:[55,59],getoutput:[55,59],github:[54,59,60,61],given:[44,51,58,59,62],global:[27,59],gnu:60,goal:55,going:[42,59],good:[42,55],got:59,gpu:[2,32,36,44,58,59,62],graph:[17,32,35,36,43,49,52,53,55,56,58,59],great:59,gtc:56,guard:51,guard_elimin:51,hack:42,half:[58,59,62],handl:51,happen:59,hardtanh:55,has:[49,51,53,55,59,61],hash:60,have:[33,42,49,55,56,59,60,61,62],haven:59,header:59,help:[26,49,55,62],helper:55,here:[42,49,50,59,60,61],hermet:60,hfile:[22,46],hidden:41,high:51,higher:[51,59],hinton:61,hold:[44,45,49,55,61],hood:53,how:[3,4,59],howev:33,html:[54,59,60,61],http:[54,59,60,61],http_archiv:60,idea:51,ident:62,ifndef:[42,43],ifstream:42,iint8calibr:[3,4,31,33,42,43,44,61],iint8entropycalibrator2:[3,4,31,33,42,43,61],iint8minmaxcalibr:[31,33,61],ilay:55,imag:61,images_:61,implement:[3,4,50,59,61],implic:51,in_shap:59,in_tensor:59,incas:42,includ:[14,16,17,30,34,40,41,42,43,48,58,59,60,61,62],includedirectori:[22,46],index:[54,56,61],inetworkdefinit:49,infer:[51,59,61],info:[17,32,36,43,44,55,57,59,62],inform:[28,30,34,49,56,58,59,61,62],infrastructur:61,ingest:53,inherit:[46,47,61],inlin:[43,51,59],input0:59,input1:59,input2:59,input:[3,4,33,42,44,45,49,50,51,52,53,55,58,59,61,62],input_data:59,input_file_path:62,input_rang:[43,44],input_s:59,input_shap:[58,59,61,62],inputrang:[21,37,43,44,46,47,59],inputrangeclass:[0,46],inspect:[55,59],instal:[56,59],instanc:[51,59],instanti:[52,53,55,59],instatin:[1,2,44],instead:[49,51,59,62],instruct:59,insur:60,int16_t:43,int64_t:[43,44,45,61],int8:[1,42,44,56,58,61,62],int8_t:43,int8cachecalibr:[20,33,39,42,43,46,47],int8cachecalibratortempl:[0,46],int8calibr:[3,20,31,39,42,43,46,47],int8calibratorstruct:[0,46],integ:58,integr:56,interfac:[1,2,44,50,53,55,61],intermedi:[17,59],intern:[2,17,44,55,59],internal_error:57,internalerror:57,interpret:50,intro_to_torchscript_tutori:59,invok:59,ios:42,iostream:[20,42,59],is_train:61,iscustomclass:55,issu:[3,4,59,60],istensor:55,istream_iter:42,it_:42,itensor:[49,55,59],iter:[42,44,49,58,62],its:[33,49,50,55],itself:[1,2,44,62],ivalu:[49,50,55,59],jetson:58,jit:[32,35,36,43,49,50,51,52,53,54,55,58,59,62],just:[42,43,51,56,59],kcha
r:[1,43,44],kclip:55,kcpu:[2,44],kcuda:[2,44,59],kdebug:[17,40,42],kdefault:[43,44],kdla:[2,43,44],kei:[58,59],kernel:[44,55,58,62],kerror:[17,40],kfloat:[1,43,44],kgpu:[2,43,44],kgraph:[17,40,51],khalf:[1,43,44,59],ki8:61,kind:[49,58],kinfo:[17,40,42],kinternal_error:[17,40],know:[40,55],kriz:61,krizhevski:61,ksafe_dla:[43,44],ksafe_gpu:[43,44],ktest:61,ktrain:61,kwarn:[17,40],label:61,laid:59,lambda:[55,59],languag:59,larg:[52,53,59,61],larger:61,last:51,later:[33,59],layer:[44,49,51,55,58,59,61,62],ld_library_path:60,ldd:60,learn:[56,61],leav:51,lenet:59,lenet_trt:[50,59],lenetclassifi:59,lenetfeatextractor:59,length:[3,4,42],let:[44,51,55,62],level:[18,23,27,28,38,40,42,46,47,51,53,57,59],levelnamespac:[0,46],leverag:61,lib:[51,59],librari:[30,43,50,52,53,55,59,60],libtorch:[4,34,55,59,60,61],libtrtorch:[59,60,62],licens:43,like:[49,55,59,61,62],limit:[51,61],line:62,linear:59,link:[49,56,59,62],linux:[53,60],linux_x86_64:60,list:[18,19,20,21,35,48,49,55,58,59,60],listconstruct:49,live:55,load:[50,59,61,62],local:[51,59],locat:61,log:[16,17,19,20,21,22,37,42,43,46,47,48,51,55,58],log_debug:55,logger:57,loggingenum:[0,46],loglevel:57,look:[49,50,51,52,53,59,61],loop:51,loss:61,lot:55,lower:17,lower_graph:51,lower_tupl:51,loweralltupl:51,lowersimpletupl:51,lvl:[27,28,40],machin:[50,61],macro:[5,6,7,8,9,10,11,12,16,18,21,22,40,43,46,48],made:[51,52,53],mai:[49,53,59,61],main:[50,52,53,55,59],maintain:[50,55],major:53,make:[49,59,60,61],make_data_load:[4,61],make_int8_cache_calibr:[21,39,43,46,47,61],make_int8_calibr:[21,33,39,43,46,47,61],manag:[49,50,52,53,55,59],map:[2,44,49,51,52,53,55,59,61],master:[54,60,61],match:[44,51,60],matmul:[51,59],matrix:54,matur:53,max:[43,44,45,55,58,59,62],max_batch_s:[43,44,58,62],max_c:62,max_h:62,max_n:62,max_pool2d:59,max_val:55,max_w:62,maximum:[44,45,58,59,62],mean:[44,55,56,58,62],mechan:55,meet:58,member:[44,45,58],memori:[20,21,42,43,51,55,59],menu:62,messag:[17,27,28,57,62],metadata:[50,55],method:[32,35,36,51,55,58,59,60],method_nam:[32,35,43,58,59],min:[43,44,45,55,58,59,62],min_c:62,min_h:62,min_n:62,min_val:55,min_w:62,minim:[44,58,61,62],minimum:[44,45,57,59],minmax:[31,33,61],miss:59,mod:[59,61],mode:61,mode_:61,model:[59,61],modul:[32,35,36,43,50,52,53,55,56,58,61,62],modular:59,more:[49,56,59,61],most:53,move:[31,43,59,61],msg:[27,40,57],much:[55,61],multipl:61,must:[44,55,58,59,60,62],name:[3,4,32,35,42,55,58,59,60],namespac:[0,40,42,43,48,56,61],nativ:[53,54,59],native_funct:54,nbbind:[3,4,42],necessari:40,need:[1,2,28,33,41,44,49,51,55,59,60,61],nest:[46,47],net:[55,59],network:[31,33,55,59,61],new_lay:55,new_local_repositori:60,new_siz:61,next:[3,4,49,50,61],nice:60,ninja:60,nlp:[31,33,61],node:[51,55,59],node_info:[55,59],noexcept:61,none:55,norm:55,normal:[1,2,44,59,61],noskipw:42,note:[2,44,55],now:[53,55,59],nullptr:[42,43,44],num:62,num_avg_timing_it:[43,44,58],num_it:62,num_min_timing_it:[43,44,58],number:[3,4,44,51,55,58,59,62],numer:62,nvidia:[32,36,43,54,58,59,60,61,62],nvinfer1:[3,4,31,33,42,43,44,55,61],object:[1,2,3,4,44,45,55,59],obvious:59,off:50,ofstream:[42,59],older:53,onc:[40,41,42,43,49,50,59,61],one:[51,55,57,59],ones:[40,59,60],onli:[2,3,4,17,33,42,44,53,55,57,58,59,61,62],onnx:51,onto:[50,62],op_precis:[43,44,58,59,61,62],oper:[1,2,3,4,35,42,43,44,49,50,51,52,53,55,56,58,61,62],ops:[51,59],opset:[52,53],opt:[43,44,45,58,59,60],opt_c:62,opt_h:62,opt_n:62,opt_w:62,optim:[44,45,56,59,62],optimi:59,optimin:[44,45],option:[42,58,60,61,62],order:[44,55,59],org:[54,59,60,61],other:[1,2,43,44,49,56,58,59,62],our:[53,59],
out:[35,42,49,51,52,53,55,57,58,59,60],out_shap:59,out_tensor:[55,59],output:[25,26,44,49,50,51,55,57,59,62],output_file_path:62,outself:59,over:[52,53],overrid:[3,4,31,33,42,61],overview:[54,56],own:[55,59],packag:[51,59,62],page:56,pair:[55,61],paramet:[1,2,3,4,26,27,28,31,32,33,35,36,44,45,49,51,55,57,58,59],parent:[14,15,16,18,19,20,21],pars:59,part:[53,62],pass:[49,50,52,53,55,59,61],path:[4,13,14,15,16,31,33,59,60,61,62],pathwai:59,pattern:[55,59],perform:[31,33],performac:[44,45,61],phase:[17,55,59],pick:59,pip3:60,pipelin:62,piplein:59,place:[51,60,61],plan:[53,62],pleas:60,point:[58,59],pointer:[3,4,61],pop:50,posit:62,post:[31,33,44,56,62],power:59,pragma:[40,41,42,43,61],pre_cxx11_abi:60,precis:[44,56,58,59,61,62],precompil:60,prefix:[24,26,40,57],preprint:61,preprocess:61,preserv:[59,61],prespect:59,pretti:59,previous:33,prim:[49,50,51,59],primarili:[53,59],print:[17,35,42,57,58],priorit:60,privat:[3,4,42,43,61],process:[59,62],produc:[44,45,49,50,55,59],profil:[44,45],program:[18,19,20,21,33,48,52,53,56,59,62],propog:51,provid:[3,4,44,55,59,60,61],ptq:[3,4,16,18,21,22,37,43,46,47,48,56,62],ptq_calibr:[3,4,43,44,61],ptqtemplat:[0,46],pull:60,pure:35,purpos:60,push:50,push_back:42,python3:[51,59,60],python:[53,62],python_api:54,pytorch:[50,52,53,55,58,59,60,61],quantiz:[31,33,56,62],quantizatiom:44,question:59,quickli:[59,61,62],quit:[55,59],rais:51,raiseexcept:51,rand:59,randn:59,rang:[44,45,58,59,62],rather:51,read:[3,4,31,33,42,61],readcalibrationcach:[3,4,42],realiz:50,realli:55,reason:[1,44,59],recalibr:33,recognit:61,recomend:[31,33],recommend:[31,33,59,60],record:[49,59],recurs:49,reduc:[51,52,53,61],refer:[50,52,53],referenc:60,refit:[43,44,58],reflect:43,regard:60,regist:[50,55],registernodeconversionpattern:[55,59],registri:[49,59],reinterpret_cast:42,relationship:[46,47],releas:60,relu:59,remain:[51,61],remove_contigu:51,remove_dropout:51,replac:51,report:[23,42],reportable_log_level:57,repositori:53,repres:[44,45,55,57],represent:[51,55,59],request:59,requir:[33,49,57,58,59,61,62],reserv:43,reset:42,resolv:[49,51,52,53],resourc:[49,61],respons:[33,50],restrict:[44,58,62],result:[49,51,52,53,58,59],reus:[51,61],right:[43,53,55],root:[43,61],run:[2,32,44,49,50,52,53,55,56,58,59,60,61,62],runtim:[50,56,59],safe:[55,58],safe_dla:[58,62],safe_gpu:[58,62],safeti:[44,58],same:[50,59],sampl:61,save:[33,42,58,59,62],saw:59,scalar:55,scalartyp:[1,43,44],scale:61,schema:[55,59],scope:51,scratch:33,script:[35,51,58,59],script_model:59,scriptmodul:[58,59],sdk:54,seamlessli:56,search:56,section:61,see:[35,50,51,58,59],select:[31,32,33,44,58,61,62],self:[50,51,55,59],sens:59,serial:[32,50,52,53,58,59],serv:62,set:[3,4,17,26,28,32,33,36,44,45,49,51,52,53,56,57,58,59,61,62],set_is_colored_output_on:[18,38,40,46,47,57],set_logging_prefix:[18,38,40,46,47,57],set_reportable_log_level:[18,38,40,46,47,57],setalpha:55,setbeta:55,setnam:[55,59],setreshapedimens:59,setup:[60,61],sever:[17,27,57],sha256:60,shape:[44,45,55,58],ship:59,should:[1,3,4,33,43,44,49,55,56,57,58,61,62],shown:59,shuffl:59,side:[51,59],signifi:[44,45],significantli:51,similar:[55,59],simonyan:61,simpil:61,simpl:59,simplifi:49,sinc:[51,59,61],singl:[44,45,51,59,61,62],singular:55,site:[51,59],size:[3,4,42,44,45,51,58,59,61,62],size_t:[3,4,42,61],softmax:51,sole:61,some:[49,50,51,52,53,55,59,61],someth:[41,51],sort:55,sourc:[43,53,58],space:61,specif:[36,51,52,53,58],specifi:[3,4,55,56,57,58,59,62],specifii:58,src:54,ssd_trace:62,ssd_trt:62,sstream:[20,42],stabl:54,stack:[50,61],stage:49,stand:50,standard:[56,62],start:[4
9,60],start_dim:59,state:[49,55,59],statement:51,static_cast:42,statu:42,std:[3,4,24,27,29,30,31,32,33,35,40,42,43,44,45,59,61],stdout:[34,57,58],steamlin:61,step:[56,61],still:[42,61],stitch:59,stop:59,storag:61,store:[4,49,55,59],str:[19,41,42,46,47,57,58],straight:55,strict:62,strict_typ:[43,44,58],strictli:58,string:[3,4,18,20,21,24,27,29,30,31,32,33,35,40,42,43,55,58,59,61],stringstream:42,strip_prefix:60,struct:[1,2,21,37,43,61],structur:[33,44,53,55,59],style:43,sub:59,subdirectori:48,subgraph:[49,51,55,59],subject:53,submodul:59,subset:61,suit:56,support:[1,2,26,35,44,45,54,58,59,62],sure:[59,60],system:[49,55,56,60],take:[32,35,36,49,50,52,53,55,58,59,61],talk:56,tar:[60,61],tarbal:[59,61],target:[2,44,53,56,58,59,61,62],targets_:61,task:[31,33,61],techinqu:59,techniqu:61,tell:[55,59],templat:[20,21,39,42,43,46,47,59],tensor:[42,44,45,49,50,55,59,61],tensorcontain:55,tensorlist:55,tensorrt:[1,2,3,4,31,32,33,34,36,43,44,45,49,51,52,53,55,56,58,59,61,62],term:61,termin:[26,59,62],test:[53,62],text:57,than:[51,56],thats:[49,61],thei:[44,49,51,55,60,62],them:[50,59],theori:49,therebi:50,therefor:[33,59],thi:[1,2,31,33,40,41,42,43,44,45,49,50,51,52,53,55,59,60,61,62],think:55,third_parti:[53,60],those:49,though:[53,55,59,62],three:[44,45,52,53],threshold:62,thrid_parti:60,through:[49,50,56,59],time:[44,49,51,52,53,55,58,59,61,62],tini:61,tmp:59,tocustomclass:55,todim:59,togeth:[49,55,59],too:60,tool:[55,59],top:53,torch:[1,2,4,31,32,33,35,36,42,43,44,50,51,54,55,58,59,61,62],torch_scirpt_modul:59,torch_script_modul:59,torchscript:[32,35,36,52,53,58,62],toronto:61,tovec:59,toward:61,trace:[58,59],traced_model:59,track:[55,61],tradit:61,traget:36,train:[31,33,44,56,59,62],trainabl:51,transform:[59,61],translat:59,travers:[52,53],treat:62,tree:[43,61],trigger:59,trim:61,trt:[1,2,3,4,44,49,50,51,55,59],trt_mod:[59,61],trt_ts_modul:59,trtorch:[0,1,2,3,4,15,17,22,40,41,42,44,45,47,48,49,50,51,52,53,60,61,62],trtorch_api:[19,23,24,25,26,27,28,29,30,31,32,33,34,35,36,40,41,43,46,47],trtorch_check:55,trtorch_hidden:[19,41,46,47],trtorch_major_vers:[19,41,46,47],trtorch_minor_vers:[19,41,46,47],trtorch_patch_vers:[19,41,46,47],trtorch_unus:55,trtorch_vers:[19,41,46,47],trtorchc:56,trtorchfil:[22,46],trtorchnamespac:[0,46],tupl:[58,59],tupleconstruct:51,tupleunpack:51,tutori:[59,61],two:[55,59,60,61,62],type:[1,2,31,45,46,47,49,50,55,57,58,59,61,62],typenam:[3,4,31,33,42,43],typic:[49,55],uint64_t:[43,44],unabl:[55,59],uncom:60,under:[43,53],underli:[1,2,44,55],union:[55,59],uniqu:4,unique_ptr:[4,31],unlik:56,unpack_addmm:51,unpack_log_softmax:51,unqiue_ptr:4,unstabl:53,unsupport:[35,58],unsur:55,untest:53,until:[49,53,55],unwrap:55,unwraptodoubl:55,unwraptoint:59,upstream:59,url:60,use:[1,2,3,4,31,33,44,49,50,53,55,57,58,59,60,61,62],use_cach:[3,4,31,42,43],use_cache_:42,use_subset:61,used:[1,2,3,4,42,44,45,49,50,51,55,57,58,59,61,62],useful:55,user:[40,52,53,59,60,61],uses:[31,33,42,55,61],using:[1,2,32,36,42,44,55,56,58,59,61,62],using_int:59,usr:60,util:[55,59],valid:[2,44,55],valu:[1,2,17,43,44,49,50,55,59],value_tensor_map:[49,55],vector:[20,21,42,43,44,45,59,61],verbios:62,verbos:62,veri:61,version:[30,34,53,60],vgg16:61,via:[56,58],virtual:61,wai:[59,62],want:[40,44,59],warn:[17,42,55,57,62],websit:60,weight:[49,59],welcom:59,well:[59,61],were:59,what:[4,51,59],whatev:50,when:[26,42,44,49,50,51,52,53,55,57,58,59,60,61,62],where:[49,51,55,59,61],whether:[4,61],which:[2,32,33,36,44,49,50,51,52,53,55,58,59,61],whl:60,whose:51,within:[50,52,53],without:[55,59,61],work:[42,51,53,55,61],worker:6
1,workspac:[44,58,60,61,62],workspace_s:[43,44,58,61,62],would:[55,59,60,62],wrap:[52,53,59],wrapper:55,write:[3,4,31,33,42,49,56,59,61],writecalibrationcach:[3,4,42],www:[59,60,61],x86_64:[53,60],xstr:[19,41,46,47],yaml:54,you:[1,2,31,33,44,49,50,51,53,55,56,58,59,60,61,62],your:[55,56,59,60],yourself:59,zisserman:61},titles:["Class Hierarchy","Class ExtraInfo::DataType","Class ExtraInfo::DeviceType","Template Class Int8CacheCalibrator","Template Class Int8Calibrator","Define STR","Define TRTORCH_API","Define TRTORCH_HIDDEN","Define TRTORCH_MAJOR_VERSION","Define TRTORCH_PATCH_VERSION","Define TRTORCH_VERSION","Define XSTR","Define TRTORCH_MINOR_VERSION","Directory cpp","Directory api","Directory include","Directory trtorch","Enum Level","File logging.h","File macros.h","File ptq.h","File trtorch.h","File Hierarchy","Function trtorch::logging::get_reportable_log_level","Function trtorch::logging::set_logging_prefix","Function trtorch::logging::get_is_colored_output_on","Function trtorch::logging::set_is_colored_output_on","Function trtorch::logging::log","Function trtorch::logging::set_reportable_log_level","Function trtorch::logging::get_logging_prefix","Function trtorch::get_build_info","Template Function trtorch::ptq::make_int8_calibrator","Function trtorch::ConvertGraphToTRTEngine","Template Function trtorch::ptq::make_int8_cache_calibrator","Function trtorch::dump_build_info","Function trtorch::CheckMethodOperatorSupport","Function trtorch::CompileGraph","Namespace trtorch","Namespace trtorch::logging","Namespace trtorch::ptq","Program Listing for File logging.h","Program Listing for File macros.h","Program Listing for File ptq.h","Program Listing for File trtorch.h","Struct ExtraInfo","Struct ExtraInfo::InputRange","TRTorch C++ API","Full API","Full API","Conversion Phase","Execution Phase","Lowering Phase","Compiler Phases","System Overview","Useful Links for TRTorch Development","Writing Converters","TRTorch","trtorch.logging","trtorch","Getting Started","Installation","Post Training Quantization 
(PTQ)","trtorchc"],titleterms:{"class":[0,1,2,3,4,20,21,37,39,46,47],"enum":[17,18,38,46,47,58],"function":[18,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,46,47,54,58],The:59,Used:51,Useful:54,addmm:51,advic:55,ahead:56,api:[14,18,19,20,21,46,47,48,54,56],applic:61,arg:55,avail:54,background:[50,55],base:[3,4],binari:60,build:60,checkmethodoperatorsupport:35,citat:61,code:51,compil:[52,53,56,59,60],compilegraph:36,construct:50,content:[18,19,20,21,37,38,39],context:55,contigu:51,contract:55,contributor:56,convers:[49,52,53,55],convert:[49,55,59],convertgraphtotrtengin:32,cpp:[13,18,19,20,21],creat:[59,61],cudnn:60,custom:59,datatyp:1,dead:51,debug:60,defin:[5,6,7,8,9,10,11,12,19,46,47],definit:[18,19,20,21],depend:60,develop:54,devicetyp:2,dimens:54,directori:[13,14,15,16,48],distribut:60,documen:56,document:[1,2,3,4,5,6,7,8,9,10,11,12,17,23,24,25,26,27,28,29,30,31,32,33,34,35,36,44,45,54,56],dropout:51,dump_build_info:34,easier:54,elimin:51,engin:50,evalu:49,execept:51,execut:[50,52,53],executor:50,expect:54,extrainfo:[1,2,44,45],file:[16,18,19,20,21,22,40,41,42,43,46,48],flatten:51,freez:51,from:60,full:[46,47,48],fuse:51,gaurd:51,get:[56,59],get_build_info:30,get_is_colored_output_on:25,get_logging_prefix:29,get_reportable_log_level:23,gpu:56,graph:[50,51],guarante:55,hierarchi:[0,22,46],hood:59,how:61,includ:[15,18,19,20,21],indic:56,inherit:[3,4],inputrang:45,instal:60,int8cachecalibr:3,int8calibr:4,jit:56,layer:54,level:17,linear:51,link:54,list:[40,41,42,43],local:60,log:[18,23,24,25,26,27,28,29,38,40,57],logsoftmax:51,lower:[51,52,53],macro:[19,41],make_int8_cache_calibr:33,make_int8_calibr:31,modul:[51,59],namespac:[18,20,21,37,38,39,46,47],native_op:54,nest:[1,2,44,45],node:49,nvidia:56,oper:59,other:55,overview:53,own:61,packag:60,pass:51,pattern:51,phase:[49,50,51,52,53],post:61,program:[40,41,42,43],ptq:[20,31,33,39,42,61],python:[54,56,59,60],pytorch:[54,56],quantiz:61,read:54,redund:51,regist:59,relationship:[1,2,3,4,44,45],remov:51,respons:55,result:50,set_is_colored_output_on:26,set_logging_prefix:24,set_reportable_log_level:28,sometim:54,sourc:60,start:[56,59],str:5,struct:[44,45,46,47],subdirectori:[13,14,15],submodul:58,system:53,tarbal:60,templat:[3,4,31,33],tensorrt:[50,54,60],time:56,torchscript:[56,59],train:61,trtorch:[16,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,43,46,54,56,57,58,59],trtorch_api:6,trtorch_hidden:7,trtorch_major_vers:8,trtorch_minor_vers:12,trtorch_patch_vers:9,trtorch_vers:10,trtorchc:62,tupl:51,type:[3,4,44],under:59,unpack:51,unsupport:59,using:60,weight:55,what:55,work:59,write:55,xstr:11,your:61}}) \ No newline at end of file diff --git a/docs/sitemap.xml b/docs/sitemap.xml index d09258827a..4670406c27 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -1 +1 @@ -<urlset 
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/class_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_
8h_1a4422781719d7befedb364cacd91c6247.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__logging.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__ptq.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/trtorch_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_api.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_orphan.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/writing_converters.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/index.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/genindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py-modindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/search.html</loc></url></urlset> \ No newline at end of file +<urlset 
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/class_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DataType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ExtraInfo_1_1DeviceType.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a18d295a837ac71add5578860b55e5502.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a20c1fbeb21757871c52299dc52351b5f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a25ee153c325dfc7466a33cbd5c1ff055.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a48d6029a45583a06848891cb0e86f7ba.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a71b02dddfabe869498ad5a88e11c440f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1a9d31d0569348d109b1b069b972dd143e.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1abe87b341f562fd1cf40b7672e4d759da.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/define_macros_8h_1ae1c56ab8a40af292a9a4964651524d84.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/dir_cpp_api_include_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/enum_logging_8h_1a5f612ff2f783ff4fbe89d168f0d817d4.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/file_view_hierarchy.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a118d65b179defff7fff279eb9cd126cb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a396a688110397538f8b3fb7dfdaf38bb.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1a9b420280bfacc016d7e36a5704021949.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aa533955a2b908db9e5df5acdfa24715f.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1abc57d473f3af292551dee8b9c78373ad.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1adf5435f0dbb09c0d931a1b851847236b.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_logging_8h_1aef44b69c62af7cf2edc8875a9506641a.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a2cf17d43ba9117b3b4d652744b4f0447.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_
8h_1a4422781719d7befedb364cacd91c6247.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a536bba54b70e44554099d23fa3d7e804.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a5f33b142bc2f3f2aaf462270b3ad7e31.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1a726f6e7091b6b7be45b5a4275b2ffb10.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ab01696cfe08b6a5293c55935a9713c25.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/function_trtorch_8h_1ae38897d1ca4438227c970029d0f76fb5.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__logging.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/namespace_trtorch__ptq.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_logging.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_macros.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_ptq.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/program_listing_file_cpp_api_include_trtorch_trtorch.h.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/structtrtorch_1_1ExtraInfo_1_1InputRange.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/trtorch_cpp.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_api.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/_cpp_api/unabridged_orphan.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/conversion.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/execution.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/lowering.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/phases.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/system_overview.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/useful_links.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/contributors/writing_converters.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/index.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py_api/logging.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py_api/trtorch.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/getting_started.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/installation.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/ptq.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/tutorials/trtorchc.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/genindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/py-modindex.html</loc></url><url><loc>https://nvidia.github.io/TRTorch/search.html</loc></url></urlset> \ No newline at end of file diff --git a/docs/tutorials/getting_started.html b/docs/tutorials/getting_started.html index c4836fe36f..9b52d08888 100644 --- a/docs/tutorials/getting_started.html +++ b/docs/tutorials/getting_started.html @@ -365,6 +365,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" 
href="trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -878,7 +883,14 @@ <h2 id="compiling-with-trtorch-in-python"> </span> </code> objects or dictionaries of minimum, optimial and maximum sizes. You can also specify settings such as -operating precision for the engine or target device. +operating precision for the engine or target device. After compilation you can save the module just like any other module +to load in a deployment application. In order to load a TensorRT/TorchScript module, make sure you first import + <code class="docutils literal notranslate"> + <span class="pre"> + trtorch + </span> + </code> + . </p> <div class="highlight-python notranslate"> <div class="highlight"> @@ -898,6 +910,19 @@ <h2 id="compiling-with-trtorch-in-python"> <span class="n">trt_ts_module</span> <span class="o">=</span> <span class="n">trtorch</span><span class="o">.</span><span class="n">compile</span><span class="p">(</span><span class="n">torch_script_module</span><span class="p">,</span> <span class="n">compile_settings</span><span class="p">)</span> +<span class="n">input_data</span> <span class="o">=</span> <span class="n">input_data</span><span class="o">.</span><span class="n">half</span><span class="p">()</span> +<span class="n">result</span> <span class="o">=</span> <span class="n">trt_ts_module</span><span class="p">(</span><span class="n">input_data</span><span class="p">)</span> +<span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">trt_ts_module</span><span class="p">,</span> <span class="s2">"trt_ts_module.ts"</span><span class="p">)</span> +</pre> + </div> + </div> + <div class="highlight-python notranslate"> + <div class="highlight"> + <pre><span></span><span class="c1"># Deployment application</span> +<span class="kn">import</span> <span class="nn">torch</span> +<span class="kn">import</span> <span class="nn">trtorch</span> + +<span class="n">trt_ts_module</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="s2">"trt_ts_module.ts"</span><span class="p">)</span> <span class="n">input_data</span> <span class="o">=</span> <span class="n">input_data</span><span class="o">.</span><span class="n">half</span><span class="p">()</span> <span class="n">result</span> <span class="o">=</span> <span class="n">trt_ts_module</span><span class="p">(</span><span class="n">input_data</span><span class="p">)</span> </pre> @@ -1044,8 +1069,46 @@ <h2 id="compiling-with-trtorch-in-c"> </div> </div> <p> - And now we are running the module in FP16 precision. + And now we are running the module in FP16 precision. You can then save the module to load later. + </p> + <div class="highlight-c++ notranslate"> + <div class="highlight"> + <pre><span></span><span class="n">trt_mod</span><span class="p">.</span><span class="n">save</span><span class="p">(</span><span class="s">"&lt;PATH TO SAVED TRT/TS MOD&gt;"</span><span class="p">)</span> +</pre> + </div> + </div> + <p> + TRTorch compiled TorchScript modules are loaded in the same way as normal TorchScript module. 
Make sure your deployment application is linked against + <code class="docutils literal notranslate"> + <span class="pre"> + libtrtorch.so + </span> + </code> </p> + <div class="highlight-c++ notranslate"> + <div class="highlight"> + <pre><span></span><span class="cp">#include</span> <span class="cpf">"torch/script.h"</span><span class="cp"></span> +<span class="cp">#include</span> <span class="cpf">"trtorch/trtorch.h"</span><span class="cp"></span> + +<span class="kt">int</span> <span class="nf">main</span><span class="p">(</span><span class="kt">int</span> <span class="n">argc</span><span class="p">,</span> <span class="k">const</span> <span class="kt">char</span><span class="o">*</span> <span class="n">argv</span><span class="p">[])</span> <span class="p">{</span> + <span class="n">torch</span><span class="o">::</span><span class="n">jit</span><span class="o">::</span><span class="n">Module</span> <span class="k">module</span><span class="p">;</span> + <span class="k">try</span> <span class="p">{</span> + <span class="c1">// Deserialize the ScriptModule from a file using torch::jit::load().</span> + <span class="k">module</span> <span class="o">=</span> <span class="n">torch</span><span class="o">::</span><span class="n">jit</span><span class="o">::</span><span class="n">load</span><span class="p">(</span><span class="s">"&lt;PATH TO SAVED TRT/TS MOD&gt;"</span><span class="p">);</span> + <span class="p">}</span> + <span class="k">catch</span> <span class="p">(</span><span class="k">const</span> <span class="n">c10</span><span class="o">::</span><span class="n">Error</span><span class="o">&amp;</span> <span class="n">e</span><span class="p">)</span> <span class="p">{</span> + <span class="n">std</span><span class="o">::</span><span class="n">cerr</span> <span class="o">&lt;&lt;</span> <span class="s">"error loading the model</span><span class="se">\n</span><span class="s">"</span><span class="p">;</span> + <span class="k">return</span> <span class="o">-</span><span class="mi">1</span><span class="p">;</span> + <span class="p">}</span> + + <span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span> <span class="n">in</span> <span class="o">=</span> <span class="n">torch</span><span class="o">::</span><span class="n">randn</span><span class="p">({</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">},</span> <span class="n">torch</span><span class="o">::</span><span class="n">kCUDA</span><span class="p">);</span> + <span class="k">auto</span> <span class="n">out</span> <span class="o">=</span> <span class="n">mod</span><span class="p">.</span><span class="n">forward</span><span class="p">(</span><span class="n">in</span><span class="p">);</span> + + <span class="n">std</span><span class="o">::</span><span class="n">cout</span> <span class="o">&lt;&lt;</span> <span class="s">"ok</span><span class="se">\n</span><span class="s">"</span><span class="p">;</span> +<span class="p">}</span> +</pre> + </div> + </div> <p> If you want to save the engine produced by TRTorch to use in a TensorRT application you can use the <code class="docutils literal notranslate"> diff --git a/docs/tutorials/installation.html b/docs/tutorials/installation.html index 8f22ca897c..f976eeedea 100644 --- a/docs/tutorials/installation.html +++ b/docs/tutorials/installation.html @@ -382,6 +382,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a 
class="md-nav__link" href="trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> diff --git a/docs/tutorials/ptq.html b/docs/tutorials/ptq.html index 91ece9207f..5089f8c9df 100644 --- a/docs/tutorials/ptq.html +++ b/docs/tutorials/ptq.html @@ -56,7 +56,7 @@ </script> <link href="../genindex.html" rel="index" title="Index"/> <link href="../search.html" rel="search" title="Search"/> - <link href="../contributors/system_overview.html" rel="next" title="System Overview"/> + <link href="trtorchc.html" rel="next" title="trtorchc"/> <link href="getting_started.html" rel="prev" title="Getting Started"/> </head> <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr"> @@ -335,6 +335,11 @@ </li> </ul> </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="trtorchc.html"> + trtorchc + </a> + </li> <li class="md-nav__item"> <span class="md-nav__link caption"> <span class="caption-text"> @@ -866,13 +871,13 @@ <h3 id="citations"> </span> </div> </a> - <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="../contributors/system_overview.html" rel="next" title="System Overview"> + <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="trtorchc.html" rel="next" title="trtorchc"> <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title"> <span class="md-flex__ellipsis"> <span class="md-footer-nav__direction"> Next </span> - System Overview + trtorchc </span> </div> <div class="md-flex__cell md-flex__cell--shrink"> diff --git a/docs/tutorials/trtorchc.html b/docs/tutorials/trtorchc.html new file mode 100644 index 0000000000..148d344c22 --- /dev/null +++ b/docs/tutorials/trtorchc.html @@ -0,0 +1,683 @@ +<!DOCTYPE html> +<html> + <head> + <meta charset="utf-8"/> + <meta content="width=device-width,initial-scale=1" name="viewport"/> + <meta content="ie=edge" http-equiv="x-ua-compatible"/> + <meta content="Copy to clipboard" name="lang:clipboard.copy"/> + <meta content="Copied to clipboard" name="lang:clipboard.copied"/> + <meta content="en" name="lang:search.language"/> + <meta content="True" name="lang:search.pipeline.stopwords"/> + <meta content="True" name="lang:search.pipeline.trimmer"/> + <meta content="No matching documents" name="lang:search.result.none"/> + <meta content="1 matching document" name="lang:search.result.one"/> + <meta content="# matching documents" name="lang:search.result.other"/> + <meta content="[\s\-]+" name="lang:search.tokenizer"/> + <link crossorigin="" href="https://fonts.gstatic.com/" rel="preconnect"/> + <link href="https://fonts.googleapis.com/css?family=Roboto+Mono:400,500,700|Roboto:300,400,400i,700&amp;display=fallback" rel="stylesheet"/> + <style> + body, + input { + font-family: "Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif + } + + code, + kbd, + pre { + font-family: "Roboto Mono", "Courier New", Courier, monospace + } + </style> + <link href="../_static/stylesheets/application.css" rel="stylesheet"/> + <link href="../_static/stylesheets/application-palette.css" rel="stylesheet"/> + <link href="../_static/stylesheets/application-fixes.css" rel="stylesheet"/> + <link href="../_static/fonts/material-icons.css" rel="stylesheet"/> + <meta content="84bd00" name="theme-color"/> + <script src="../_static/javascripts/modernizr.js"> + </script> + <title> + trtorchc — TRTorch 0.0.2 documentation + </title> + <link href="../_static/material.css" rel="stylesheet" type="text/css"/> + <link 
href="../_static/pygments.css" rel="stylesheet" type="text/css"/> + <link href="../_static/collapsible-lists/css/tree_view.css" rel="stylesheet" type="text/css"/> + <script data-url_root="../" id="documentation_options" src="../_static/documentation_options.js"> + </script> + <script src="../_static/jquery.js"> + </script> + <script src="../_static/underscore.js"> + </script> + <script src="../_static/doctools.js"> + </script> + <script src="../_static/language_data.js"> + </script> + <script src="../_static/collapsible-lists/js/CollapsibleLists.compressed.js"> + </script> + <script src="../_static/collapsible-lists/js/apply-collapsible-lists.js"> + </script> + <link href="../genindex.html" rel="index" title="Index"/> + <link href="../search.html" rel="search" title="Search"/> + <link href="../contributors/system_overview.html" rel="next" title="System Overview"/> + <link href="ptq.html" rel="prev" title="Post Training Quantization (PTQ)"/> + </head> + <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr"> + <svg class="md-svg"> + <defs data-children-count="0"> + <svg height="448" id="__github" viewbox="0 0 416 448" width="416" xmlns="http://www.w3.org/2000/svg"> + <path d="M160 304q0 10-3.125 20.5t-10.75 19T128 352t-18.125-8.5-10.75-19T96 304t3.125-20.5 10.75-19T128 256t18.125 8.5 10.75 19T160 304zm160 0q0 10-3.125 20.5t-10.75 19T288 352t-18.125-8.5-10.75-19T256 304t3.125-20.5 10.75-19T288 256t18.125 8.5 10.75 19T320 304zm40 0q0-30-17.25-51T296 232q-10.25 0-48.75 5.25Q229.5 240 208 240t-39.25-2.75Q130.75 232 120 232q-29.5 0-46.75 21T56 304q0 22 8 38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0 37.25-1.75t35-7.375 30.5-15 20.25-25.75T360 304zm56-44q0 51.75-15.25 82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5T212 416q-19.5 0-35.5-.75t-36.875-3.125-38.125-7.5-34.25-12.875T37 371.5t-21.5-28.75Q0 312 0 260q0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25 30.875Q171.5 96 212 96q37 0 70 8 26.25-20.5 46.75-30.25T376 64q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34 99.5z" fill="currentColor"> + </path> + </svg> + </defs> + </svg> + <input class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/> + <input class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/> + <label class="md-overlay" data-md-component="overlay" for="__drawer"> + </label> + <a class="md-skip" href="#tutorials/trtorchc" tabindex="1"> + Skip to content + </a> + <header class="md-header" data-md-component="header"> + <nav class="md-header-nav md-grid"> + <div class="md-flex navheader"> + <div class="md-flex__cell md-flex__cell--shrink"> + <a class="md-header-nav__button md-logo" href="../index.html" title="TRTorch 0.0.2 documentation"> + <i class="md-icon"> +  + </i> + </a> + </div> + <div class="md-flex__cell md-flex__cell--shrink"> + <label class="md-icon md-icon--menu md-header-nav__button" for="__drawer"> + </label> + </div> + <div class="md-flex__cell md-flex__cell--stretch"> + <div class="md-flex__ellipsis md-header-nav__title" data-md-component="title"> + <span class="md-header-nav__topic"> + TRTorch + </span> + <span class="md-header-nav__topic"> + trtorchc + </span> + </div> + </div> + <div class="md-flex__cell md-flex__cell--shrink"> + <label class="md-icon md-icon--search md-header-nav__button" for="__search"> + </label> + <div class="md-search" data-md-component="search" role="dialog"> + <label class="md-search__overlay" for="__search"> + </label> + <div class="md-search__inner" role="search"> + <form 
action="../search.html" class="md-search__form" method="GET" name="search"> + <input autocapitalize="off" autocomplete="off" class="md-search__input" data-md-component="query" data-md-state="active" name="q" placeholder="Search" spellcheck="false" type="text"/> + <label class="md-icon md-search__icon" for="__search"> + </label> + <button class="md-icon md-search__icon" data-md-component="reset" tabindex="-1" type="reset"> +  + </button> + </form> + <div class="md-search__output"> + <div class="md-search__scrollwrap" data-md-scrollfix=""> + <div class="md-search-result" data-md-component="result"> + <div class="md-search-result__meta"> + Type to start searching + </div> + <ol class="md-search-result__list"> + </ol> + </div> + </div> + </div> + </div> + </div> + </div> + <div class="md-flex__cell md-flex__cell--shrink"> + <div class="md-header-nav__source"> + <a class="md-source" data-md-source="github" href="https://github.com/nvidia/TRTorch/" title="Go to repository"> + <div class="md-source__icon"> + <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> + <use height="24" width="24" xlink:href="#__github"> + </use> + </svg> + </div> + <div class="md-source__repository"> + TRTorch + </div> + </a> + </div> + </div> + <div class="md-flex__cell md-flex__cell--shrink dropdown"> + <button class="dropdownbutton"> + Versions + </button> + <div class="dropdown-content md-hero"> + <a href="https://nvidia.github.io/TRTorch/" title="master"> + master + </a> + <a href="https://nvidia.github.io/TRTorch/v0.0.2/" title="v0.0.2"> + v0.0.2 + </a> + <a href="https://nvidia.github.io/TRTorch/v0.0.1/" title="v0.0.1"> + v0.0.1 + </a> + </div> + </div> + </div> + </nav> + </header> + <div class="md-container"> + <nav class="md-tabs" data-md-component="tabs"> + <div class="md-tabs__inner md-grid"> + <ul class="md-tabs__list"> + <li class="md-tabs__item"> + <a class="md-tabs__link" href="../index.html"> + TRTorch 0.0.2 documentation + </a> + </li> + </ul> + </div> + </nav> + <main class="md-main"> + <div class="md-main__inner md-grid" data-md-component="container"> + <div class="md-sidebar md-sidebar--primary" data-md-component="navigation"> + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + <nav class="md-nav md-nav--primary" data-md-level="0"> + <label class="md-nav__title md-nav__title--site" for="__drawer"> + <a class="md-nav__button md-logo" href="../index.html" title="TRTorch 0.0.2 documentation"> + <i class="md-icon"> +  + </i> + </a> + <a href="../index.html" title="TRTorch 0.0.2 documentation"> + TRTorch + </a> + </label> + <div class="md-nav__source"> + <a class="md-source" data-md-source="github" href="https://github.com/nvidia/TRTorch/" title="Go to repository"> + <div class="md-source__icon"> + <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> + <use height="24" width="24" xlink:href="#__github"> + </use> + </svg> + </div> + <div class="md-source__repository"> + TRTorch + </div> + </a> + </div> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <span class="md-nav__link caption"> + <span class="caption-text"> + Getting Started + </span> + </span> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html"> + Installation + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html#dependencies"> + Dependencies + </a> + </li> + <li 
class="md-nav__item"> + <a class="md-nav__link" href="installation.html#dependencies-for-compilation"> + Dependencies for Compilation + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html#building-using-cudnn-tensorrt-tarball-distributions"> + <strong> + Building using cuDNN &amp; TensorRT tarball distributions + </strong> + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="installation.html#building-using-locally-installed-cudnn-tensorrt"> + <strong> + Building using locally installed cuDNN &amp; TensorRT + </strong> + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html"> + Getting Started + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#creating-a-torchscript-module"> + Creating a TorchScript Module + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#working-with-torchscript-in-python"> + Working with TorchScript in Python + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#compiling-with-trtorch-in-python"> + Compiling with TRTorch in Python + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#working-with-torchscript-in-c"> + Working with TorchScript in C++ + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#compiling-with-trtorch-in-c"> + Compiling with TRTorch in C++ + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#under-the-hood"> + Under The Hood + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="getting_started.html#working-with-unsupported-operators"> + Working with Unsupported Operators + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="ptq.html"> + Post Training Quantization (PTQ) + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="ptq.html#how-to-create-your-own-ptq-application"> + How to create your own PTQ application + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <input class="md-toggle md-nav__toggle" data-md-toggle="toc" id="__toc" type="checkbox"/> + <label class="md-nav__link md-nav__link--active" for="__toc"> + trtorchc + </label> + <a class="md-nav__link md-nav__link--active" href="#"> + trtorchc + </a> + <nav class="md-nav md-nav--secondary"> + <ul class="md-nav__list" data-md-scrollfix=""> + <li class="md-nav__item"> + <a class="md-nav__extra_link" href="../_sources/tutorials/trtorchc.rst.txt"> + Show Source + </a> + </li> + </ul> + </nav> + </li> + <li class="md-nav__item"> + <span class="md-nav__link caption"> + <span class="caption-text"> + Contributor Documentation + </span> + </span> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/system_overview.html"> + System Overview + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/system_overview.html#compiler-phases"> + Compiler Phases + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html"> + Writing Converters + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#background"> + Background + </a> + </li> + <li class="md-nav__item"> + <a 
class="md-nav__link" href="../contributors/writing_converters.html#converters"> + Converters + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#converter-contract"> + Converter Contract + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#conversion-context"> + Conversion Context + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#args"> + Args + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#weights"> + Weights + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/writing_converters.html#other-advice"> + Other advice + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html"> + Useful Links for TRTorch Development + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#tensorrt-available-layers-and-expected-dimensions"> + TensorRT Available Layers and Expected Dimensions: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#tensorrt-c-documentation"> + TensorRT C++ Documentation: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#tensorrt-python-documentation-sometimes-easier-to-read"> + TensorRT Python Documentation (Sometimes easier to read): + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#pytorch-functional-api"> + PyTorch Functional API: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#pytorch-native-ops"> + PyTorch native_ops: + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../contributors/useful_links.html#pytorch-ir-documentation"> + PyTorch IR Documentation: + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <span class="md-nav__link caption"> + <span class="caption-text"> + Python API Documenation + </span> + </span> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html"> + trtorch + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html#functions"> + Functions + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html#enums"> + Enums + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/trtorch.html#submodules"> + Submodules + </a> + </li> + </ul> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../py_api/logging.html"> + trtorch.logging + </a> + </li> + <li class="md-nav__item"> + <span class="md-nav__link caption"> + <span class="caption-text"> + C++ API Documenation + </span> + </span> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html"> + TRTorch C++ API + </a> + <ul class="md-nav__list"> + <li class="md-nav__item"> + <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html#class-hierarchy"> + Class Hierarchy + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html#file-hierarchy"> + File Hierarchy + </a> + </li> + <li class="md-nav__item"> + <a class="md-nav__link" 
href="../_cpp_api/trtorch_cpp.html#full-api"> + Full API + </a> + </li> + </ul> + </li> + </ul> + </nav> + </div> + </div> + </div> + <div class="md-sidebar md-sidebar--secondary" data-md-component="toc"> + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + <nav class="md-nav md-nav--secondary"> + <ul class="md-nav__list" data-md-scrollfix=""> + <li class="md-nav__item"> + <a class="md-nav__extra_link" href="../_sources/tutorials/trtorchc.rst.txt"> + Show Source + </a> + </li> + <li class="md-nav__item" id="searchbox"> + </li> + </ul> + </nav> + </div> + </div> + </div> + <div class="md-content"> + <article class="md-content__inner md-typeset" role="main"> + <span id="id1"> + </span> + <h1 id="tutorials-trtorchc--page-root"> + trtorchc + <a class="headerlink" href="#tutorials-trtorchc--page-root" title="Permalink to this headline"> + ¶ + </a> + </h1> + <p> + <code class="docutils literal notranslate"> + <span class="pre"> + trtorchc + </span> + </code> + is a CLI application for using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use the PTQ feature). The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + </p> + <p> + All that is required to run the program after compilation is for C++ linking against + <code class="docutils literal notranslate"> + <span class="pre"> + libtrtorch.so + </span> + </code> + or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with + <code class="docutils literal notranslate"> + <span class="pre"> + torch.jit.load() + </span> + </code> + and run like you would run any other module. + </p> + <div class="highlight-txt notranslate"> + <div class="highlight"> + <pre><span></span>trtorchc [input_file_path] [output_file_path] + [input_shapes...] 
{OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + +OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be &gt;= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options +</pre> + </div> + </div> + <p> + e.g. 
+ </p> + <div class="highlight-txt notranslate"> + <div class="highlight"> + <pre><span></span>trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16 +</pre> + </div> + </div> + </article> + </div> + </div> + </main> + </div> + <footer class="md-footer"> + <div class="md-footer-nav"> + <nav class="md-footer-nav__inner md-grid"> + <a class="md-flex md-footer-nav__link md-footer-nav__link--prev" href="ptq.html" rel="prev" title="Post Training Quantization (PTQ)"> + <div class="md-flex__cell md-flex__cell--shrink"> + <i class="md-icon md-icon--arrow-back md-footer-nav__button"> + </i> + </div> + <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title"> + <span class="md-flex__ellipsis"> + <span class="md-footer-nav__direction"> + Previous + </span> + Post Training Quantization (PTQ) + </span> + </div> + </a> + <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="../contributors/system_overview.html" rel="next" title="System Overview"> + <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title"> + <span class="md-flex__ellipsis"> + <span class="md-footer-nav__direction"> + Next + </span> + System Overview + </span> + </div> + <div class="md-flex__cell md-flex__cell--shrink"> + <i class="md-icon md-icon--arrow-forward md-footer-nav__button"> + </i> + </div> + </a> + </nav> + </div> + <div class="md-footer-meta md-typeset"> + <div class="md-footer-meta__inner md-grid"> + <div class="md-footer-copyright"> + <div class="md-footer-copyright__highlight"> + © Copyright 2020, NVIDIA Corporation. + </div> + Created using + <a href="http://www.sphinx-doc.org/"> + Sphinx + </a> + 3.0.3. + and + <a href="https://github.com/bashtage/sphinx-material/"> + Material for + Sphinx + </a> + </div> + </div> + </div> + </footer> + <script src="../_static/javascripts/application.js"> + </script> + <script> + app.initialize({version: "1.0.4", url: {base: ".."}}) + </script> + </body> +</html> \ No newline at end of file diff --git a/docsrc/index.rst b/docsrc/index.rst index 5255135f58..45a1610b49 100644 --- a/docsrc/index.rst +++ b/docsrc/index.rst @@ -23,15 +23,18 @@ Getting Started * :ref:`installation` * :ref:`getting_started` * :ref:`ptq` +* :ref:`trtorchc` + .. toctree:: :caption: Getting Started - :maxdepth: 2 + :maxdepth: 1 :hidden: tutorials/installation tutorials/getting_started tutorials/ptq + tutorials/trtorchc Contributor Documentation -------------------------------- diff --git a/docsrc/tutorials/getting_started.rst b/docsrc/tutorials/getting_started.rst index 0d133a7eab..45c08b8637 100644 --- a/docsrc/tutorials/getting_started.rst +++ b/docsrc/tutorials/getting_started.rst @@ -130,7 +130,8 @@ To compile your TorchScript module with TRTorch, all you need to do is provide t to TRTorch and you will be returned an optimized TorchScript module to run or add into another PyTorch module. The only required setting is the input size or input range which is defined as a list of either list types like ``lists``, ``tuples`` or PyTorch ``size`` objects or dictionaries of minimum, optimial and maximum sizes. You can also specify settings such as -operating precision for the engine or target device. +operating precision for the engine or target device. After compilation you can save the module just like any other module +to load in a deployment application. In order to load a TensorRT/TorchScript module, make sure you first import ``trtorch``. .. 
code-block:: python @@ -152,6 +153,17 @@ operating precision for the engine or target device. input_data = input_data.half() result = trt_ts_module(input_data) + torch.jit.save(trt_ts_module, "trt_ts_module.ts") + +.. code-block:: python + + # Deployment application + import torch + import trtorch + + trt_ts_module = torch.jit.load("trt_ts_module.ts") + input_data = input_data.half() + result = trt_ts_module(input_data) .. _ts_in_cc: @@ -251,7 +263,35 @@ We can also set settings like operating precision to run in FP16. auto trt_mod = trtorch::CompileGraph(mod, info); auto out = trt_mod.forward({in}); -And now we are running the module in FP16 precision. +And now we are running the module in FP16 precision. You can then save the module to load later. + +.. code-block:: c++ + + trt_mod.save("<PATH TO SAVED TRT/TS MOD>"); + +TRTorch compiled TorchScript modules are loaded in the same way as a normal TorchScript module. Make sure your deployment application is linked against ``libtrtorch.so``. + +.. code-block:: c++ + + #include "torch/script.h" + #include "trtorch/trtorch.h" + + int main(int argc, const char* argv[]) { + torch::jit::Module module; + try { + // Deserialize the ScriptModule from a file using torch::jit::load(). + module = torch::jit::load("<PATH TO SAVED TRT/TS MOD>"); + } + catch (const c10::Error& e) { + std::cerr << "error loading the model\n"; + return -1; + } + + torch::Tensor in = torch::randn({1, 1, 32, 32}, torch::kCUDA); + auto out = module.forward({in}); + + std::cout << "ok\n"; + } If you want to save the engine produced by TRTorch to use in a TensorRT application you can use the ``ConvertGraphToTRTEngine`` API. diff --git a/docsrc/tutorials/trtorchc.rst b/docsrc/tutorials/trtorchc.rst new file mode 100644 index 0000000000..5561ee86ed --- /dev/null +++ b/docsrc/tutorials/trtorchc.rst @@ -0,0 +1,91 @@ +.. _trtorchc: + +trtorchc +================================= + +``trtorchc`` is a CLI application for using the TRTorch compiler. It serves as an easy way to compile a +TorchScript Module with TRTorch from the command-line to quickly check support or as part of +a deployment pipeline. All basic features of the compiler are supported including post training +quantization (though you must already have a calibration cache file to use the PTQ feature). The compiler can +output two formats, either a TorchScript program with the TensorRT engine embedded or +the TensorRT engine itself as a PLAN file. + +All that is required to run the program after compilation is for C++ linking against ``libtrtorch.so`` +or in Python importing the trtorch package. All other aspects of using compiled modules are identical +to standard TorchScript. Load with ``torch.jit.load()`` and run like you would run any other module. + +.. code-block:: txt + + trtorchc [input_file_path] [output_file_path] + [input_shapes...]
{OPTIONS} + + TRTorch is a compiler for TorchScript, it will compile and optimize + TorchScript programs to run on NVIDIA GPUs using TensorRT + + OPTIONS: + + -h, --help Display this help menu + Verbiosity of the compiler + -v, --verbose Dumps debugging information about the + compilation process onto the console + -w, --warnings Disables warnings generated during + compilation onto the console (warnings + are on by default) + --info Dumps info messages generated during + compilation onto the console + --build-debuggable-engine Creates a debuggable engine + --use-strict-types Restrict operating type to only use set + default operation precision + (op_precision) + --allow-gpu-fallback (Only used when targeting DLA + (device-type)) Lets engine run layers on + GPU if they are not supported on DLA + -p[precision], + --default-op-precision=[precision] + Default operating precision for the + engine (Int8 requires a + calibration-cache argument) [ float | + float32 | f32 | half | float16 | f16 | + int8 | i8 ] (default: float) + -d[type], --device-type=[type] The type of device the engine should be + built for [ gpu | dla ] (default: gpu) + --engine-capability=[capability] The type of device the engine should be + built for [ default | safe_gpu | + safe_dla ] + --calibration-cache-file=[file_path] + Path to calibration cache file to use + for post training quantization + --num-min-timing-iter=[num_iters] Number of minimization timing iterations + used to select kernels + --num-avg-timing-iters=[num_iters] + Number of averaging timing iterations + used to select kernels + --workspace-size=[workspace_size] Maximum size of workspace given to + TensorRT + --max-batch-size=[max_batch_size] Maximum batch size (must be >= 1 to be + set, 0 means not set) + -t[threshold], + --threshold=[threshold] Maximum acceptable numerical deviation + from standard torchscript output + (default 2e-5) + --save-engine Instead of compiling a full a + TorchScript program, save the created + engine to the path specified as the + output path + input_file_path Path to input TorchScript file + output_file_path Path for compiled TorchScript (or + TensorRT engine) file + input_shapes... Sizes for inputs to engine, can either + be a single size or a range defined by + Min, Optimal, Max sizes, e.g. + "(N,..,C,H,W)" + "[(MIN_N,..,MIN_C,MIN_H,MIN_W);(OPT_N,..,OPT_C,OPT_H,OPT_W);(MAX_N,..,MAX_C,MAX_H,MAX_W)]" + "--" can be used to terminate flag options and force all following + arguments to be treated as positional options + + +e.g. + +.. 
code-block:: txt + + trtorchc tests/modules/ssd_traced.jit.pt ssd_trt.ts "[(1,3,300,300); (1,3,512,512); (1, 3, 1024, 1024)]" -p f16 diff --git a/py/trtorch/__init__.py b/py/trtorch/__init__.py index e72d8482a5..db46947cb6 100644 --- a/py/trtorch/__init__.py +++ b/py/trtorch/__init__.py @@ -11,3 +11,7 @@ from trtorch._compiler import * from trtorch._types import * from trtorch import logging + +def _register_with_torch(): + trtorch_dir = os.path.dirname(__file__) + torch.ops.load_library(trtorch_dir + '/lib/trtorch.so') \ No newline at end of file diff --git a/tests/modules/BUILD b/tests/modules/BUILD index cd282a9756..ac837e1460 100644 --- a/tests/modules/BUILD +++ b/tests/modules/BUILD @@ -1,3 +1,5 @@ +package(default_visibility = ["//visibility:public"]) + config_setting( name = "use_pre_cxx11_abi", values = { @@ -15,7 +17,19 @@ test_suite( tests = [ ":test_modules_as_engines", ":test_compiled_modules", - ":test_multiple_registered_engines" + ":test_multiple_registered_engines", + ":test_serialization" + ] +) + +cc_test( + name = "test_serialization", + srcs = ["test_serialization.cpp"], + deps = [ + ":module_test", + ], + data = [ + ":jit_models" ] ) diff --git a/tests/modules/hub.py b/tests/modules/hub.py index 873dfff9ac..a35dd41b05 100644 --- a/tests/modules/hub.py +++ b/tests/modules/hub.py @@ -2,64 +2,68 @@ import torchvision.models as models models = { - "alexnet": { - "model": models.alexnet(pretrained=True), - "path": "both" - }, - "vgg16": { - "model": models.vgg16(pretrained=True), - "path": "both" - }, - "squeezenet": { - "model": models.squeezenet1_0(pretrained=True), - "path": "both" - }, - "densenet": { - "model": models.densenet161(pretrained=True), - "path": "both" - }, - "inception_v3": { - "model": models.inception_v3(pretrained=True), - "path": "both" - }, - #"googlenet": models.googlenet(pretrained=True), - "shufflenet": { - "model": models.shufflenet_v2_x1_0(pretrained=True), - "path": "both" - }, - "mobilenet_v2": { - "model": models.mobilenet_v2(pretrained=True), - "path": "both" - }, - "resnext50_32x4d": { - "model": models.resnext50_32x4d(pretrained=True), - "path": "both" - }, - "wideresnet50_2": { - "model": models.wide_resnet50_2(pretrained=True), - "path": "both" - }, - "mnasnet": { - "model": models.mnasnet1_0(pretrained=True), - "path": "both" - }, - "resnet18": { - "model": torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True), - "path": "both" - }, - "resnet50": { - "model":torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=True), - "path": "both" - }, - "fcn_resnet101": { - "model": torch.hub.load('pytorch/vision:v0.6.0', 'fcn_resnet101', pretrained=True), - "path": "script" - }, - "ssd": { - "model": torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math="fp32"), - "path": "trace" - } + "alexnet": { + "model": models.alexnet(pretrained=True), + "path": "both" + }, + "vgg16": { + "model": models.vgg16(pretrained=True), + "path": "both" + }, + "squeezenet": { + "model": models.squeezenet1_0(pretrained=True), + "path": "both" + }, + "densenet": { + "model": models.densenet161(pretrained=True), + "path": "both" + }, + "inception_v3": { + "model": models.inception_v3(pretrained=True), + "path": "both" + }, + #"googlenet": models.googlenet(pretrained=True), + "shufflenet": { + "model": models.shufflenet_v2_x1_0(pretrained=True), + "path": "both" + }, + "mobilenet_v2": { + "model": models.mobilenet_v2(pretrained=True), + "path": "both" + }, + "resnext50_32x4d": { + "model": 
models.resnext50_32x4d(pretrained=True), + "path": "both" + }, + "wideresnet50_2": { + "model": models.wide_resnet50_2(pretrained=True), + "path": "both" + }, + "mnasnet": { + "model": models.mnasnet1_0(pretrained=True), + "path": "both" + }, + "resnet18": { + "model": torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True), + "path": "both" + }, + "resnet50": { + "model":torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=True), + "path": "both" + }, + "fcn_resnet101": { + "model": torch.hub.load('pytorch/vision:v0.6.0', 'fcn_resnet101', pretrained=True), + "path": "script" + }, + "ssd": { + "model": torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math="fp32"), + "path": "trace" + }, + "faster_rcnn": { + "model": models.detection.fasterrcnn_resnet50_fpn(pretrained=True), + "path": "script" } +} for n, m in models.items(): print("Downloading {}".format(n)) diff --git a/tests/modules/test_serialization.cpp b/tests/modules/test_serialization.cpp new file mode 100644 index 0000000000..0e9c2d59f4 --- /dev/null +++ b/tests/modules/test_serialization.cpp @@ -0,0 +1,34 @@ +#include "module_test.h" + +TEST_P(ModuleTests, SerializedModuleIsStillCorrect) { + std::vector<torch::jit::IValue> post_serialized_inputs_ivalues; + std::vector<torch::jit::IValue> pre_serialized_inputs_ivalues; + for (auto in_shape : input_shapes) { + auto in = at::randint(5, in_shape, {at::kCUDA}); + post_serialized_inputs_ivalues.push_back(in.clone()); + pre_serialized_inputs_ivalues.push_back(in.clone()); + } + + auto pre_serialized_mod = trtorch::CompileGraph(mod, input_shapes); + torch::jit::IValue pre_serialized_results_ivalues = trtorch::tests::util::RunModuleForward(pre_serialized_mod, pre_serialized_inputs_ivalues); + std::vector<at::Tensor> pre_serialized_results; + pre_serialized_results.push_back(pre_serialized_results_ivalues.toTensor()); + + pre_serialized_mod.save("test_serialization_mod.ts"); + auto post_serialized_mod = torch::jit::load("test_serialization_mod.ts"); + + torch::jit::IValue post_serialized_results_ivalues = trtorch::tests::util::RunModuleForward(post_serialized_mod, post_serialized_inputs_ivalues); + std::vector<at::Tensor> post_serialized_results; + post_serialized_results.push_back(post_serialized_results_ivalues.toTensor()); + + for (size_t i = 0; i < pre_serialized_results.size(); i++) { + ASSERT_TRUE(trtorch::tests::util::almostEqual(post_serialized_results[i], pre_serialized_results[i].reshape_as(post_serialized_results[i]), 2e-5)); + } +} + + +INSTANTIATE_TEST_SUITE_P(CompiledModuleForwardIsCloseSuite, + ModuleTests, + testing::Values( + PathAndInSize({"tests/modules/resnet18_traced.jit.pt", + {{1,3,224,224}}}))); diff --git a/third_party/args/BUILD b/third_party/args/BUILD new file mode 100644 index 0000000000..5d7a14bb2c --- /dev/null +++ b/third_party/args/BUILD @@ -0,0 +1,6 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "args", + hdrs = ["args.hpp"], +) diff --git a/third_party/args/LICENSE b/third_party/args/LICENSE new file mode 100644 index 0000000000..5c792a5edf --- /dev/null +++ b/third_party/args/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2016-2017 Taylor C. 
Richberger <[email protected]> and Pavel Belikov + + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/third_party/args/args.hpp b/third_party/args/args.hpp new file mode 100644 index 0000000000..1a595268c0 --- /dev/null +++ b/third_party/args/args.hpp @@ -0,0 +1,4305 @@ +/* A simple header-only C++ argument parser library. + * + * https://github.com/Taywee/args + * + * Copyright (c) 2016-2019 Taylor C. Richberger <[email protected]> and Pavel + * Belikov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/** \file args.hxx + * \brief this single-header lets you use all of the args functionality + * + * The important stuff is done inside the args namespace + */ + +#ifndef ARGS_HXX +#define ARGS_HXX + +#include <algorithm> +#include <iterator> +#include <exception> +#include <functional> +#include <sstream> +#include <string> +#include <tuple> +#include <vector> +#include <unordered_map> +#include <unordered_set> +#include <type_traits> +#include <cstddef> + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +#define noexcept +#endif + +#ifdef ARGS_TESTNAMESPACE +namespace argstest +{ +#else + +/** \namespace args + * \brief contains all the functionality of the args library + */ +namespace args +{ +#endif + /** Getter to grab the value from the argument type. + * + * If the Get() function of the type returns a reference, so does this, and + * the value will be modifiable. 
+ */ + template <typename Option> + auto get(Option &option_) -> decltype(option_.Get()) + { + return option_.Get(); + } + + /** (INTERNAL) Count UTF-8 glyphs + * + * This is not reliable, and will fail for combinatory glyphs, but it's + * good enough here for now. + * + * \param string The string to count glyphs from + * \return The UTF-8 glyphs in the string + */ + inline std::string::size_type Glyphs(const std::string &string_) + { + std::string::size_type length = 0; + for (const char c: string_) + { + if ((c & 0xc0) != 0x80) + { + ++length; + } + } + return length; + } + + /** (INTERNAL) Wrap a vector of words into a vector of lines + * + * Empty words are skipped. Word "\n" forces wrapping. + * + * \param begin The begin iterator + * \param end The end iterator + * \param width The width of the body + * \param firstlinewidth the width of the first line, defaults to the width of the body + * \param firstlineindent the indent of the first line, defaults to 0 + * \return the vector of lines + */ + template <typename It> + inline std::vector<std::string> Wrap(It begin, + It end, + const std::string::size_type width, + std::string::size_type firstlinewidth = 0, + std::string::size_type firstlineindent = 0) + { + std::vector<std::string> output; + std::string line(firstlineindent, ' '); + bool empty = true; + + if (firstlinewidth == 0) + { + firstlinewidth = width; + } + + auto currentwidth = firstlinewidth; + + for (auto it = begin; it != end; ++it) + { + if (it->empty()) + { + continue; + } + + if (*it == "\n") + { + if (!empty) + { + output.push_back(line); + line.clear(); + empty = true; + currentwidth = width; + } + + continue; + } + + auto itemsize = Glyphs(*it); + if ((line.length() + 1 + itemsize) > currentwidth) + { + if (!empty) + { + output.push_back(line); + line.clear(); + empty = true; + currentwidth = width; + } + } + + if (itemsize > 0) + { + if (!empty) + { + line += ' '; + } + + line += *it; + empty = false; + } + } + + if (!empty) + { + output.push_back(line); + } + + return output; + } + + namespace detail + { + template <typename T> + std::string Join(const T& array, const std::string &delimiter) + { + std::string res; + for (auto &element : array) + { + if (!res.empty()) + { + res += delimiter; + } + + res += element; + } + + return res; + } + } + + /** (INTERNAL) Wrap a string into a vector of lines + * + * This is quick and hacky, but works well enough. 
You can specify a + * different width for the first line + * + * \param width The width of the body + * \param firstlinewid the width of the first line, defaults to the width of the body + * \return the vector of lines + */ + inline std::vector<std::string> Wrap(const std::string &in, const std::string::size_type width, std::string::size_type firstlinewidth = 0) + { + // Preserve existing line breaks + const auto newlineloc = in.find('\n'); + if (newlineloc != in.npos) + { + auto first = Wrap(std::string(in, 0, newlineloc), width); + auto second = Wrap(std::string(in, newlineloc + 1), width); + first.insert( + std::end(first), + std::make_move_iterator(std::begin(second)), + std::make_move_iterator(std::end(second))); + return first; + } + + std::istringstream stream(in); + std::string::size_type indent = 0; + + for (char c : in) + { + if (!isspace(c)) + { + break; + } + ++indent; + } + + return Wrap(std::istream_iterator<std::string>(stream), std::istream_iterator<std::string>(), + width, firstlinewidth, indent); + } + +#ifdef ARGS_NOEXCEPT + /// Error class, for when ARGS_NOEXCEPT is defined + enum class Error + { + None, + Usage, + Parse, + Validation, + Required, + Map, + Extra, + Help, + Subparser, + Completion, + }; +#else + /** Base error class + */ + class Error : public std::runtime_error + { + public: + Error(const std::string &problem) : std::runtime_error(problem) {} + virtual ~Error() {} + }; + + /** Errors that occur during usage + */ + class UsageError : public Error + { + public: + UsageError(const std::string &problem) : Error(problem) {} + virtual ~UsageError() {} + }; + + /** Errors that occur during regular parsing + */ + class ParseError : public Error + { + public: + ParseError(const std::string &problem) : Error(problem) {} + virtual ~ParseError() {} + }; + + /** Errors that are detected from group validation after parsing finishes + */ + class ValidationError : public Error + { + public: + ValidationError(const std::string &problem) : Error(problem) {} + virtual ~ValidationError() {} + }; + + /** Errors that when a required flag is omitted + */ + class RequiredError : public ValidationError + { + public: + RequiredError(const std::string &problem) : ValidationError(problem) {} + virtual ~RequiredError() {} + }; + + /** Errors in map lookups + */ + class MapError : public ParseError + { + public: + MapError(const std::string &problem) : ParseError(problem) {} + virtual ~MapError() {} + }; + + /** Error that occurs when a singular flag is specified multiple times + */ + class ExtraError : public ParseError + { + public: + ExtraError(const std::string &problem) : ParseError(problem) {} + virtual ~ExtraError() {} + }; + + /** An exception that indicates that the user has requested help + */ + class Help : public Error + { + public: + Help(const std::string &flag) : Error(flag) {} + virtual ~Help() {} + }; + + /** (INTERNAL) An exception that emulates coroutine-like control flow for subparsers. + */ + class SubparserError : public Error + { + public: + SubparserError() : Error("") {} + virtual ~SubparserError() {} + }; + + /** An exception that contains autocompletion reply + */ + class Completion : public Error + { + public: + Completion(const std::string &flag) : Error(flag) {} + virtual ~Completion() {} + }; +#endif + + /** A simple unified option type for unified initializer lists for the Matcher class. 
+ */ + struct EitherFlag + { + const bool isShort; + const char shortFlag; + const std::string longFlag; + EitherFlag(const std::string &flag) : isShort(false), shortFlag(), longFlag(flag) {} + EitherFlag(const char *flag) : isShort(false), shortFlag(), longFlag(flag) {} + EitherFlag(const char flag) : isShort(true), shortFlag(flag), longFlag() {} + + /** Get just the long flags from an initializer list of EitherFlags + */ + static std::unordered_set<std::string> GetLong(std::initializer_list<EitherFlag> flags) + { + std::unordered_set<std::string> longFlags; + for (const EitherFlag &flag: flags) + { + if (!flag.isShort) + { + longFlags.insert(flag.longFlag); + } + } + return longFlags; + } + + /** Get just the short flags from an initializer list of EitherFlags + */ + static std::unordered_set<char> GetShort(std::initializer_list<EitherFlag> flags) + { + std::unordered_set<char> shortFlags; + for (const EitherFlag &flag: flags) + { + if (flag.isShort) + { + shortFlags.insert(flag.shortFlag); + } + } + return shortFlags; + } + + std::string str() const + { + return isShort ? std::string(1, shortFlag) : longFlag; + } + + std::string str(const std::string &shortPrefix, const std::string &longPrefix) const + { + return isShort ? shortPrefix + std::string(1, shortFlag) : longPrefix + longFlag; + } + }; + + + + /** A class of "matchers", specifying short and flags that can possibly be + * matched. + * + * This is supposed to be constructed and then passed in, not used directly + * from user code. + */ + class Matcher + { + private: + const std::unordered_set<char> shortFlags; + const std::unordered_set<std::string> longFlags; + + public: + /** Specify short and long flags separately as iterators + * + * ex: `args::Matcher(shortFlags.begin(), shortFlags.end(), longFlags.begin(), longFlags.end())` + */ + template <typename ShortIt, typename LongIt> + Matcher(ShortIt shortFlagsStart, ShortIt shortFlagsEnd, LongIt longFlagsStart, LongIt longFlagsEnd) : + shortFlags(shortFlagsStart, shortFlagsEnd), + longFlags(longFlagsStart, longFlagsEnd) + { + if (shortFlags.empty() && longFlags.empty()) + { +#ifndef ARGS_NOEXCEPT + throw UsageError("empty Matcher"); +#endif + } + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + Error GetError() const noexcept + { + return shortFlags.empty() && longFlags.empty() ? Error::Usage : Error::None; + } +#endif + + /** Specify short and long flags separately as iterables + * + * ex: `args::Matcher(shortFlags, longFlags)` + */ + template <typename Short, typename Long> + Matcher(Short &&shortIn, Long &&longIn) : + Matcher(std::begin(shortIn), std::end(shortIn), std::begin(longIn), std::end(longIn)) + {} + + /** Specify a mixed single initializer-list of both short and long flags + * + * This is the fancy one. It takes a single initializer list of + * any number of any mixed kinds of flags. 
Chars are + * automatically interpreted as short flags, and strings are + * automatically interpreted as long flags: + * + * args::Matcher{'a'} + * args::Matcher{"foo"} + * args::Matcher{'h', "help"} + * args::Matcher{"foo", 'f', 'F', "FoO"} + */ + Matcher(std::initializer_list<EitherFlag> in) : + Matcher(EitherFlag::GetShort(in), EitherFlag::GetLong(in)) {} + + Matcher(Matcher &&other) : shortFlags(std::move(other.shortFlags)), longFlags(std::move(other.longFlags)) + {} + + ~Matcher() {} + + /** (INTERNAL) Check if there is a match of a short flag + */ + bool Match(const char flag) const + { + return shortFlags.find(flag) != shortFlags.end(); + } + + /** (INTERNAL) Check if there is a match of a long flag + */ + bool Match(const std::string &flag) const + { + return longFlags.find(flag) != longFlags.end(); + } + + /** (INTERNAL) Check if there is a match of a flag + */ + bool Match(const EitherFlag &flag) const + { + return flag.isShort ? Match(flag.shortFlag) : Match(flag.longFlag); + } + + /** (INTERNAL) Get all flag strings as a vector, with the prefixes embedded + */ + std::vector<EitherFlag> GetFlagStrings() const + { + std::vector<EitherFlag> flagStrings; + flagStrings.reserve(shortFlags.size() + longFlags.size()); + for (const char flag: shortFlags) + { + flagStrings.emplace_back(flag); + } + for (const std::string &flag: longFlags) + { + flagStrings.emplace_back(flag); + } + return flagStrings; + } + + /** (INTERNAL) Get long flag if it exists or any short flag + */ + EitherFlag GetLongOrAny() const + { + if (!longFlags.empty()) + { + return *longFlags.begin(); + } + + if (!shortFlags.empty()) + { + return *shortFlags.begin(); + } + + // should be unreachable + return ' '; + } + + /** (INTERNAL) Get short flag if it exists or any long flag + */ + EitherFlag GetShortOrAny() const + { + if (!shortFlags.empty()) + { + return *shortFlags.begin(); + } + + if (!longFlags.empty()) + { + return *longFlags.begin(); + } + + // should be unreachable + return ' '; + } + }; + + /** Attributes for flags. + */ + enum class Options + { + /** Default options. + */ + None = 0x0, + + /** Flag can't be passed multiple times. + */ + Single = 0x01, + + /** Flag can't be omitted. + */ + Required = 0x02, + + /** Flag is excluded from usage line. + */ + HiddenFromUsage = 0x04, + + /** Flag is excluded from options help. + */ + HiddenFromDescription = 0x08, + + /** Flag is global and can be used in any subcommand. + */ + Global = 0x10, + + /** Flag stops a parser. + */ + KickOut = 0x20, + + /** Flag is excluded from auto completion. 
+ */ + HiddenFromCompletion = 0x40, + + /** Flag is excluded from options help and usage line + */ + Hidden = HiddenFromUsage | HiddenFromDescription | HiddenFromCompletion, + }; + + inline Options operator | (Options lhs, Options rhs) + { + return static_cast<Options>(static_cast<int>(lhs) | static_cast<int>(rhs)); + } + + inline Options operator & (Options lhs, Options rhs) + { + return static_cast<Options>(static_cast<int>(lhs) & static_cast<int>(rhs)); + } + + class FlagBase; + class PositionalBase; + class Command; + class ArgumentParser; + + /** A simple structure of parameters for easy user-modifyable help menus + */ + struct HelpParams + { + /** The width of the help menu + */ + unsigned int width = 80; + /** The indent of the program line + */ + unsigned int progindent = 2; + /** The indent of the program trailing lines for long parameters + */ + unsigned int progtailindent = 4; + /** The indent of the description and epilogs + */ + unsigned int descriptionindent = 4; + /** The indent of the flags + */ + unsigned int flagindent = 6; + /** The indent of the flag descriptions + */ + unsigned int helpindent = 40; + /** The additional indent each group adds + */ + unsigned int eachgroupindent = 2; + + /** The minimum gutter between each flag and its help + */ + unsigned int gutter = 1; + + /** Show the terminator when both options and positional parameters are present + */ + bool showTerminator = true; + + /** Show the {OPTIONS} on the prog line when this is true + */ + bool showProglineOptions = true; + + /** Show the positionals on the prog line when this is true + */ + bool showProglinePositionals = true; + + /** The prefix for short flags + */ + std::string shortPrefix; + + /** The prefix for long flags + */ + std::string longPrefix; + + /** The separator for short flags + */ + std::string shortSeparator; + + /** The separator for long flags + */ + std::string longSeparator; + + /** The program name for help generation + */ + std::string programName; + + /** Show command's flags + */ + bool showCommandChildren = false; + + /** Show command's descriptions and epilog + */ + bool showCommandFullHelp = false; + + /** The postfix for progline when showProglineOptions is true and command has any flags + */ + std::string proglineOptions = "{OPTIONS}"; + + /** The prefix for progline when command has any subcommands + */ + std::string proglineCommand = "COMMAND"; + + /** The prefix for progline value + */ + std::string proglineValueOpen = " <"; + + /** The postfix for progline value + */ + std::string proglineValueClose = ">"; + + /** The prefix for progline required argument + */ + std::string proglineRequiredOpen = ""; + + /** The postfix for progline required argument + */ + std::string proglineRequiredClose = ""; + + /** The prefix for progline non-required argument + */ + std::string proglineNonrequiredOpen = "["; + + /** The postfix for progline non-required argument + */ + std::string proglineNonrequiredClose = "]"; + + /** Show flags in program line + */ + bool proglineShowFlags = false; + + /** Use short flags in program lines when possible + */ + bool proglinePreferShortFlags = false; + + /** Program line prefix + */ + std::string usageString; + + /** String shown in help before flags descriptions + */ + std::string optionsString = "OPTIONS:"; + + /** Display value name after all the long and short flags + */ + bool useValueNameOnce = false; + + /** Show value name + */ + bool showValueName = true; + + /** Add newline before flag description + */ + bool 
addNewlineBeforeDescription = false; + + /** The prefix for option value + */ + std::string valueOpen = "["; + + /** The postfix for option value + */ + std::string valueClose = "]"; + + /** Add choices to argument description + */ + bool addChoices = false; + + /** The prefix for choices + */ + std::string choiceString = "\nOne of: "; + + /** Add default values to argument description + */ + bool addDefault = false; + + /** The prefix for default values + */ + std::string defaultString = "\nDefault: "; + }; + + /** A number of arguments which can be consumed by an option. + * + * Represents a closed interval [min, max]. + */ + struct Nargs + { + const size_t min; + const size_t max; + + Nargs(size_t min_, size_t max_) : min{min_}, max{max_} + { +#ifndef ARGS_NOEXCEPT + if (max < min) + { + throw UsageError("Nargs: max > min"); + } +#endif + } + + Nargs(size_t num_) : min{num_}, max{num_} + { + } + + friend bool operator == (const Nargs &lhs, const Nargs &rhs) + { + return lhs.min == rhs.min && lhs.max == rhs.max; + } + + friend bool operator != (const Nargs &lhs, const Nargs &rhs) + { + return !(lhs == rhs); + } + }; + + /** Base class for all match types + */ + class Base + { + private: + Options options = {}; + + protected: + bool matched = false; + const std::string help; +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + mutable Error error = Error::None; + mutable std::string errorMsg; +#endif + + public: + Base(const std::string &help_, Options options_ = {}) : options(options_), help(help_) {} + virtual ~Base() {} + + Options GetOptions() const noexcept + { + return options; + } + + bool IsRequired() const noexcept + { + return (GetOptions() & Options::Required) != Options::None; + } + + virtual bool Matched() const noexcept + { + return matched; + } + + virtual void Validate(const std::string &, const std::string &) const + { + } + + operator bool() const noexcept + { + return Matched(); + } + + virtual std::vector<std::tuple<std::string, std::string, unsigned>> GetDescription(const HelpParams &, const unsigned indentLevel) const + { + std::tuple<std::string, std::string, unsigned> description; + std::get<1>(description) = help; + std::get<2>(description) = indentLevel; + return { std::move(description) }; + } + + virtual std::vector<Command*> GetCommands() + { + return {}; + } + + virtual bool IsGroup() const + { + return false; + } + + virtual FlagBase *Match(const EitherFlag &) + { + return nullptr; + } + + virtual PositionalBase *GetNextPositional() + { + return nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() + { + return {}; + } + + virtual bool HasFlag() const + { + return false; + } + + virtual bool HasPositional() const + { + return false; + } + + virtual bool HasCommand() const + { + return false; + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &) const + { + return {}; + } + + /// Sets a kick-out value for building subparsers + void KickOut(bool kickout_) noexcept + { + if (kickout_) + { + options = options | Options::KickOut; + } + else + { + options = static_cast<Options>(static_cast<int>(options) & ~static_cast<int>(Options::KickOut)); + } + } + + /// Gets the kick-out value for building subparsers + bool KickOut() const noexcept + { + return (options & Options::KickOut) != Options::None; + } + + virtual void Reset() noexcept + { + matched = false; +#ifdef ARGS_NOEXCEPT + error = Error::None; + errorMsg.clear(); +#endif + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const + { + return error; 
+ } + + /// Only for ARGS_NOEXCEPT + std::string GetErrorMsg() const + { + return errorMsg; + } +#endif + }; + + /** Base class for all match types that have a name + */ + class NamedBase : public Base + { + protected: + const std::string name; + bool kickout = false; + std::string defaultString; + bool defaultStringManual = false; + std::vector<std::string> choicesStrings; + bool choicesStringManual = false; + + virtual std::string GetDefaultString(const HelpParams&) const { return {}; } + + virtual std::vector<std::string> GetChoicesStrings(const HelpParams&) const { return {}; } + + virtual std::string GetNameString(const HelpParams&) const { return Name(); } + + void AddDescriptionPostfix(std::string &dest, const bool isManual, const std::string &manual, bool isGenerated, const std::string &generated, const std::string &str) const + { + if (isManual && !manual.empty()) + { + dest += str; + dest += manual; + } + else if (!isManual && isGenerated && !generated.empty()) + { + dest += str; + dest += generated; + } + } + + public: + NamedBase(const std::string &name_, const std::string &help_, Options options_ = {}) : Base(help_, options_), name(name_) {} + virtual ~NamedBase() {} + + /** Sets default value string that will be added to argument description. + * Use empty string to disable it for this argument. + */ + void HelpDefault(const std::string &str) + { + defaultStringManual = true; + defaultString = str; + } + + /** Gets default value string that will be added to argument description. + */ + std::string HelpDefault(const HelpParams &params) const + { + return defaultStringManual ? defaultString : GetDefaultString(params); + } + + /** Sets choices strings that will be added to argument description. + * Use empty vector to disable it for this argument. + */ + void HelpChoices(const std::vector<std::string> &array) + { + choicesStringManual = true; + choicesStrings = array; + } + + /** Gets choices strings that will be added to argument description. + */ + std::vector<std::string> HelpChoices(const HelpParams &params) const + { + return choicesStringManual ? choicesStrings : GetChoicesStrings(params); + } + + virtual std::vector<std::tuple<std::string, std::string, unsigned>> GetDescription(const HelpParams &params, const unsigned indentLevel) const override + { + std::tuple<std::string, std::string, unsigned> description; + std::get<0>(description) = GetNameString(params); + std::get<1>(description) = help; + std::get<2>(description) = indentLevel; + + AddDescriptionPostfix(std::get<1>(description), choicesStringManual, detail::Join(choicesStrings, ", "), params.addChoices, detail::Join(GetChoicesStrings(params), ", "), params.choiceString); + AddDescriptionPostfix(std::get<1>(description), defaultStringManual, defaultString, params.addDefault, GetDefaultString(params), params.defaultString); + + return { std::move(description) }; + } + + virtual std::string Name() const + { + return name; + } + }; + + namespace detail + { + template<typename T> + using vector = std::vector<T, std::allocator<T>>; + + template<typename K, typename T> + using unordered_map = std::unordered_map<K, T, std::hash<K>, + std::equal_to<K>, std::allocator<std::pair<const K, T> > >; + + template<typename S, typename T> + class is_streamable + { + template<typename SS, typename TT> + static auto test(int) + -> decltype( std::declval<SS&>() << std::declval<TT>(), std::true_type() ); + + template<typename, typename> + static auto test(...) 
-> std::false_type; + + public: + using type = decltype(test<S,T>(0)); + }; + + template <typename T> + using IsConvertableToString = typename is_streamable<std::ostringstream, T>::type; + + template <typename T> + typename std::enable_if<IsConvertableToString<T>::value, std::string>::type + ToString(const T &value) + { + std::ostringstream s; + s << value; + return s.str(); + } + + template <typename T> + typename std::enable_if<!IsConvertableToString<T>::value, std::string>::type + ToString(const T &) + { + return {}; + } + + template <typename T> + std::vector<std::string> MapKeysToStrings(const T &map) + { + std::vector<std::string> res; + using K = typename std::decay<decltype(std::begin(map)->first)>::type; + if (IsConvertableToString<K>::value) + { + for (const auto &p : map) + { + res.push_back(detail::ToString(p.first)); + } + + std::sort(res.begin(), res.end()); + } + return res; + } + } + + /** Base class for all flag options + */ + class FlagBase : public NamedBase + { + protected: + const Matcher matcher; + + virtual std::string GetNameString(const HelpParams &params) const override + { + const std::string postfix = !params.showValueName || NumberOfArguments() == 0 ? std::string() : Name(); + std::string flags; + const auto flagStrings = matcher.GetFlagStrings(); + const bool useValueNameOnce = flagStrings.size() == 1 ? false : params.useValueNameOnce; + for (auto it = flagStrings.begin(); it != flagStrings.end(); ++it) + { + auto &flag = *it; + if (it != flagStrings.begin()) + { + flags += ", "; + } + + flags += flag.isShort ? params.shortPrefix : params.longPrefix; + flags += flag.str(); + + if (!postfix.empty() && (!useValueNameOnce || it + 1 == flagStrings.end())) + { + flags += flag.isShort ? params.shortSeparator : params.longSeparator; + flags += params.valueOpen + postfix + params.valueClose; + } + } + + return flags; + } + + public: + FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, const bool extraError_ = false) : NamedBase(name_, help_, extraError_ ? 
Options::Single : Options()), matcher(std::move(matcher_)) {} + + FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : NamedBase(name_, help_, options_), matcher(std::move(matcher_)) {} + + virtual ~FlagBase() {} + + virtual FlagBase *Match(const EitherFlag &flag) override + { + if (matcher.Match(flag)) + { + if ((GetOptions() & Options::Single) != Options::None && matched) + { + std::ostringstream problem; + problem << "Flag '" << flag.str() << "' was passed multiple times, but is only allowed to be passed once"; +#ifdef ARGS_NOEXCEPT + error = Error::Extra; + errorMsg = problem.str(); +#else + throw ExtraError(problem.str()); +#endif + } + matched = true; + return this; + } + return nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() override + { + return { this }; + } + + const Matcher &GetMatcher() const + { + return matcher; + } + + virtual void Validate(const std::string &shortPrefix, const std::string &longPrefix) const override + { + if (!Matched() && IsRequired()) + { + std::ostringstream problem; + problem << "Flag '" << matcher.GetLongOrAny().str(shortPrefix, longPrefix) << "' is required"; +#ifdef ARGS_NOEXCEPT + error = Error::Required; + errorMsg = problem.str(); +#else + throw RequiredError(problem.str()); +#endif + } + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + if (!params.proglineShowFlags) + { + return {}; + } + + const std::string postfix = NumberOfArguments() == 0 ? std::string() : Name(); + const EitherFlag flag = params.proglinePreferShortFlags ? matcher.GetShortOrAny() : matcher.GetLongOrAny(); + std::string res = flag.str(params.shortPrefix, params.longPrefix); + if (!postfix.empty()) + { + res += params.proglineValueOpen + postfix + params.proglineValueClose; + } + + return { IsRequired() ? params.proglineRequiredOpen + res + params.proglineRequiredClose + : params.proglineNonrequiredOpen + res + params.proglineNonrequiredClose }; + } + + virtual bool HasFlag() const override + { + return true; + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const override + { + const auto nargs = NumberOfArguments(); + if (nargs.min > nargs.max) + { + return Error::Usage; + } + + const auto matcherError = matcher.GetError(); + if (matcherError != Error::None) + { + return matcherError; + } + + return error; + } +#endif + + /** Defines how many values can be consumed by this option. + * + * \return closed interval [min, max] + */ + virtual Nargs NumberOfArguments() const noexcept = 0; + + /** Parse values of this option. + * + * \param value Vector of values. It's size must be in NumberOfArguments() interval. 
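+         *
+         * A minimal sketch of a custom flag that consumes exactly two values
+         * (illustration only; the class name PairFlag is hypothetical):
+         * \code
+         * class PairFlag : public args::FlagBase
+         * {
+         *     std::string first, second;
+         *
+         * public:
+         *     PairFlag(args::Group &group, args::Matcher &&matcher)
+         *         : FlagBase("pair", "takes two values", std::move(matcher))
+         *     {
+         *         group.Add(*this);
+         *     }
+         *
+         *     args::Nargs NumberOfArguments() const noexcept override { return 2; }
+         *
+         *     void ParseValue(const std::vector<std::string> &values) override
+         *     {
+         *         first = values.at(0);   // size is guaranteed to lie within NumberOfArguments()
+         *         second = values.at(1);
+         *     }
+         * };
+         * \endcode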
+ */ + virtual void ParseValue(const std::vector<std::string> &value) = 0; + }; + + /** Base class for value-accepting flag options + */ + class ValueFlagBase : public FlagBase + { + public: + ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, const bool extraError_ = false) : FlagBase(name_, help_, std::move(matcher_), extraError_) {} + ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : FlagBase(name_, help_, std::move(matcher_), options_) {} + virtual ~ValueFlagBase() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return 1; + } + }; + + class CompletionFlag : public ValueFlagBase + { + public: + std::vector<std::string> reply; + size_t cword = 0; + std::string syntax; + + template <typename GroupClass> + CompletionFlag(GroupClass &group_, Matcher &&matcher_): ValueFlagBase("completion", "completion flag", std::move(matcher_), Options::Hidden) + { + group_.AddCompletion(*this); + } + + virtual ~CompletionFlag() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return 2; + } + + virtual void ParseValue(const std::vector<std::string> &value_) override + { + syntax = value_.at(0); + std::istringstream(value_.at(1)) >> cword; + } + + /** Get the completion reply + */ + std::string Get() noexcept + { + return detail::Join(reply, "\n"); + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + cword = 0; + syntax.clear(); + reply.clear(); + } + }; + + + /** Base class for positional options + */ + class PositionalBase : public NamedBase + { + protected: + bool ready; + + public: + PositionalBase(const std::string &name_, const std::string &help_, Options options_ = {}) : NamedBase(name_, help_, options_), ready(true) {} + virtual ~PositionalBase() {} + + bool Ready() + { + return ready; + } + + virtual void ParseValue(const std::string &value_) = 0; + + virtual void Reset() noexcept override + { + matched = false; + ready = true; +#ifdef ARGS_NOEXCEPT + error = Error::None; + errorMsg.clear(); +#endif + } + + virtual PositionalBase *GetNextPositional() override + { + return Ready() ? this : nullptr; + } + + virtual bool HasPositional() const override + { + return true; + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + return { IsRequired() ? 
params.proglineRequiredOpen + Name() + params.proglineRequiredClose + : params.proglineNonrequiredOpen + Name() + params.proglineNonrequiredClose }; + } + + virtual void Validate(const std::string &, const std::string &) const override + { + if (IsRequired() && !Matched()) + { + std::ostringstream problem; + problem << "Option '" << Name() << "' is required"; +#ifdef ARGS_NOEXCEPT + error = Error::Required; + errorMsg = problem.str(); +#else + throw RequiredError(problem.str()); +#endif + } + } + }; + + /** Class for all kinds of validating groups, including ArgumentParser + */ + class Group : public Base + { + private: + std::vector<Base*> children; + std::function<bool(const Group &)> validator; + + public: + /** Default validators + */ + struct Validators + { + static bool Xor(const Group &group) + { + return group.MatchedChildren() == 1; + } + + static bool AtLeastOne(const Group &group) + { + return group.MatchedChildren() >= 1; + } + + static bool AtMostOne(const Group &group) + { + return group.MatchedChildren() <= 1; + } + + static bool All(const Group &group) + { + return group.Children().size() == group.MatchedChildren(); + } + + static bool AllOrNone(const Group &group) + { + return (All(group) || None(group)); + } + + static bool AllChildGroups(const Group &group) + { + return std::none_of(std::begin(group.Children()), std::end(group.Children()), [](const Base* child) -> bool { + return child->IsGroup() && !child->Matched(); + }); + } + + static bool DontCare(const Group &) + { + return true; + } + + static bool CareTooMuch(const Group &) + { + return false; + } + + static bool None(const Group &group) + { + return group.MatchedChildren() == 0; + } + }; + /// If help is empty, this group will not be printed in help output + Group(const std::string &help_ = std::string(), const std::function<bool(const Group &)> &validator_ = Validators::DontCare, Options options_ = {}) : Base(help_, options_), validator(validator_) {} + /// If help is empty, this group will not be printed in help output + Group(Group &group_, const std::string &help_ = std::string(), const std::function<bool(const Group &)> &validator_ = Validators::DontCare, Options options_ = {}) : Base(help_, options_), validator(validator_) + { + group_.Add(*this); + } + virtual ~Group() {} + + /** Append a child to this Group. 
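+         *
+         * Children normally attach themselves by taking the Group in their
+         * constructors, which call Add() internally. A brief sketch (illustrative
+         * only; "parser" stands for any enclosing Group or ArgumentParser):
+         * \code
+         * args::Group atMostOne(parser, "pick at most one:", args::Group::Validators::AtMostOne);
+         * args::Flag fast(atMostOne, "fast", "fast mode", {"fast"});
+         * args::Flag safe(atMostOne, "safe", "safe mode", {"safe"});
+         * \endcode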
+ */ + void Add(Base &child) + { + children.emplace_back(&child); + } + + /** Get all this group's children + */ + const std::vector<Base *> &Children() const + { + return children; + } + + /** Return the first FlagBase that matches flag, or nullptr + * + * \param flag The flag with prefixes stripped + * \return the first matching FlagBase pointer, or nullptr if there is no match + */ + virtual FlagBase *Match(const EitherFlag &flag) override + { + for (Base *child: Children()) + { + if (FlagBase *match = child->Match(flag)) + { + return match; + } + } + return nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() override + { + std::vector<FlagBase*> res; + for (Base *child: Children()) + { + auto childRes = child->GetAllFlags(); + res.insert(res.end(), childRes.begin(), childRes.end()); + } + return res; + } + + virtual void Validate(const std::string &shortPrefix, const std::string &longPrefix) const override + { + for (Base *child: Children()) + { + child->Validate(shortPrefix, longPrefix); + } + } + + /** Get the next ready positional, or nullptr if there is none + * + * \return the first ready PositionalBase pointer, or nullptr if there is no match + */ + virtual PositionalBase *GetNextPositional() override + { + for (Base *child: Children()) + { + if (auto next = child->GetNextPositional()) + { + return next; + } + } + return nullptr; + } + + /** Get whether this has any FlagBase children + * + * \return Whether or not there are any FlagBase children + */ + virtual bool HasFlag() const override + { + return std::any_of(Children().begin(), Children().end(), [](Base *child) { return child->HasFlag(); }); + } + + /** Get whether this has any PositionalBase children + * + * \return Whether or not there are any PositionalBase children + */ + virtual bool HasPositional() const override + { + return std::any_of(Children().begin(), Children().end(), [](Base *child) { return child->HasPositional(); }); + } + + /** Get whether this has any Command children + * + * \return Whether or not there are any Command children + */ + virtual bool HasCommand() const override + { + return std::any_of(Children().begin(), Children().end(), [](Base *child) { return child->HasCommand(); }); + } + + /** Count the number of matched children this group has + */ + std::vector<Base *>::size_type MatchedChildren() const + { + // Cast to avoid warnings from -Wsign-conversion + return static_cast<std::vector<Base *>::size_type>( + std::count_if(std::begin(Children()), std::end(Children()), [](const Base *child){return child->Matched();})); + } + + /** Whether or not this group matches validation + */ + virtual bool Matched() const noexcept override + { + return validator(*this); + } + + /** Get validation + */ + bool Get() const + { + return Matched(); + } + + /** Get all the child descriptions for help generation + */ + virtual std::vector<std::tuple<std::string, std::string, unsigned>> GetDescription(const HelpParams &params, const unsigned int indent) const override + { + std::vector<std::tuple<std::string, std::string, unsigned int>> descriptions; + + // Push that group description on the back if not empty + unsigned addindent = 0; + if (!help.empty()) + { + descriptions.emplace_back(help, "", indent); + addindent = 1; + } + + for (Base *child: Children()) + { + if ((child->GetOptions() & Options::HiddenFromDescription) != Options::None) + { + continue; + } + + auto groupDescriptions = child->GetDescription(params, indent + addindent); + descriptions.insert( + std::end(descriptions), + 
std::make_move_iterator(std::begin(groupDescriptions)), + std::make_move_iterator(std::end(groupDescriptions))); + } + return descriptions; + } + + /** Get the names of positional parameters + */ + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + std::vector <std::string> names; + for (Base *child: Children()) + { + if ((child->GetOptions() & Options::HiddenFromUsage) != Options::None) + { + continue; + } + + auto groupNames = child->GetProgramLine(params); + names.insert( + std::end(names), + std::make_move_iterator(std::begin(groupNames)), + std::make_move_iterator(std::end(groupNames))); + } + return names; + } + + virtual std::vector<Command*> GetCommands() override + { + std::vector<Command*> res; + for (const auto &child : Children()) + { + auto subparsers = child->GetCommands(); + res.insert(std::end(res), std::begin(subparsers), std::end(subparsers)); + } + return res; + } + + virtual bool IsGroup() const override + { + return true; + } + + virtual void Reset() noexcept override + { + Base::Reset(); + + for (auto &child: Children()) + { + child->Reset(); + } +#ifdef ARGS_NOEXCEPT + error = Error::None; + errorMsg.clear(); +#endif + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const override + { + if (error != Error::None) + { + return error; + } + + auto it = std::find_if(Children().begin(), Children().end(), [](const Base *child){return child->GetError() != Error::None;}); + if (it == Children().end()) + { + return Error::None; + } else + { + return (*it)->GetError(); + } + } +#endif + + }; + + /** Class for using global options in ArgumentParser. + */ + class GlobalOptions : public Group + { + public: + GlobalOptions(Group &base, Base &options_) : Group(base, {}, Group::Validators::DontCare, Options::Global) + { + Add(options_); + } + }; + + /** Utility class for building subparsers with coroutines/callbacks. + * + * Brief example: + * \code + * Command command(argumentParser, "command", "my command", [](args::Subparser &s) + * { + * // your command flags/positionals + * s.Parse(); //required + * //your command code + * }); + * \endcode + * + * For ARGS_NOEXCEPT mode don't forget to check `s.GetError()` after `s.Parse()` + * and return if it isn't equals to args::Error::None. + * + * \sa Command + */ + class Subparser : public Group + { + private: + std::vector<std::string> args; + std::vector<std::string> kicked; + ArgumentParser *parser = nullptr; + const HelpParams &helpParams; + const Command &command; + bool isParsed = false; + + public: + Subparser(std::vector<std::string> args_, ArgumentParser &parser_, const Command &command_, const HelpParams &helpParams_) + : Group({}, Validators::AllChildGroups), args(std::move(args_)), parser(&parser_), helpParams(helpParams_), command(command_) + { + } + + Subparser(const Command &command_, const HelpParams &helpParams_) : Group({}, Validators::AllChildGroups), helpParams(helpParams_), command(command_) + { + } + + Subparser(const Subparser&) = delete; + Subparser(Subparser&&) = delete; + Subparser &operator = (const Subparser&) = delete; + Subparser &operator = (Subparser&&) = delete; + + const Command &GetCommand() + { + return command; + } + + /** (INTERNAL) Determines whether Parse was called or not. + */ + bool IsParsed() const + { + return isParsed; + } + + /** Continue parsing arguments for new command. + */ + void Parse(); + + /** Returns a vector of kicked out arguments. 
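+         *
+         * A rough sketch of forwarding everything after a kick-out flag
+         * (illustration only; the command and flag names are hypothetical):
+         * \code
+         * args::Command exec(parser, "exec", "run a child process", [](args::Subparser &s)
+         * {
+         *     args::Flag marker(s, "args", "arguments after this flag are forwarded", {"args"});
+         *     marker.KickOut(true);
+         *     s.Parse();
+         *     const std::vector<std::string> &rest = s.KickedOut();
+         *     // rest holds whatever followed --args, left unparsed
+         * });
+         * \endcode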
+ * + * \sa Base::KickOut + */ + const std::vector<std::string> &KickedOut() const noexcept + { + return kicked; + } + }; + + /** Main class for building subparsers. + * + * /sa Subparser + */ + class Command : public Group + { + private: + friend class Subparser; + + std::string name; + std::string help; + std::string description; + std::string epilog; + std::string proglinePostfix; + + std::function<void(Subparser&)> parserCoroutine; + bool commandIsRequired = true; + Command *selectedCommand = nullptr; + + mutable std::vector<std::tuple<std::string, std::string, unsigned>> subparserDescription; + mutable std::vector<std::string> subparserProgramLine; + mutable bool subparserHasFlag = false; + mutable bool subparserHasPositional = false; + mutable bool subparserHasCommand = false; +#ifdef ARGS_NOEXCEPT + mutable Error subparserError = Error::None; +#endif + mutable Subparser *subparser = nullptr; + + protected: + + class RaiiSubparser + { + public: + RaiiSubparser(ArgumentParser &parser_, std::vector<std::string> args_); + RaiiSubparser(const Command &command_, const HelpParams &params_); + + ~RaiiSubparser() + { + command.subparser = oldSubparser; + } + + Subparser &Parser() + { + return parser; + } + + private: + const Command &command; + Subparser parser; + Subparser *oldSubparser; + }; + + Command() = default; + + std::function<void(Subparser&)> &GetCoroutine() + { + return selectedCommand != nullptr ? selectedCommand->GetCoroutine() : parserCoroutine; + } + + Command &SelectedCommand() + { + Command *res = this; + while (res->selectedCommand != nullptr) + { + res = res->selectedCommand; + } + + return *res; + } + + const Command &SelectedCommand() const + { + const Command *res = this; + while (res->selectedCommand != nullptr) + { + res = res->selectedCommand; + } + + return *res; + } + + void UpdateSubparserHelp(const HelpParams &params) const + { + if (parserCoroutine) + { + RaiiSubparser coro(*this, params); +#ifndef ARGS_NOEXCEPT + try + { + parserCoroutine(coro.Parser()); + } + catch (args::SubparserError&) + { + } +#else + parserCoroutine(coro.Parser()); +#endif + } + } + + public: + Command(Group &base_, std::string name_, std::string help_, std::function<void(Subparser&)> coroutine_ = {}) + : name(std::move(name_)), help(std::move(help_)), parserCoroutine(std::move(coroutine_)) + { + base_.Add(*this); + } + + /** The description that appears on the prog line after options + */ + const std::string &ProglinePostfix() const + { return proglinePostfix; } + + /** The description that appears on the prog line after options + */ + void ProglinePostfix(const std::string &proglinePostfix_) + { this->proglinePostfix = proglinePostfix_; } + + /** The description that appears above options + */ + const std::string &Description() const + { return description; } + /** The description that appears above options + */ + + void Description(const std::string &description_) + { this->description = description_; } + + /** The description that appears below options + */ + const std::string &Epilog() const + { return epilog; } + + /** The description that appears below options + */ + void Epilog(const std::string &epilog_) + { this->epilog = epilog_; } + + /** The name of command + */ + const std::string &Name() const + { return name; } + + /** The description of command + */ + const std::string &Help() const + { return help; } + + /** If value is true, parser will fail if no command was parsed. + * + * Default: true. 
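+         *
+         * For example, to accept an invocation with no subcommand at all
+         * (sketch; the parser and command names are placeholders):
+         * \code
+         * args::ArgumentParser parser("tool");
+         * args::Command build(parser, "build", "build the project");
+         * parser.RequireCommand(false);   // "tool" with no command no longer fails validation
+         * \endcode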
+ */ + void RequireCommand(bool value) + { commandIsRequired = value; } + + virtual bool IsGroup() const override + { return false; } + + virtual bool Matched() const noexcept override + { return Base::Matched(); } + + operator bool() const noexcept + { return Matched(); } + + void Match() noexcept + { matched = true; } + + void SelectCommand(Command *c) noexcept + { + selectedCommand = c; + + if (c != nullptr) + { + c->Match(); + } + } + + virtual FlagBase *Match(const EitherFlag &flag) override + { + if (selectedCommand != nullptr) + { + if (auto *res = selectedCommand->Match(flag)) + { + return res; + } + + for (auto *child: Children()) + { + if ((child->GetOptions() & Options::Global) != Options::None) + { + if (auto *res = child->Match(flag)) + { + return res; + } + } + } + + return nullptr; + } + + if (subparser != nullptr) + { + return subparser->Match(flag); + } + + return Matched() ? Group::Match(flag) : nullptr; + } + + virtual std::vector<FlagBase*> GetAllFlags() override + { + std::vector<FlagBase*> res; + + if (!Matched()) + { + return res; + } + + for (auto *child: Children()) + { + if (selectedCommand == nullptr || (child->GetOptions() & Options::Global) != Options::None) + { + auto childFlags = child->GetAllFlags(); + res.insert(res.end(), childFlags.begin(), childFlags.end()); + } + } + + if (selectedCommand != nullptr) + { + auto childFlags = selectedCommand->GetAllFlags(); + res.insert(res.end(), childFlags.begin(), childFlags.end()); + } + + if (subparser != nullptr) + { + auto childFlags = subparser->GetAllFlags(); + res.insert(res.end(), childFlags.begin(), childFlags.end()); + } + + return res; + } + + virtual PositionalBase *GetNextPositional() override + { + if (selectedCommand != nullptr) + { + if (auto *res = selectedCommand->GetNextPositional()) + { + return res; + } + + for (auto *child: Children()) + { + if ((child->GetOptions() & Options::Global) != Options::None) + { + if (auto *res = child->GetNextPositional()) + { + return res; + } + } + } + + return nullptr; + } + + if (subparser != nullptr) + { + return subparser->GetNextPositional(); + } + + return Matched() ? Group::GetNextPositional() : nullptr; + } + + virtual bool HasFlag() const override + { + return subparserHasFlag || Group::HasFlag(); + } + + virtual bool HasPositional() const override + { + return subparserHasPositional || Group::HasPositional(); + } + + virtual bool HasCommand() const override + { + return true; + } + + std::vector<std::string> GetCommandProgramLine(const HelpParams &params) const + { + UpdateSubparserHelp(params); + + auto res = Group::GetProgramLine(params); + res.insert(res.end(), subparserProgramLine.begin(), subparserProgramLine.end()); + + if (!params.proglineCommand.empty() && (Group::HasCommand() || subparserHasCommand)) + { + res.insert(res.begin(), commandIsRequired ? 
params.proglineCommand : "[" + params.proglineCommand + "]"); + } + + if (!Name().empty()) + { + res.insert(res.begin(), Name()); + } + + if ((subparserHasFlag || Group::HasFlag()) && params.showProglineOptions && !params.proglineShowFlags) + { + res.push_back(params.proglineOptions); + } + + if (!ProglinePostfix().empty()) + { + std::string line; + for (char c : ProglinePostfix()) + { + if (isspace(c)) + { + if (!line.empty()) + { + res.push_back(line); + line.clear(); + } + + if (c == '\n') + { + res.push_back("\n"); + } + } + else + { + line += c; + } + } + + if (!line.empty()) + { + res.push_back(line); + } + } + + return res; + } + + virtual std::vector<std::string> GetProgramLine(const HelpParams &params) const override + { + if (!Matched()) + { + return {}; + } + + return GetCommandProgramLine(params); + } + + virtual std::vector<Command*> GetCommands() override + { + if (selectedCommand != nullptr) + { + return selectedCommand->GetCommands(); + } + + if (Matched()) + { + return Group::GetCommands(); + } + + return { this }; + } + + virtual std::vector<std::tuple<std::string, std::string, unsigned>> GetDescription(const HelpParams &params, const unsigned int indent) const override + { + std::vector<std::tuple<std::string, std::string, unsigned>> descriptions; + unsigned addindent = 0; + + UpdateSubparserHelp(params); + + if (!Matched()) + { + if (params.showCommandFullHelp) + { + std::ostringstream s; + bool empty = true; + for (const auto &progline: GetCommandProgramLine(params)) + { + if (!empty) + { + s << ' '; + } + else + { + empty = false; + } + + s << progline; + } + + descriptions.emplace_back(s.str(), "", indent); + } + else + { + descriptions.emplace_back(Name(), help, indent); + } + + if (!params.showCommandChildren && !params.showCommandFullHelp) + { + return descriptions; + } + + addindent = 1; + } + + if (params.showCommandFullHelp && !Matched()) + { + descriptions.emplace_back("", "", indent + addindent); + descriptions.emplace_back(Description().empty() ? 
Help() : Description(), "", indent + addindent); + descriptions.emplace_back("", "", indent + addindent); + } + + for (Base *child: Children()) + { + if ((child->GetOptions() & Options::HiddenFromDescription) != Options::None) + { + continue; + } + + auto groupDescriptions = child->GetDescription(params, indent + addindent); + descriptions.insert( + std::end(descriptions), + std::make_move_iterator(std::begin(groupDescriptions)), + std::make_move_iterator(std::end(groupDescriptions))); + } + + for (auto childDescription: subparserDescription) + { + std::get<2>(childDescription) += indent + addindent; + descriptions.push_back(std::move(childDescription)); + } + + if (params.showCommandFullHelp && !Matched()) + { + descriptions.emplace_back("", "", indent + addindent); + if (!Epilog().empty()) + { + descriptions.emplace_back(Epilog(), "", indent + addindent); + descriptions.emplace_back("", "", indent + addindent); + } + } + + return descriptions; + } + + virtual void Validate(const std::string &shortprefix, const std::string &longprefix) const override + { + if (!Matched()) + { + return; + } + + auto onValidationError = [&] + { + std::ostringstream problem; + problem << "Group validation failed somewhere!"; +#ifdef ARGS_NOEXCEPT + error = Error::Validation; + errorMsg = problem.str(); +#else + throw ValidationError(problem.str()); +#endif + }; + + for (Base *child: Children()) + { + if (child->IsGroup() && !child->Matched()) + { + onValidationError(); + } + + child->Validate(shortprefix, longprefix); + } + + if (subparser != nullptr) + { + subparser->Validate(shortprefix, longprefix); + if (!subparser->Matched()) + { + onValidationError(); + } + } + + if (selectedCommand == nullptr && commandIsRequired && (Group::HasCommand() || subparserHasCommand)) + { + std::ostringstream problem; + problem << "Command is required"; +#ifdef ARGS_NOEXCEPT + error = Error::Validation; + errorMsg = problem.str(); +#else + throw ValidationError(problem.str()); +#endif + } + } + + virtual void Reset() noexcept override + { + Group::Reset(); + selectedCommand = nullptr; + subparserProgramLine.clear(); + subparserDescription.clear(); + subparserHasFlag = false; + subparserHasPositional = false; + subparserHasCommand = false; +#ifdef ARGS_NOEXCEPT + subparserError = Error::None; +#endif + } + +#ifdef ARGS_NOEXCEPT + /// Only for ARGS_NOEXCEPT + virtual Error GetError() const override + { + if (!Matched()) + { + return Error::None; + } + + if (error != Error::None) + { + return error; + } + + if (subparserError != Error::None) + { + return subparserError; + } + + return Group::GetError(); + } +#endif + }; + + /** The main user facing command line argument parser class + */ + class ArgumentParser : public Command + { + friend class Subparser; + + private: + std::string longprefix; + std::string shortprefix; + + std::string longseparator; + + std::string terminator; + + bool allowJoinedShortValue = true; + bool allowJoinedLongValue = true; + bool allowSeparateShortValue = true; + bool allowSeparateLongValue = true; + + CompletionFlag *completion = nullptr; + bool readCompletion = false; + + protected: + enum class OptionType + { + LongFlag, + ShortFlag, + Positional + }; + + OptionType ParseOption(const std::string &s, bool allowEmpty = false) + { + if (s.find(longprefix) == 0 && (allowEmpty || s.length() > longprefix.length())) + { + return OptionType::LongFlag; + } + + if (s.find(shortprefix) == 0 && (allowEmpty || s.length() > shortprefix.length())) + { + return OptionType::ShortFlag; + } + + return 
OptionType::Positional; + } + + template <typename It> + bool Complete(FlagBase &flag, It it, It end) + { + auto nextIt = it; + if (!readCompletion || (++nextIt != end)) + { + return false; + } + + const auto &chunk = *it; + for (auto &choice : flag.HelpChoices(helpParams)) + { + AddCompletionReply(chunk, choice); + } + +#ifndef ARGS_NOEXCEPT + throw Completion(completion->Get()); +#else + return true; +#endif + } + + /** (INTERNAL) Parse flag's values + * + * \param arg The string to display in error message as a flag name + * \param[in, out] it The iterator to first value. It will point to the last value + * \param end The end iterator + * \param joinedArg Joined value (e.g. bar in --foo=bar) + * \param canDiscardJoined If true joined value can be parsed as flag not as a value (as in -abcd) + * \param[out] values The vector to store parsed arg's values + */ + template <typename It> + std::string ParseArgsValues(FlagBase &flag, const std::string &arg, It &it, It end, + const bool allowSeparate, const bool allowJoined, + const bool hasJoined, const std::string &joinedArg, + const bool canDiscardJoined, std::vector<std::string> &values) + { + values.clear(); + + Nargs nargs = flag.NumberOfArguments(); + + if (hasJoined && !allowJoined && nargs.min != 0) + { + return "Flag '" + arg + "' was passed a joined argument, but these are disallowed"; + } + + if (hasJoined) + { + if (!canDiscardJoined || nargs.max != 0) + { + values.push_back(joinedArg); + } + } else if (!allowSeparate) + { + if (nargs.min != 0) + { + return "Flag '" + arg + "' was passed a separate argument, but these are disallowed"; + } + } else + { + auto valueIt = it; + ++valueIt; + + while (valueIt != end && + values.size() < nargs.max && + (nargs.min == nargs.max || ParseOption(*valueIt) == OptionType::Positional)) + { + if (Complete(flag, valueIt, end)) + { + it = end; + return ""; + } + + values.push_back(*valueIt); + ++it; + ++valueIt; + } + } + + if (values.size() > nargs.max) + { + return "Passed an argument into a non-argument flag: " + arg; + } else if (values.size() < nargs.min) + { + if (nargs.min == 1 && nargs.max == 1) + { + return "Flag '" + arg + "' requires an argument but received none"; + } else if (nargs.min == 1) + { + return "Flag '" + arg + "' requires at least one argument but received none"; + } else if (nargs.min != nargs.max) + { + return "Flag '" + arg + "' requires at least " + std::to_string(nargs.min) + + " arguments but received " + std::to_string(values.size()); + } else + { + return "Flag '" + arg + "' requires " + std::to_string(nargs.min) + + " arguments but received " + std::to_string(values.size()); + } + } + + return {}; + } + + template <typename It> + bool ParseLong(It &it, It end) + { + const auto &chunk = *it; + const auto argchunk = chunk.substr(longprefix.size()); + // Try to separate it, in case of a separator: + const auto separator = longseparator.empty() ? argchunk.npos : argchunk.find(longseparator); + // If the separator is in the argument, separate it. + const auto arg = (separator != argchunk.npos ? + std::string(argchunk, 0, separator) + : argchunk); + const auto joined = (separator != argchunk.npos ? 
+ argchunk.substr(separator + longseparator.size()) + : std::string()); + + if (auto flag = Match(arg)) + { + std::vector<std::string> values; + const std::string errorMessage = ParseArgsValues(*flag, arg, it, end, allowSeparateLongValue, allowJoinedLongValue, + separator != argchunk.npos, joined, false, values); + if (!errorMessage.empty()) + { +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + + if (!readCompletion) + { + flag->ParseValue(values); + } + + if (flag->KickOut()) + { + ++it; + return false; + } + } else + { + const std::string errorMessage("Flag could not be matched: " + arg); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + + return true; + } + + template <typename It> + bool ParseShort(It &it, It end) + { + const auto &chunk = *it; + const auto argchunk = chunk.substr(shortprefix.size()); + for (auto argit = std::begin(argchunk); argit != std::end(argchunk); ++argit) + { + const auto arg = *argit; + + if (auto flag = Match(arg)) + { + const std::string value(argit + 1, std::end(argchunk)); + std::vector<std::string> values; + const std::string errorMessage = ParseArgsValues(*flag, std::string(1, arg), it, end, + allowSeparateShortValue, allowJoinedShortValue, + !value.empty(), value, !value.empty(), values); + + if (!errorMessage.empty()) + { +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + + if (!readCompletion) + { + flag->ParseValue(values); + } + + if (flag->KickOut()) + { + ++it; + return false; + } + + if (!values.empty()) + { + break; + } + } else + { + const std::string errorMessage("Flag could not be matched: '" + std::string(1, arg) + "'"); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return false; +#endif + } + } + + return true; + } + + bool AddCompletionReply(const std::string &cur, const std::string &choice) + { + if (cur.empty() || choice.find(cur) == 0) + { + if (completion->syntax == "bash" && ParseOption(choice) == OptionType::LongFlag && choice.find(longseparator) != std::string::npos) + { + completion->reply.push_back(choice.substr(choice.find(longseparator) + 1)); + } else + { + completion->reply.push_back(choice); + } + return true; + } + + return false; + } + + template <typename It> + bool Complete(It it, It end) + { + auto nextIt = it; + if (!readCompletion || (++nextIt != end)) + { + return false; + } + + const auto &chunk = *it; + auto pos = GetNextPositional(); + std::vector<Command *> commands = GetCommands(); + const auto optionType = ParseOption(chunk, true); + + if (!commands.empty() && (chunk.empty() || optionType == OptionType::Positional)) + { + for (auto &cmd : commands) + { + if ((cmd->GetOptions() & Options::HiddenFromCompletion) == Options::None) + { + AddCompletionReply(chunk, cmd->Name()); + } + } + } else + { + bool hasPositionalCompletion = true; + + if (!commands.empty()) + { + for (auto &cmd : commands) + { + if ((cmd->GetOptions() & Options::HiddenFromCompletion) == Options::None) + { + AddCompletionReply(chunk, cmd->Name()); + } + } + } else if (pos) + { + if ((pos->GetOptions() & Options::HiddenFromCompletion) == Options::None) + { + auto choices = pos->HelpChoices(helpParams); + hasPositionalCompletion = !choices.empty() || optionType != OptionType::Positional; + for (auto 
&choice : choices) + { + AddCompletionReply(chunk, choice); + } + } + } + + if (hasPositionalCompletion) + { + auto flags = GetAllFlags(); + for (auto flag : flags) + { + if ((flag->GetOptions() & Options::HiddenFromCompletion) != Options::None) + { + continue; + } + + auto &matcher = flag->GetMatcher(); + if (!AddCompletionReply(chunk, matcher.GetShortOrAny().str(shortprefix, longprefix))) + { + for (auto &flagName : matcher.GetFlagStrings()) + { + if (AddCompletionReply(chunk, flagName.str(shortprefix, longprefix))) + { + break; + } + } + } + } + + if (optionType == OptionType::LongFlag && allowJoinedLongValue) + { + const auto separator = longseparator.empty() ? chunk.npos : chunk.find(longseparator); + if (separator != chunk.npos) + { + std::string arg(chunk, 0, separator); + if (auto flag = this->Match(arg.substr(longprefix.size()))) + { + for (auto &choice : flag->HelpChoices(helpParams)) + { + AddCompletionReply(chunk, arg + longseparator + choice); + } + } + } + } else if (optionType == OptionType::ShortFlag && allowJoinedShortValue) + { + if (chunk.size() > shortprefix.size() + 1) + { + auto arg = chunk.at(shortprefix.size()); + //TODO: support -abcVALUE where a and b take no value + if (auto flag = this->Match(arg)) + { + for (auto &choice : flag->HelpChoices(helpParams)) + { + AddCompletionReply(chunk, shortprefix + arg + choice); + } + } + } + } + } + } + +#ifndef ARGS_NOEXCEPT + throw Completion(completion->Get()); +#else + return true; +#endif + } + + template <typename It> + It Parse(It begin, It end) + { + bool terminated = false; + std::vector<Command *> commands = GetCommands(); + + // Check all arg chunks + for (auto it = begin; it != end; ++it) + { + if (Complete(it, end)) + { + return end; + } + + const auto &chunk = *it; + + if (!terminated && chunk == terminator) + { + terminated = true; + } else if (!terminated && ParseOption(chunk) == OptionType::LongFlag) + { + if (!ParseLong(it, end)) + { + return it; + } + } else if (!terminated && ParseOption(chunk) == OptionType::ShortFlag) + { + if (!ParseShort(it, end)) + { + return it; + } + } else if (!terminated && !commands.empty()) + { + auto itCommand = std::find_if(commands.begin(), commands.end(), [&chunk](Command *c) { return c->Name() == chunk; }); + if (itCommand == commands.end()) + { + const std::string errorMessage("Unknown command: " + chunk); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return it; +#endif + } + + SelectCommand(*itCommand); + + if (const auto &coroutine = GetCoroutine()) + { + ++it; + RaiiSubparser coro(*this, std::vector<std::string>(it, end)); + coroutine(coro.Parser()); +#ifdef ARGS_NOEXCEPT + error = GetError(); + if (error != Error::None) + { + return end; + } + + if (!coro.Parser().IsParsed()) + { + error = Error::Usage; + return end; + } +#else + if (!coro.Parser().IsParsed()) + { + throw UsageError("Subparser::Parse was not called"); + } +#endif + + break; + } + + commands = GetCommands(); + } else + { + auto pos = GetNextPositional(); + if (pos) + { + pos->ParseValue(chunk); + + if (pos->KickOut()) + { + return ++it; + } + } else + { + const std::string errorMessage("Passed in argument, but no positional arguments were ready to receive it: " + chunk); +#ifndef ARGS_NOEXCEPT + throw ParseError(errorMessage); +#else + error = Error::Parse; + errorMsg = errorMessage; + return it; +#endif + } + } + + if (!readCompletion && completion != nullptr && completion->Matched()) + { +#ifdef ARGS_NOEXCEPT + error = 
Error::Completion; +#endif + readCompletion = true; + ++it; + const auto argsLeft = static_cast<size_t>(std::distance(it, end)); + if (completion->cword == 0 || argsLeft <= 1 || completion->cword >= argsLeft) + { +#ifndef ARGS_NOEXCEPT + throw Completion(""); +#endif + } + + std::vector<std::string> curArgs(++it, end); + curArgs.resize(completion->cword); + + if (completion->syntax == "bash") + { + // bash tokenizes --flag=value as --flag=value + for (size_t idx = 0; idx < curArgs.size(); ) + { + if (idx > 0 && curArgs[idx] == "=") + { + curArgs[idx - 1] += "="; + // Avoid warnings from -Wsign-conversion + const auto signedIdx = static_cast<std::ptrdiff_t>(idx); + if (idx + 1 < curArgs.size()) + { + curArgs[idx - 1] += curArgs[idx + 1]; + curArgs.erase(curArgs.begin() + signedIdx, curArgs.begin() + signedIdx + 2); + } else + { + curArgs.erase(curArgs.begin() + signedIdx); + } + } else + { + ++idx; + } + } + + } +#ifndef ARGS_NOEXCEPT + try + { + Parse(curArgs.begin(), curArgs.end()); + throw Completion(""); + } + catch (Completion &) + { + throw; + } + catch (args::Error&) + { + throw Completion(""); + } +#else + return Parse(curArgs.begin(), curArgs.end()); +#endif + } + } + + Validate(shortprefix, longprefix); + return end; + } + + public: + HelpParams helpParams; + + ArgumentParser(const std::string &description_, const std::string &epilog_ = std::string()) + { + Description(description_); + Epilog(epilog_); + LongPrefix("--"); + ShortPrefix("-"); + LongSeparator("="); + Terminator("--"); + SetArgumentSeparations(true, true, true, true); + matched = true; + } + + void AddCompletion(CompletionFlag &completionFlag) + { + completion = &completionFlag; + Add(completionFlag); + } + + /** The program name for help generation + */ + const std::string &Prog() const + { return helpParams.programName; } + /** The program name for help generation + */ + void Prog(const std::string &prog_) + { this->helpParams.programName = prog_; } + + /** The prefix for long flags + */ + const std::string &LongPrefix() const + { return longprefix; } + /** The prefix for long flags + */ + void LongPrefix(const std::string &longprefix_) + { + this->longprefix = longprefix_; + this->helpParams.longPrefix = longprefix_; + } + + /** The prefix for short flags + */ + const std::string &ShortPrefix() const + { return shortprefix; } + /** The prefix for short flags + */ + void ShortPrefix(const std::string &shortprefix_) + { + this->shortprefix = shortprefix_; + this->helpParams.shortPrefix = shortprefix_; + } + + /** The separator for long flags + */ + const std::string &LongSeparator() const + { return longseparator; } + /** The separator for long flags + */ + void LongSeparator(const std::string &longseparator_) + { + if (longseparator_.empty()) + { + const std::string errorMessage("longseparator can not be set to empty"); +#ifdef ARGS_NOEXCEPT + error = Error::Usage; + errorMsg = errorMessage; +#else + throw UsageError(errorMessage); +#endif + } else + { + this->longseparator = longseparator_; + this->helpParams.longSeparator = allowJoinedLongValue ? longseparator_ : " "; + } + } + + /** The terminator that forcibly separates flags from positionals + */ + const std::string &Terminator() const + { return terminator; } + /** The terminator that forcibly separates flags from positionals + */ + void Terminator(const std::string &terminator_) + { this->terminator = terminator_; } + + /** Get the current argument separation parameters. + * + * See SetArgumentSeparations for details on what each one means. 
+ */ + void GetArgumentSeparations( + bool &allowJoinedShortValue_, + bool &allowJoinedLongValue_, + bool &allowSeparateShortValue_, + bool &allowSeparateLongValue_) const + { + allowJoinedShortValue_ = this->allowJoinedShortValue; + allowJoinedLongValue_ = this->allowJoinedLongValue; + allowSeparateShortValue_ = this->allowSeparateShortValue; + allowSeparateLongValue_ = this->allowSeparateLongValue; + } + + /** Change allowed option separation. + * + * \param allowJoinedShortValue_ Allow a short flag that accepts an argument to be passed its argument immediately next to it (ie. in the same argv field) + * \param allowJoinedLongValue_ Allow a long flag that accepts an argument to be passed its argument separated by the longseparator (ie. in the same argv field) + * \param allowSeparateShortValue_ Allow a short flag that accepts an argument to be passed its argument separated by whitespace (ie. in the next argv field) + * \param allowSeparateLongValue_ Allow a long flag that accepts an argument to be passed its argument separated by whitespace (ie. in the next argv field) + */ + void SetArgumentSeparations( + const bool allowJoinedShortValue_, + const bool allowJoinedLongValue_, + const bool allowSeparateShortValue_, + const bool allowSeparateLongValue_) + { + this->allowJoinedShortValue = allowJoinedShortValue_; + this->allowJoinedLongValue = allowJoinedLongValue_; + this->allowSeparateShortValue = allowSeparateShortValue_; + this->allowSeparateLongValue = allowSeparateLongValue_; + + this->helpParams.longSeparator = allowJoinedLongValue ? longseparator : " "; + this->helpParams.shortSeparator = allowJoinedShortValue ? "" : " "; + } + + /** Pass the help menu into an ostream + */ + void Help(std::ostream &help_) const + { + auto &command = SelectedCommand(); + const auto &commandDescription = command.Description().empty() ? 
command.Help() : command.Description(); + const auto description_text = Wrap(commandDescription, helpParams.width - helpParams.descriptionindent); + const auto epilog_text = Wrap(command.Epilog(), helpParams.width - helpParams.descriptionindent); + + const bool hasoptions = command.HasFlag(); + const bool hasarguments = command.HasPositional(); + + std::vector<std::string> prognameline; + prognameline.push_back(helpParams.usageString); + prognameline.push_back(Prog()); + auto commandProgLine = command.GetProgramLine(helpParams); + prognameline.insert(prognameline.end(), commandProgLine.begin(), commandProgLine.end()); + + const auto proglines = Wrap(prognameline.begin(), prognameline.end(), + helpParams.width - (helpParams.progindent + helpParams.progtailindent), + helpParams.width - helpParams.progindent); + auto progit = std::begin(proglines); + if (progit != std::end(proglines)) + { + help_ << std::string(helpParams.progindent, ' ') << *progit << '\n'; + ++progit; + } + for (; progit != std::end(proglines); ++progit) + { + help_ << std::string(helpParams.progtailindent, ' ') << *progit << '\n'; + } + + help_ << '\n'; + + if (!description_text.empty()) + { + for (const auto &line: description_text) + { + help_ << std::string(helpParams.descriptionindent, ' ') << line << "\n"; + } + help_ << "\n"; + } + + bool lastDescriptionIsNewline = false; + + if (!helpParams.optionsString.empty()) + { + help_ << std::string(helpParams.progindent, ' ') << helpParams.optionsString << "\n\n"; + } + + for (const auto &desc: command.GetDescription(helpParams, 0)) + { + lastDescriptionIsNewline = std::get<0>(desc).empty() && std::get<1>(desc).empty(); + const auto groupindent = std::get<2>(desc) * helpParams.eachgroupindent; + const auto flags = Wrap(std::get<0>(desc), helpParams.width - (helpParams.flagindent + helpParams.helpindent + helpParams.gutter)); + const auto info = Wrap(std::get<1>(desc), helpParams.width - (helpParams.helpindent + groupindent)); + + std::string::size_type flagssize = 0; + for (auto flagsit = std::begin(flags); flagsit != std::end(flags); ++flagsit) + { + if (flagsit != std::begin(flags)) + { + help_ << '\n'; + } + help_ << std::string(groupindent + helpParams.flagindent, ' ') << *flagsit; + flagssize = Glyphs(*flagsit); + } + + auto infoit = std::begin(info); + // groupindent is on both sides of this inequality, and therefore can be removed + if ((helpParams.flagindent + flagssize + helpParams.gutter) > helpParams.helpindent || infoit == std::end(info) || helpParams.addNewlineBeforeDescription) + { + help_ << '\n'; + } else + { + // groupindent is on both sides of the minus sign, and therefore doesn't actually need to be in here + help_ << std::string(helpParams.helpindent - (helpParams.flagindent + flagssize), ' ') << *infoit << '\n'; + ++infoit; + } + for (; infoit != std::end(info); ++infoit) + { + help_ << std::string(groupindent + helpParams.helpindent, ' ') << *infoit << '\n'; + } + } + if (hasoptions && hasarguments && helpParams.showTerminator) + { + lastDescriptionIsNewline = false; + for (const auto &item: Wrap(std::string("\"") + terminator + "\" can be used to terminate flag options and force all following arguments to be treated as positional options", helpParams.width - helpParams.flagindent)) + { + help_ << std::string(helpParams.flagindent, ' ') << item << '\n'; + } + } + + if (!lastDescriptionIsNewline) + { + help_ << "\n"; + } + + for (const auto &line: epilog_text) + { + help_ << std::string(helpParams.descriptionindent, ' ') << line << "\n"; + } + } + + /** 
Generate a help menu as a string. + * + * \return the help text as a single string + */ + std::string Help() const + { + std::ostringstream help_; + Help(help_); + return help_.str(); + } + + virtual void Reset() noexcept override + { + Command::Reset(); + matched = true; + readCompletion = false; + } + + /** Parse all arguments. + * + * \param begin an iterator to the beginning of the argument list + * \param end an iterator to the past-the-end element of the argument list + * \return the iterator after the last parsed value. Only useful for kick-out + */ + template <typename It> + It ParseArgs(It begin, It end) + { + // Reset all Matched statuses and errors + Reset(); +#ifdef ARGS_NOEXCEPT + error = GetError(); + if (error != Error::None) + { + return end; + } +#endif + return Parse(begin, end); + } + + /** Parse all arguments. + * + * \param args an iterable of the arguments + * \return the iterator after the last parsed value. Only useful for kick-out + */ + template <typename T> + auto ParseArgs(const T &args) -> decltype(std::begin(args)) + { + return ParseArgs(std::begin(args), std::end(args)); + } + + /** Convenience function to parse the CLI from argc and argv + * + * Just assigns the program name and vectorizes arguments for passing into ParseArgs() + * + * \return whether or not all arguments were parsed. This works for detecting kick-out, but is generally useless as it can't do anything with it. + */ + bool ParseCLI(const int argc, const char * const * argv) + { + if (Prog().empty()) + { + Prog(argv[0]); + } + const std::vector<std::string> args(argv + 1, argv + argc); + return ParseArgs(args) == std::end(args); + } + + template <typename T> + bool ParseCLI(const T &args) + { + return ParseArgs(args) == std::end(args); + } + }; + + inline Command::RaiiSubparser::RaiiSubparser(ArgumentParser &parser_, std::vector<std::string> args_) + : command(parser_.SelectedCommand()), parser(std::move(args_), parser_, command, parser_.helpParams), oldSubparser(command.subparser) + { + command.subparser = &parser; + } + + inline Command::RaiiSubparser::RaiiSubparser(const Command &command_, const HelpParams &params_): command(command_), parser(command, params_), oldSubparser(command.subparser) + { + command.subparser = &parser; + } + + inline void Subparser::Parse() + { + isParsed = true; + Reset(); + command.subparserDescription = GetDescription(helpParams, 0); + command.subparserHasFlag = HasFlag(); + command.subparserHasPositional = HasPositional(); + command.subparserHasCommand = HasCommand(); + command.subparserProgramLine = GetProgramLine(helpParams); + if (parser == nullptr) + { +#ifndef ARGS_NOEXCEPT + throw args::SubparserError(); +#else + error = Error::Subparser; + return; +#endif + } + + auto it = parser->Parse(args.begin(), args.end()); + command.Validate(parser->ShortPrefix(), parser->LongPrefix()); + kicked.assign(it, args.end()); + +#ifdef ARGS_NOEXCEPT + command.subparserError = GetError(); +#endif + } + + inline std::ostream &operator<<(std::ostream &os, const ArgumentParser &parser) + { + parser.Help(os); + return os; + } + + /** Boolean argument matcher + */ + class Flag : public FlagBase + { + public: + Flag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_): FlagBase(name_, help_, std::move(matcher_), options_) + { + group_.Add(*this); + } + + Flag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const bool extraError_ = false): Flag(group_, name_, help_, std::move(matcher_), 
extraError_ ? Options::Single : Options::None) + { + } + + virtual ~Flag() {} + + /** Get whether this was matched + */ + bool Get() const + { + return Matched(); + } + + virtual Nargs NumberOfArguments() const noexcept override + { + return 0; + } + + virtual void ParseValue(const std::vector<std::string>&) override + { + } + }; + + /** Help flag class + * + * Works like a regular flag, but throws an instance of Help when it is matched + */ + class HelpFlag : public Flag + { + public: + HelpFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_ = {}): Flag(group_, name_, help_, std::move(matcher_), options_) {} + + virtual ~HelpFlag() {} + + virtual void ParseValue(const std::vector<std::string> &) + { +#ifdef ARGS_NOEXCEPT + error = Error::Help; + errorMsg = Name(); +#else + throw Help(Name()); +#endif + } + + /** Get whether this was matched + */ + bool Get() const noexcept + { + return Matched(); + } + }; + + /** A flag class that simply counts the number of times it's matched + */ + class CounterFlag : public Flag + { + private: + const int startcount; + int count; + + public: + CounterFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const int startcount_ = 0, Options options_ = {}): + Flag(group_, name_, help_, std::move(matcher_), options_), startcount(startcount_), count(startcount_) {} + + virtual ~CounterFlag() {} + + virtual FlagBase *Match(const EitherFlag &arg) override + { + auto me = FlagBase::Match(arg); + if (me) + { + ++count; + } + return me; + } + + /** Get the count + */ + int &Get() noexcept + { + return count; + } + + virtual void Reset() noexcept override + { + FlagBase::Reset(); + count = startcount; + } + }; + + /** A flag class that calls a function when it's matched + */ + class ActionFlag : public FlagBase + { + private: + std::function<void(const std::vector<std::string> &)> action; + Nargs nargs; + + public: + ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Nargs nargs_, std::function<void(const std::vector<std::string> &)> action_, Options options_ = {}): + FlagBase(name_, help_, std::move(matcher_), options_), action(std::move(action_)), nargs(nargs_) + { + group_.Add(*this); + } + + ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function<void(const std::string &)> action_, Options options_ = {}): + FlagBase(name_, help_, std::move(matcher_), options_), nargs(1) + { + group_.Add(*this); + action = [action_](const std::vector<std::string> &a) { return action_(a.at(0)); }; + } + + ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function<void()> action_, Options options_ = {}): + FlagBase(name_, help_, std::move(matcher_), options_), nargs(0) + { + group_.Add(*this); + action = [action_](const std::vector<std::string> &) { return action_(); }; + } + + virtual Nargs NumberOfArguments() const noexcept override + { return nargs; } + + virtual void ParseValue(const std::vector<std::string> &value) override + { action(value); } + }; + + /** A default Reader class for argument classes + * + * If destination type is assignable to std::string it uses an assignment to std::string. + * Otherwise ValueReader simply uses a std::istringstream to read into the destination type, and + * raises a ParseError if there are any characters left. 
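+     *
+     * The reader is a template parameter of the value-accepting flag types, so it
+     * can be swapped per argument. A minimal sketch of a custom reader (illustration
+     * only; HexReader is a hypothetical name and error handling is omitted):
+     * \code
+     * struct HexReader
+     * {
+     *     bool operator()(const std::string &, const std::string &value, unsigned &destination)
+     *     {
+     *         std::istringstream(value) >> std::hex >> destination;
+     *         return true;
+     *     }
+     * };
+     *
+     * args::ValueFlag<unsigned, HexReader> address(parser, "address", "an address in hex", {"address"});
+     * \endcode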
+ */ + struct ValueReader + { + template <typename T> + typename std::enable_if<!std::is_assignable<T, std::string>::value, bool>::type + operator ()(const std::string &name, const std::string &value, T &destination) + { + std::istringstream ss(value); + bool failed = !(ss >> destination); + + if (!failed) + { + ss >> std::ws; + } + + if (ss.rdbuf()->in_avail() > 0 || failed) + { +#ifdef ARGS_NOEXCEPT + (void)name; + return false; +#else + std::ostringstream problem; + problem << "Argument '" << name << "' received invalid value type '" << value << "'"; + throw ParseError(problem.str()); +#endif + } + return true; + } + + template <typename T> + typename std::enable_if<std::is_assignable<T, std::string>::value, bool>::type + operator()(const std::string &, const std::string &value, T &destination) + { + destination = value; + return true; + } + }; + + /** An argument-accepting flag class + * + * \tparam T the type to extract the argument as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + typename Reader = ValueReader> + class ValueFlag : public ValueFlagBase + { + protected: + T value; + T defaultValue; + + virtual std::string GetDefaultString(const HelpParams&) const override + { + return detail::ToString(defaultValue); + } + + private: + Reader reader; + + public: + + ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_, Options options_): ValueFlagBase(name_, help_, std::move(matcher_), options_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_ = T(), const bool extraError_ = false): ValueFlag(group_, name_, help_, std::move(matcher_), defaultValue_, extraError_ ? 
Options::Single : Options::None) + { + } + + ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_): ValueFlag(group_, name_, help_, std::move(matcher_), T(), options_) + { + } + + virtual ~ValueFlag() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value_ = values_.at(0); + +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, this->value)) + { + error = Error::Parse; + } +#else + reader(name, value_, this->value); +#endif + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + value = defaultValue; + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + /** Get the default value + */ + const T &GetDefault() noexcept + { + return defaultValue; + } + }; + + /** An optional argument-accepting flag class + * + * \tparam T the type to extract the argument as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + typename Reader = ValueReader> + class ImplicitValueFlag : public ValueFlag<T, Reader> + { + protected: + T implicitValue; + + public: + + ImplicitValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &implicitValue_, const T &defaultValue_ = T(), Options options_ = {}) + : ValueFlag<T, Reader>(group_, name_, help_, std::move(matcher_), defaultValue_, options_), implicitValue(implicitValue_) + { + } + + ImplicitValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_ = T(), Options options_ = {}) + : ValueFlag<T, Reader>(group_, name_, help_, std::move(matcher_), defaultValue_, options_), implicitValue(defaultValue_) + { + } + + ImplicitValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) + : ValueFlag<T, Reader>(group_, name_, help_, std::move(matcher_), {}, options_), implicitValue() + { + } + + virtual ~ImplicitValueFlag() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return {0, 1}; + } + + virtual void ParseValue(const std::vector<std::string> &value_) override + { + if (value_.empty()) + { + this->value = implicitValue; + } else + { + ValueFlag<T, Reader>::ParseValue(value_); + } + } + }; + + /** A variadic arguments accepting flag class + * + * \tparam T the type to extract the argument as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader> + class NargsValueFlag : public FlagBase + { + protected: + + List<T> values; + const List<T> defaultValues; + Nargs nargs; + Reader reader; + + public: + + typedef List<T> Container; + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator 
const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + NargsValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Nargs nargs_, const List<T> &defaultValues_ = {}, Options options_ = {}) + : FlagBase(name_, help_, std::move(matcher_), options_), values(defaultValues_), defaultValues(defaultValues_),nargs(nargs_) + { + group_.Add(*this); + } + + virtual ~NargsValueFlag() {} + + virtual Nargs NumberOfArguments() const noexcept override + { + return nargs; + } + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + values.clear(); + + for (const std::string &value : values_) + { + T v; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value, v)) + { + error = Error::Parse; + } +#else + reader(name, value, v); +#endif + values.insert(std::end(values), v); + } + } + + List<T> &Get() noexcept + { + return values; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + + virtual void Reset() noexcept override + { + FlagBase::Reset(); + values = defaultValues; + } + + virtual FlagBase *Match(const EitherFlag &arg) override + { + const bool wasMatched = Matched(); + auto me = FlagBase::Match(arg); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + }; + + /** An argument-accepting flag class that pushes the found values into a list + * + * \tparam T the type to extract the argument as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader> + class ValueFlagList : public ValueFlagBase + { + private: + using Container = List<T>; + Container values; + const Container defaultValues; + Reader reader; + + public: + + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + ValueFlagList(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Container &defaultValues_ = Container(), Options options_ = {}): + ValueFlagBase(name_, help_, std::move(matcher_), options_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + virtual ~ValueFlagList() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value_ = values_.at(0); + + T v; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, v)) + { + error = Error::Parse; + } 
+#else + reader(name, value_, v); +#endif + values.insert(std::end(values), v); + } + + /** Get the values + */ + Container &Get() noexcept + { + return values; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + values = defaultValues; + } + + virtual FlagBase *Match(const EitherFlag &arg) override + { + const bool wasMatched = Matched(); + auto me = FlagBase::Match(arg); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; + + /** A mapping value flag class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapFlag : public ValueFlagBase + { + private: + const Map<K, T> map; + T value; + const T defaultValue; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + + MapFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, const T &defaultValue_, Options options_): ValueFlagBase(name_, help_, std::move(matcher_), options_), map(map_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + MapFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, const T &defaultValue_ = T(), const bool extraError_ = false): MapFlag(group_, name_, help_, std::move(matcher_), map_, defaultValue_, extraError_ ? 
Options::Single : Options::None) + { + } + + MapFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, Options options_): MapFlag(group_, name_, help_, std::move(matcher_), map_, T(), options_) + { + } + + virtual ~MapFlag() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value_ = values_.at(0); + + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, key)) + { + error = Error::Parse; + } +#else + reader(name, value_, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef ARGS_NOEXCEPT + error = Error::Map; + errorMsg = problem.str(); +#else + throw MapError(problem.str()); +#endif + } else + { + this->value = it->second; + } + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + value = defaultValue; + } + }; + + /** A mapping value flag list class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapFlagList : public ValueFlagBase + { + private: + using Container = List<T>; + const Map<K, T> map; + Container values; + const Container defaultValues; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + MapFlagList(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const Map<K, T> &map_, const Container &defaultValues_ = Container()): ValueFlagBase(name_, help_, std::move(matcher_)), map(map_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + virtual ~MapFlagList() {} + + virtual void ParseValue(const std::vector<std::string> &values_) override + { + const std::string &value = values_.at(0); + + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value, key)) + { + error = Error::Parse; + } +#else + reader(name, value, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef ARGS_NOEXCEPT + error = Error::Map; + errorMsg = problem.str(); +#else + throw 
MapError(problem.str()); +#endif + } else + { + this->values.emplace_back(it->second); + } + } + + /** Get the value + */ + Container &Get() noexcept + { + return values; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + virtual void Reset() noexcept override + { + ValueFlagBase::Reset(); + values = defaultValues; + } + + virtual FlagBase *Match(const EitherFlag &arg) override + { + const bool wasMatched = Matched(); + auto me = FlagBase::Match(arg); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; + + /** A positional argument class + * + * \tparam T the type to extract the argument as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + typename Reader = ValueReader> + class Positional : public PositionalBase + { + private: + T value; + const T defaultValue; + Reader reader; + public: + Positional(Group &group_, const std::string &name_, const std::string &help_, const T &defaultValue_ = T(), Options options_ = {}): PositionalBase(name_, help_, options_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + Positional(Group &group_, const std::string &name_, const std::string &help_, Options options_): Positional(group_, name_, help_, T(), options_) + { + } + + virtual ~Positional() {} + + virtual void ParseValue(const std::string &value_) override + { +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, this->value)) + { + error = Error::Parse; + } +#else + reader(name, value_, this->value); +#endif + ready = false; + matched = true; + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + value = defaultValue; + } + }; + + /** A positional argument class that pushes the found values into a list + * + * \tparam T the type to extract the argument as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + */ + template < + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader> + class PositionalList : public PositionalBase + { + private: + using Container = List<T>; + Container values; + const Container defaultValues; + Reader reader; + + public: + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef 
std::reverse_iterator<const_iterator> const_reverse_iterator; + + PositionalList(Group &group_, const std::string &name_, const std::string &help_, const Container &defaultValues_ = Container(), Options options_ = {}): PositionalBase(name_, help_, options_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + PositionalList(Group &group_, const std::string &name_, const std::string &help_, Options options_): PositionalList(group_, name_, help_, {}, options_) + { + } + + virtual ~PositionalList() {} + + virtual void ParseValue(const std::string &value_) override + { + T v; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, v)) + { + error = Error::Parse; + } +#else + reader(name, value_, v); +#endif + values.insert(std::end(values), v); + matched = true; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + /** Get the values + */ + Container &Get() noexcept + { + return values; + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + values = defaultValues; + } + + virtual PositionalBase *GetNextPositional() override + { + const bool wasMatched = Matched(); + auto me = PositionalBase::GetNextPositional(); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; + + /** A positional argument mapping class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. 
Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapPositional : public PositionalBase + { + private: + const Map<K, T> map; + T value; + const T defaultValue; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + + MapPositional(Group &group_, const std::string &name_, const std::string &help_, const Map<K, T> &map_, const T &defaultValue_ = T(), Options options_ = {}): + PositionalBase(name_, help_, options_), map(map_), value(defaultValue_), defaultValue(defaultValue_) + { + group_.Add(*this); + } + + virtual ~MapPositional() {} + + virtual void ParseValue(const std::string &value_) override + { + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, key)) + { + error = Error::Parse; + } +#else + reader(name, value_, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef ARGS_NOEXCEPT + error = Error::Map; + errorMsg = problem.str(); +#else + throw MapError(problem.str()); +#endif + } else + { + this->value = it->second; + ready = false; + matched = true; + } + } + + /** Get the value + */ + T &Get() noexcept + { + return value; + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + value = defaultValue; + } + }; + + /** A positional argument mapping list class + * + * \tparam K the type to extract the argument as + * \tparam T the type to store the result as + * \tparam List the list type that houses the values + * \tparam Reader The functor type used to read the argument, taking the name, value, and destination reference with operator(), and returning a bool (if ARGS_NOEXCEPT is defined) + * \tparam Map The Map type. 
Should operate like std::map or std::unordered_map + */ + template < + typename K, + typename T, + template <typename...> class List = detail::vector, + typename Reader = ValueReader, + template <typename...> class Map = detail::unordered_map> + class MapPositionalList : public PositionalBase + { + private: + using Container = List<T>; + + const Map<K, T> map; + Container values; + const Container defaultValues; + Reader reader; + + protected: + virtual std::vector<std::string> GetChoicesStrings(const HelpParams &) const override + { + return detail::MapKeysToStrings(map); + } + + public: + typedef T value_type; + typedef typename Container::allocator_type allocator_type; + typedef typename Container::pointer pointer; + typedef typename Container::const_pointer const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator iterator; + typedef typename Container::const_iterator const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + + MapPositionalList(Group &group_, const std::string &name_, const std::string &help_, const Map<K, T> &map_, const Container &defaultValues_ = Container(), Options options_ = {}): + PositionalBase(name_, help_, options_), map(map_), values(defaultValues_), defaultValues(defaultValues_) + { + group_.Add(*this); + } + + virtual ~MapPositionalList() {} + + virtual void ParseValue(const std::string &value_) override + { + K key; +#ifdef ARGS_NOEXCEPT + if (!reader(name, value_, key)) + { + error = Error::Parse; + } +#else + reader(name, value_, key); +#endif + auto it = map.find(key); + if (it == std::end(map)) + { + std::ostringstream problem; + problem << "Could not find key '" << key << "' in map for arg '" << name << "'"; +#ifdef ARGS_NOEXCEPT + error = Error::Map; + errorMsg = problem.str(); +#else + throw MapError(problem.str()); +#endif + } else + { + this->values.emplace_back(it->second); + matched = true; + } + } + + /** Get the value + */ + Container &Get() noexcept + { + return values; + } + + virtual std::string Name() const override + { + return name + std::string("..."); + } + + virtual void Reset() noexcept override + { + PositionalBase::Reset(); + values = defaultValues; + } + + virtual PositionalBase *GetNextPositional() override + { + const bool wasMatched = Matched(); + auto me = PositionalBase::GetNextPositional(); + if (me && !wasMatched) + { + values.clear(); + } + return me; + } + + iterator begin() noexcept + { + return values.begin(); + } + + const_iterator begin() const noexcept + { + return values.begin(); + } + + const_iterator cbegin() const noexcept + { + return values.cbegin(); + } + + iterator end() noexcept + { + return values.end(); + } + + const_iterator end() const noexcept + { + return values.end(); + } + + const_iterator cend() const noexcept + { + return values.cend(); + } + }; +} + +#endif
CTPUG__wafer-657
icalendar 5.0 breaks the tests
With icalendar 5.0, the test_ics_view test fails with
```
  File "/home/runner/work/wafer/wafer/wafer/schedule/tests/test_views.py", line 1526, in test_ics_view
    self.assertEqual(event['dtstart'].params['value'], 'DATE-TIME')
  File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/icalendar/caselessdict.py", line 40, in __getitem__
    return super().__getitem__(key.upper())
KeyError: 'VALUE'
```
but it works fine with 4.1.

There's nothing obvious in the icalendar changelog about this behaviour change, so more investigation is needed.
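A hedged sketch of a version-agnostic check (the helper name and the sample payload are illustrative, not wafer's actual test code): instead of asserting on the `VALUE` parameter, which raises KeyError under icalendar 5.0 as shown above, assert on the decoded datetime, which behaves the same on 4.x and 5.x.

```python
import datetime as D

from icalendar import Calendar

# Minimal hand-written ICS payload, standing in for the view's real output.
SAMPLE_ICS = b"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//example//schedule//EN
BEGIN:VEVENT
UID:example-0
DTSTART:20130922T100000Z
SUMMARY:Example talk
END:VEVENT
END:VCALENDAR
"""


def check_first_dtstart(ics_bytes):
    calendar = Calendar.from_ical(ics_bytes)
    event = calendar.walk(name='VEVENT')[0]
    # The 'VALUE' param lookup is what breaks on icalendar 5.0; the decoded
    # datetime is stable across versions, so assert on that instead.
    assert event['dtstart'].dt == D.datetime(
        2013, 9, 22, 10, 0, 0, tzinfo=D.timezone.utc)


check_first_dtstart(SAMPLE_ICS)
```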
[ { "content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<5',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n 'django-reversion',\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n 'icalendar>=4.0,<5.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n 'py3dns',\n 'pyLibravatar',\n 'pytz',\n 'requests',\n]\n\nSOURCES = []\n\n\nwith open('README.rst', 'r') as f:\n long_description = f.read()\n\n\ndef compile_translations():\n try:\n subprocess.check_call(['./manage.py', 'compilemessages'])\n except subprocess.CalledProcessError:\n print(\"WARNING: cannot compile translations.\")\n return glob('wafer/locale/*/LC_MESSAGES/django.mo')\n\n\nsetup(\n name=\"wafer\",\n version=\"0.14.1a\",\n url='http://github.com/CTPUG/wafer',\n license='ISC',\n description=\"A wafer-thin Django library for running small conferences.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author='CTPUG',\n author_email='[email protected]',\n packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRES,\n dependency_links=SOURCES,\n data_files=[\n ('locale', compile_translations()),\n ],\n setup_requires=[\n # Add setuptools-git, so we get correct behaviour for\n # include_package_data\n 'setuptools_git >= 1.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<5',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n 'django-reversion',\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n 'icalendar>=4.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n 'py3dns',\n 'pyLibravatar',\n 'pytz',\n 'requests',\n]\n\nSOURCES = []\n\n\nwith open('README.rst', 'r') as f:\n long_description = f.read()\n\n\ndef compile_translations():\n try:\n subprocess.check_call(['./manage.py', 'compilemessages'])\n except subprocess.CalledProcessError:\n print(\"WARNING: cannot compile translations.\")\n return glob('wafer/locale/*/LC_MESSAGES/django.mo')\n\n\nsetup(\n name=\"wafer\",\n version=\"0.14.1a\",\n url='http://github.com/CTPUG/wafer',\n license='ISC',\n description=\"A wafer-thin Django library for running small conferences.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author='CTPUG',\n author_email='[email protected]',\n packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRES,\n dependency_links=SOURCES,\n data_files=[\n ('locale', compile_translations()),\n ],\n setup_requires=[\n # Add setuptools-git, so we get correct behaviour for\n # include_package_data\n 'setuptools_git >= 1.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 94d66192..ff944903 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ 'django-select2', 'djangorestframework', 'drf-extensions>=0.5.0', - 'icalendar>=4.0,<5.0', + 'icalendar>=4.0', 'jsonfield', 'markdown>=2.5', 'pillow', diff --git a/wafer/schedule/tests/test_views.py b/wafer/schedule/tests/test_views.py index 8656e727..0f1e1cdd 100644 --- a/wafer/schedule/tests/test_views.py +++ b/wafer/schedule/tests/test_views.py @@ -1523,7 +1523,6 @@ def test_ics_view(self): self.assertEqual(len(calendar.walk(name='VEVENT')), 9) # Check we have the right time in places event = calendar.walk(name='VEVENT')[0] - self.assertEqual(event['dtstart'].params['value'], 'DATE-TIME') self.assertEqual(event['dtstart'].dt, D.datetime(2013, 9, 22, 10, 0, 0, tzinfo=D.timezone.utc)) # Check that we have the page slug in the ical event self.assertTrue('/test0/' in event['url'])
translate__translate-4646
rc file parser doesn't treat all whitespace similarly
e.g. res.rc:
```
IDD_DIALOG DIALOG 0, 0, 340, 180
CAPTION "Caption"
BEGIN
    LTEXT "Right",IDC_STATIC_HEADER,7,0,258,8,NOT WS_GROUP
    LTEXT "Wrong",IDC_STATIC_HEADER
          ,7,0,258,8,NOT WS_GROUP
END
```
Running `rc2po res.rc res.po` produces res.po containing:
```
#: DIALOG.IDD_DIALOG.CAPTION
msgid "Caption"
msgstr ""

#: DIALOG.IDD_DIALOG.LTEXT.IDC_STATIC_HEADER
msgid "Right"
msgstr ""

#: DIALOG.IDD_DIALOG.LTEXT.[%27IDC_STATIC_HEADER%27]
msgid "Wrong"
msgstr ""
```
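For a quicker reproduction than running `rc2po`, the same snippet can be fed straight to the rc storage class. This is only a sketch that mirrors the example above and assumes the Translate Toolkit is importable as `translate`:

```python
import io

from translate.storage import rc

RC_SOURCE = b"""
IDD_DIALOG DIALOG 0, 0, 340, 180
CAPTION "Caption"
BEGIN
    LTEXT "Right",IDC_STATIC_HEADER,7,0,258,8,NOT WS_GROUP
    LTEXT "Wrong",IDC_STATIC_HEADER
          ,7,0,258,8,NOT WS_GROUP
END
"""

store = rc.rcfile(io.BytesIO(RC_SOURCE))
for unit in store.units:
    # With the unfixed grammar the second LTEXT does not get the plain
    # DIALOG.IDD_DIALOG.LTEXT.IDC_STATIC_HEADER location (the .po above shows
    # it mangled as [%27IDC_STATIC_HEADER%27]); after the fix both lines do.
    print(unit.name, "->", unit.source)
```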
[ { "content": "#\n# Copyright 2004-2006,2008-2009 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Classes that hold units of .rc files (:class:`rcunit`) or entire files\n(:class:`rcfile`) used in translating Windows Resources.\n\n.. note:::\n\n This implementation is based mostly on observing WINE .rc files,\n these should mimic other non-WINE .rc files.\n\"\"\"\n\nimport re\n\nfrom pyparsing import (\n AtLineStart,\n Combine,\n Forward,\n Group,\n Keyword,\n OneOrMore,\n Optional,\n SkipTo,\n Word,\n ZeroOrMore,\n alphanums,\n alphas,\n c_style_comment,\n delimited_list,\n nums,\n quoted_string,\n rest_of_line,\n)\nfrom pyparsing.common import pyparsing_common\n\nfrom translate.storage import base\n\n\ndef escape_to_python(string):\n \"\"\"Escape a given .rc string into a valid Python string.\"\"\"\n pystring = re.sub('\"\\\\s*\\\\\\\\\\n\\\\s*\"', \"\", string) # xxx\"\\n\"xxx line continuation\n pystring = re.sub(\"\\\\\\\\\\\\\\n\", \"\", pystring) # backslash newline line continuation\n pystring = re.sub(\n \"\\\\\\\\n\", \"\\n\", pystring\n ) # Convert escaped newline to a real newline\n pystring = re.sub(\"\\\\\\\\t\", \"\\t\", pystring) # Convert escape tab to a real tab\n pystring = re.sub(\n \"\\\\\\\\\\\\\\\\\", \"\\\\\\\\\", pystring\n ) # Convert escape backslash to a real escaped backslash\n return pystring\n\n\ndef extract_text(values):\n result = []\n for value in values:\n if isinstance(value, str) and value.startswith('\"'):\n result.append(escape_to_python(value[1:-1]))\n else:\n break\n return \"\".join(result)\n\n\ndef extract_id(values):\n for value in values:\n if isinstance(value, str) and value.startswith('\"'):\n continue\n else:\n if isinstance(value, str):\n return value\n break\n\n return \"UNKNOWN_ID\"\n\n\ndef escape_to_rc(string):\n \"\"\"Escape a given Python string into a valid .rc string.\"\"\"\n rcstring = re.sub(\"\\\\\\\\\", \"\\\\\\\\\\\\\\\\\", string)\n rcstring = re.sub(\"\\t\", \"\\\\\\\\t\", rcstring)\n rcstring = re.sub(\"\\n\", \"\\\\\\\\n\", rcstring)\n return rcstring\n\n\nclass rcunit(base.TranslationUnit):\n \"\"\"A unit of an rc file\"\"\"\n\n def __init__(self, source=\"\", **kwargs):\n \"\"\"Construct a blank rcunit.\"\"\"\n super().__init__(source)\n self.name = \"\"\n self._value = \"\"\n self.comments = []\n self.source = source\n self.match = None\n\n @property\n def source(self):\n return self._value\n\n @source.setter\n def source(self, source):\n \"\"\"Sets the source AND the target to be equal\"\"\"\n self._rich_source = None\n self._value = source or \"\"\n\n @property\n def target(self):\n return self.source\n\n @target.setter\n def target(self, target):\n \"\"\".. 
note:: This also sets the ``.source`` attribute!\"\"\"\n self._rich_target = None\n self.source = target\n\n def __str__(self):\n \"\"\"Convert to a string.\"\"\"\n return self.getoutput()\n\n def getoutput(self):\n \"\"\"Convert the element back into formatted lines for a .rc file.\"\"\"\n if self.isblank():\n return \"\".join(self.comments + [\"\\n\"])\n else:\n return \"\".join(self.comments + [f\"{self.name}={self._value}\\n\"])\n\n def getlocations(self):\n return [self.name]\n\n def addnote(self, text, origin=None, position=\"append\"):\n self.comments.append(text)\n\n def getnotes(self, origin=None):\n return \"\\n\".join(self.comments)\n\n def removenotes(self, origin=None):\n self.comments = []\n\n def isblank(self):\n \"\"\"Returns whether this is a blank element, containing only comments.\"\"\"\n return not (self.name or self.value)\n\n\ndef rc_statement():\n \"\"\"\n Generate a RC statement parser that can be used to parse a RC file\n\n :rtype: pyparsing.ParserElement\n \"\"\"\n\n one_line_comment = \"//\" + rest_of_line\n\n comments = c_style_comment ^ one_line_comment\n\n precompiler = AtLineStart(Word(\"#\", alphanums) + rest_of_line)\n\n language_definition = (\n \"LANGUAGE\"\n + Word(alphas + \"_\").set_results_name(\"language\")\n + Optional(\",\" + Word(alphas + \"_\").set_results_name(\"sublanguage\"))\n )\n\n block_start = (Keyword(\"{\") | Keyword(\"BEGIN\")).set_name(\"block_start\")\n block_end = (Keyword(\"}\") | Keyword(\"END\")).set_name(\"block_end\")\n\n name_id = Group(Word(alphas, alphanums + \"_\")).set_name(\"name_id\")\n\n numbers = Word(nums)\n\n integerconstant = numbers ^ Combine(\"0x\" + numbers)\n\n constant = Combine(\n Optional(Keyword(\"NOT\")) + (name_id | integerconstant),\n adjacent=False,\n join_string=\" \",\n )\n\n combined_constants = delimited_list(constant, \"|\")\n\n concatenated_string = OneOrMore(quoted_string)\n\n block_options = Optional(\n SkipTo(Keyword(\"CAPTION\"), fail_on=block_start)(\"pre_caption\")\n + Keyword(\"CAPTION\")\n + quoted_string(\"caption\")\n ) + SkipTo(block_start)(\"post_caption\")\n\n undefined_control = (\n Group(\n name_id.set_results_name(\"id_control\")\n + delimited_list(\n concatenated_string ^ constant ^ numbers ^ Group(combined_constants)\n ).set_results_name(\"values_\")\n )\n | comments\n )\n\n block = (\n block_start\n + ZeroOrMore(undefined_control, stop_on=block_end)(\"controls\")\n + block_end\n )\n\n dialog = (\n name_id(\"block_id\")\n + (Keyword(\"DIALOGEX\") | Keyword(\"DIALOG\"))(\"block_type\")\n + block_options\n + block\n )\n\n string_table = Keyword(\"STRINGTABLE\")(\"block_type\") + block_options + block\n\n menu_item = Keyword(\"MENUITEM\")(\"block_type\") + (\n pyparsing_common.comma_separated_list(\"values_\") | Keyword(\"SEPARATOR\")\n )\n\n popup_block = Forward()\n\n popup_block <<= Group(\n Keyword(\"POPUP\")(\"block_type\")\n + Optional(quoted_string(\"caption\"))\n + block_start\n + ZeroOrMore(Group(menu_item | popup_block), stop_on=block_end)(\"elements\")\n + block_end\n )(\"popups*\")\n\n menu = (\n name_id(\"block_id\")\n + Keyword(\"MENU\")(\"block_type\")\n + block_options\n + block_start\n + ZeroOrMore(popup_block, stop_on=block_end)\n + block_end\n )\n\n return comments ^ precompiler ^ language_definition ^ dialog ^ string_table ^ menu\n\n\ndef generate_stringtable_name(identifier):\n \"\"\"Return the name generated for a stringtable element.\"\"\"\n return \"STRINGTABLE.\" + identifier\n\n\ndef generate_menu_pre_name(block_type, block_id):\n \"\"\"Return the pre-name 
generated for elements of a menu.\"\"\"\n return f\"{block_type}.{block_id}\"\n\n\ndef generate_popup_pre_name(pre_name, caption):\n \"\"\"Return the pre-name generated for subelements of a popup.\n\n :param pre_name: The pre_name that already have the popup.\n :param caption: The caption (whitout quotes) of the popup.\n\n :return: The subelements pre-name based in the pre-name of the popup and\n its caption.\n \"\"\"\n return \"{}.{}\".format(pre_name, caption.replace(\" \", \"_\"))\n\n\ndef generate_popup_caption_name(pre_name):\n \"\"\"Return the name generated for a caption of a popup.\"\"\"\n return \"%s.POPUP.CAPTION\" % (pre_name)\n\n\ndef generate_menuitem_name(pre_name, block_type, identifier):\n \"\"\"Return the name generated for a menuitem of a popup.\"\"\"\n return f\"{pre_name}.{block_type}.{identifier}\"\n\n\ndef generate_dialog_caption_name(block_type, identifier):\n \"\"\"Return the name generated for a caption of a dialog.\"\"\"\n return \"{}.{}.{}\".format(block_type, identifier, \"CAPTION\")\n\n\ndef generate_dialog_control_name(block_type, block_id, control_type, identifier):\n \"\"\"Return the name generated for a control of a dialog.\"\"\"\n return f\"{block_type}.{block_id}.{control_type}.{identifier}\"\n\n\ndef parse_encoding_pragma(pragma):\n pragma = pragma.strip()\n codepage = pragma.split(\"(\")[1].split(\")\")[0].strip()\n if codepage == \"65001\":\n return \"utf-8\"\n if len(codepage) == 4:\n return f\"cp{codepage}\"\n return None\n\n\nclass rcfile(base.TranslationStore):\n \"\"\"This class represents a .rc file, made up of rcunits.\"\"\"\n\n UnitClass = rcunit\n default_encoding = \"cp1252\"\n\n def __init__(self, inputfile=None, lang=None, sublang=None, **kwargs):\n \"\"\"Construct an rcfile, optionally reading in from inputfile.\"\"\"\n super().__init__(**kwargs)\n self.filename = getattr(inputfile, \"name\", \"\")\n self.lang = lang\n self.sublang = sublang\n if inputfile is not None:\n rcsrc = inputfile.read()\n inputfile.close()\n self.parse(rcsrc)\n\n def add_popup_units(self, pre_name, popup):\n \"\"\"Transverses the popup tree making new units as needed.\"\"\"\n\n if popup.caption:\n newunit = rcunit(escape_to_python(popup.caption[1:-1]))\n newunit.name = generate_popup_caption_name(pre_name)\n newunit.match = popup\n self.addunit(newunit)\n\n for element in popup.elements:\n\n if element.block_type and element.block_type == \"MENUITEM\":\n\n if element.values_ and len(element.values_) >= 2:\n newtext = extract_text(element.values_)\n if newtext:\n newunit = rcunit(newtext)\n newunit.name = generate_menuitem_name(\n pre_name, element.block_type, extract_id(element.values_)\n )\n newunit.match = element\n self.addunit(newunit)\n # Else it can be a separator.\n elif element.popups:\n for sub_popup in element.popups:\n self.add_popup_units(\n generate_popup_pre_name(pre_name, popup.caption[1:-1]),\n sub_popup,\n )\n\n def parse(self, rcsrc, encoding=\"auto\"):\n \"\"\"Read the source of a .rc file in and include them as units.\"\"\"\n self.encoding = encoding\n if encoding != \"auto\":\n decoded = rcsrc.decode(encoding)\n elif b\"\\000\" in rcsrc[:2]:\n self.encoding = \"utf-16-le\"\n decoded = rcsrc.decode(self.encoding)\n else:\n decoded, self.encoding = self.detect_encoding(\n rcsrc, default_encodings=[self.default_encoding]\n )\n\n decoded = decoded.replace(\"\\r\", \"\")\n\n # Parse the strings into a structure.\n results = rc_statement().search_string(decoded)\n\n processblocks = True\n\n for statement in results:\n # Parse pragma\n if 
statement[0] == \"#pragma\" and \"code_page\" in statement[1]:\n expected_encoding = parse_encoding_pragma(statement[1])\n if expected_encoding and expected_encoding != self.encoding:\n self.units = []\n self.parse(rcsrc, expected_encoding)\n return\n if statement.language:\n\n if self.lang is None or statement.language == self.lang:\n if self.sublang is None or statement.sublanguage == self.sublang:\n self.lang = statement.language\n self.sublang = statement.sublanguage\n processblocks = True\n else:\n processblocks = False\n else:\n processblocks = False\n continue\n\n if processblocks and statement.block_type:\n\n if statement.block_type in (\"DIALOG\", \"DIALOGEX\"):\n\n if statement.caption:\n newunit = rcunit(escape_to_python(statement.caption[1:-1]))\n newunit.name = generate_dialog_caption_name(\n statement.block_type, statement.block_id[0]\n )\n newunit.match = statement\n self.addunit(newunit)\n\n for control in statement.controls:\n if isinstance(control, str):\n # This is a comment\n continue\n if control.id_control[0] in (\n \"AUTOCHECKBOX\",\n \"AUTORADIOBUTTON\",\n \"CAPTION\",\n \"CHECKBOX\",\n \"CTEXT\",\n \"CONTROL\",\n \"DEFPUSHBUTTON\",\n \"GROUPBOX\",\n \"LTEXT\",\n \"PUSHBUTTON\",\n \"RADIOBUTTON\",\n \"RTEXT\",\n ) and (\n control.values_[0].startswith('\"')\n or control.values_[0].startswith(\"'\")\n ):\n\n # The first value without quoted chars.\n newtext = extract_text(control.values_)\n if newtext:\n newunit = rcunit(newtext)\n newunit.name = generate_dialog_control_name(\n statement.block_type,\n statement.block_id[0],\n control.id_control[0],\n extract_id(control.values_),\n )\n newunit.match = control\n self.addunit(newunit)\n\n continue\n\n if statement.block_type in (\"MENU\"):\n\n pre_name = generate_menu_pre_name(\n statement.block_type, statement.block_id[0]\n )\n\n for popup in statement.popups:\n\n self.add_popup_units(pre_name, popup)\n\n continue\n\n if statement.block_type in (\"STRINGTABLE\"):\n\n for text in statement.controls:\n if isinstance(text, str):\n # This is a comment\n continue\n\n newtext = extract_text(text.values_)\n if newtext:\n newunit = rcunit(newtext)\n newunit.name = generate_stringtable_name(text.id_control[0])\n newunit.match = text\n self.addunit(newunit)\n\n continue\n\n def serialize(self, out):\n \"\"\"Write the units back to file.\"\"\"\n out.write((\"\".join(self.blocks)).encode(self.encoding))\n", "path": "translate/storage/rc.py" } ]
[ { "content": "#\n# Copyright 2004-2006,2008-2009 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Classes that hold units of .rc files (:class:`rcunit`) or entire files\n(:class:`rcfile`) used in translating Windows Resources.\n\n.. note:::\n\n This implementation is based mostly on observing WINE .rc files,\n these should mimic other non-WINE .rc files.\n\"\"\"\n\nimport re\n\nfrom pyparsing import (\n AtLineStart,\n Combine,\n Forward,\n Group,\n Keyword,\n OneOrMore,\n Optional,\n SkipTo,\n Word,\n ZeroOrMore,\n alphanums,\n alphas,\n c_style_comment,\n delimited_list,\n nums,\n quoted_string,\n rest_of_line,\n)\nfrom pyparsing.common import pyparsing_common\n\nfrom translate.storage import base\n\n\ndef escape_to_python(string):\n \"\"\"Escape a given .rc string into a valid Python string.\"\"\"\n pystring = re.sub('\"\\\\s*\\\\\\\\\\n\\\\s*\"', \"\", string) # xxx\"\\n\"xxx line continuation\n pystring = re.sub(\"\\\\\\\\\\\\\\n\", \"\", pystring) # backslash newline line continuation\n pystring = re.sub(\n \"\\\\\\\\n\", \"\\n\", pystring\n ) # Convert escaped newline to a real newline\n pystring = re.sub(\"\\\\\\\\t\", \"\\t\", pystring) # Convert escape tab to a real tab\n pystring = re.sub(\n \"\\\\\\\\\\\\\\\\\", \"\\\\\\\\\", pystring\n ) # Convert escape backslash to a real escaped backslash\n return pystring\n\n\ndef extract_text(values):\n result = []\n for value in values:\n if isinstance(value, str) and value.startswith('\"'):\n result.append(escape_to_python(value[1:-1]))\n else:\n break\n return \"\".join(result)\n\n\ndef extract_id(values):\n for value in values:\n if isinstance(value, str) and value.startswith('\"'):\n continue\n else:\n if isinstance(value, str):\n return value\n break\n\n return \"UNKNOWN_ID\"\n\n\ndef escape_to_rc(string):\n \"\"\"Escape a given Python string into a valid .rc string.\"\"\"\n rcstring = re.sub(\"\\\\\\\\\", \"\\\\\\\\\\\\\\\\\", string)\n rcstring = re.sub(\"\\t\", \"\\\\\\\\t\", rcstring)\n rcstring = re.sub(\"\\n\", \"\\\\\\\\n\", rcstring)\n return rcstring\n\n\nclass rcunit(base.TranslationUnit):\n \"\"\"A unit of an rc file\"\"\"\n\n def __init__(self, source=\"\", **kwargs):\n \"\"\"Construct a blank rcunit.\"\"\"\n super().__init__(source)\n self.name = \"\"\n self._value = \"\"\n self.comments = []\n self.source = source\n self.match = None\n\n @property\n def source(self):\n return self._value\n\n @source.setter\n def source(self, source):\n \"\"\"Sets the source AND the target to be equal\"\"\"\n self._rich_source = None\n self._value = source or \"\"\n\n @property\n def target(self):\n return self.source\n\n @target.setter\n def target(self, target):\n \"\"\".. 
note:: This also sets the ``.source`` attribute!\"\"\"\n self._rich_target = None\n self.source = target\n\n def __str__(self):\n \"\"\"Convert to a string.\"\"\"\n return self.getoutput()\n\n def getoutput(self):\n \"\"\"Convert the element back into formatted lines for a .rc file.\"\"\"\n if self.isblank():\n return \"\".join(self.comments + [\"\\n\"])\n else:\n return \"\".join(self.comments + [f\"{self.name}={self._value}\\n\"])\n\n def getlocations(self):\n return [self.name]\n\n def addnote(self, text, origin=None, position=\"append\"):\n self.comments.append(text)\n\n def getnotes(self, origin=None):\n return \"\\n\".join(self.comments)\n\n def removenotes(self, origin=None):\n self.comments = []\n\n def isblank(self):\n \"\"\"Returns whether this is a blank element, containing only comments.\"\"\"\n return not (self.name or self.value)\n\n\ndef rc_statement():\n \"\"\"\n Generate a RC statement parser that can be used to parse a RC file\n\n :rtype: pyparsing.ParserElement\n \"\"\"\n\n one_line_comment = \"//\" + rest_of_line\n\n comments = c_style_comment ^ one_line_comment\n\n precompiler = AtLineStart(Word(\"#\", alphanums) + rest_of_line)\n\n language_definition = (\n \"LANGUAGE\"\n + Word(alphas + \"_\").set_results_name(\"language\")\n + Optional(\",\" + Word(alphas + \"_\").set_results_name(\"sublanguage\"))\n )\n\n block_start = (Keyword(\"{\") | Keyword(\"BEGIN\")).set_name(\"block_start\")\n block_end = (Keyword(\"}\") | Keyword(\"END\")).set_name(\"block_end\")\n\n name_id = Group(Word(alphas, alphanums + \"_\")).set_name(\"name_id\")\n\n numbers = Word(nums)\n\n integerconstant = numbers ^ Combine(\"0x\" + numbers)\n\n constant = Combine(\n Optional(Keyword(\"NOT\")) + (name_id | integerconstant),\n adjacent=False,\n join_string=\" \",\n )\n\n combined_constants = delimited_list(constant, \"|\", min=2)\n\n concatenated_string = OneOrMore(quoted_string)\n\n block_options = Optional(\n SkipTo(Keyword(\"CAPTION\"), fail_on=block_start)(\"pre_caption\")\n + Keyword(\"CAPTION\")\n + quoted_string(\"caption\")\n ) + SkipTo(block_start)(\"post_caption\")\n\n undefined_control = (\n Group(\n name_id.set_results_name(\"id_control\")\n + delimited_list(\n concatenated_string ^ constant ^ numbers ^ Group(combined_constants)\n ).set_results_name(\"values_\")\n )\n | comments\n )\n\n block = (\n block_start\n + ZeroOrMore(undefined_control, stop_on=block_end)(\"controls\")\n + block_end\n )\n\n dialog = (\n name_id(\"block_id\")\n + (Keyword(\"DIALOGEX\") | Keyword(\"DIALOG\"))(\"block_type\")\n + block_options\n + block\n )\n\n string_table = Keyword(\"STRINGTABLE\")(\"block_type\") + block_options + block\n\n menu_item = Keyword(\"MENUITEM\")(\"block_type\") + (\n pyparsing_common.comma_separated_list(\"values_\") | Keyword(\"SEPARATOR\")\n )\n\n popup_block = Forward()\n\n popup_block <<= Group(\n Keyword(\"POPUP\")(\"block_type\")\n + Optional(quoted_string(\"caption\"))\n + block_start\n + ZeroOrMore(Group(menu_item | popup_block), stop_on=block_end)(\"elements\")\n + block_end\n )(\"popups*\")\n\n menu = (\n name_id(\"block_id\")\n + Keyword(\"MENU\")(\"block_type\")\n + block_options\n + block_start\n + ZeroOrMore(popup_block, stop_on=block_end)\n + block_end\n )\n\n return comments ^ precompiler ^ language_definition ^ dialog ^ string_table ^ menu\n\n\ndef generate_stringtable_name(identifier):\n \"\"\"Return the name generated for a stringtable element.\"\"\"\n return \"STRINGTABLE.\" + identifier\n\n\ndef generate_menu_pre_name(block_type, block_id):\n \"\"\"Return the 
pre-name generated for elements of a menu.\"\"\"\n return f\"{block_type}.{block_id}\"\n\n\ndef generate_popup_pre_name(pre_name, caption):\n \"\"\"Return the pre-name generated for subelements of a popup.\n\n :param pre_name: The pre_name that already have the popup.\n :param caption: The caption (whitout quotes) of the popup.\n\n :return: The subelements pre-name based in the pre-name of the popup and\n its caption.\n \"\"\"\n return \"{}.{}\".format(pre_name, caption.replace(\" \", \"_\"))\n\n\ndef generate_popup_caption_name(pre_name):\n \"\"\"Return the name generated for a caption of a popup.\"\"\"\n return \"%s.POPUP.CAPTION\" % (pre_name)\n\n\ndef generate_menuitem_name(pre_name, block_type, identifier):\n \"\"\"Return the name generated for a menuitem of a popup.\"\"\"\n return f\"{pre_name}.{block_type}.{identifier}\"\n\n\ndef generate_dialog_caption_name(block_type, identifier):\n \"\"\"Return the name generated for a caption of a dialog.\"\"\"\n return \"{}.{}.{}\".format(block_type, identifier, \"CAPTION\")\n\n\ndef generate_dialog_control_name(block_type, block_id, control_type, identifier):\n \"\"\"Return the name generated for a control of a dialog.\"\"\"\n return f\"{block_type}.{block_id}.{control_type}.{identifier}\"\n\n\ndef parse_encoding_pragma(pragma):\n pragma = pragma.strip()\n codepage = pragma.split(\"(\")[1].split(\")\")[0].strip()\n if codepage == \"65001\":\n return \"utf-8\"\n if len(codepage) == 4:\n return f\"cp{codepage}\"\n return None\n\n\nclass rcfile(base.TranslationStore):\n \"\"\"This class represents a .rc file, made up of rcunits.\"\"\"\n\n UnitClass = rcunit\n default_encoding = \"cp1252\"\n\n def __init__(self, inputfile=None, lang=None, sublang=None, **kwargs):\n \"\"\"Construct an rcfile, optionally reading in from inputfile.\"\"\"\n super().__init__(**kwargs)\n self.filename = getattr(inputfile, \"name\", \"\")\n self.lang = lang\n self.sublang = sublang\n if inputfile is not None:\n rcsrc = inputfile.read()\n inputfile.close()\n self.parse(rcsrc)\n\n def add_popup_units(self, pre_name, popup):\n \"\"\"Transverses the popup tree making new units as needed.\"\"\"\n\n if popup.caption:\n newunit = rcunit(escape_to_python(popup.caption[1:-1]))\n newunit.name = generate_popup_caption_name(pre_name)\n newunit.match = popup\n self.addunit(newunit)\n\n for element in popup.elements:\n\n if element.block_type and element.block_type == \"MENUITEM\":\n\n if element.values_ and len(element.values_) >= 2:\n newtext = extract_text(element.values_)\n if newtext:\n newunit = rcunit(newtext)\n newunit.name = generate_menuitem_name(\n pre_name, element.block_type, extract_id(element.values_)\n )\n newunit.match = element\n self.addunit(newunit)\n # Else it can be a separator.\n elif element.popups:\n for sub_popup in element.popups:\n self.add_popup_units(\n generate_popup_pre_name(pre_name, popup.caption[1:-1]),\n sub_popup,\n )\n\n def parse(self, rcsrc, encoding=\"auto\"):\n \"\"\"Read the source of a .rc file in and include them as units.\"\"\"\n self.encoding = encoding\n if encoding != \"auto\":\n decoded = rcsrc.decode(encoding)\n elif b\"\\000\" in rcsrc[:2]:\n self.encoding = \"utf-16-le\"\n decoded = rcsrc.decode(self.encoding)\n else:\n decoded, self.encoding = self.detect_encoding(\n rcsrc, default_encodings=[self.default_encoding]\n )\n\n decoded = decoded.replace(\"\\r\", \"\")\n\n # Parse the strings into a structure.\n results = rc_statement().search_string(decoded)\n\n processblocks = True\n\n for statement in results:\n # Parse pragma\n if 
statement[0] == \"#pragma\" and \"code_page\" in statement[1]:\n expected_encoding = parse_encoding_pragma(statement[1])\n if expected_encoding and expected_encoding != self.encoding:\n self.units = []\n self.parse(rcsrc, expected_encoding)\n return\n if statement.language:\n\n if self.lang is None or statement.language == self.lang:\n if self.sublang is None or statement.sublanguage == self.sublang:\n self.lang = statement.language\n self.sublang = statement.sublanguage\n processblocks = True\n else:\n processblocks = False\n else:\n processblocks = False\n continue\n\n if processblocks and statement.block_type:\n\n if statement.block_type in (\"DIALOG\", \"DIALOGEX\"):\n\n if statement.caption:\n newunit = rcunit(escape_to_python(statement.caption[1:-1]))\n newunit.name = generate_dialog_caption_name(\n statement.block_type, statement.block_id[0]\n )\n newunit.match = statement\n self.addunit(newunit)\n\n for control in statement.controls:\n if isinstance(control, str):\n # This is a comment\n continue\n if control.id_control[0] in (\n \"AUTOCHECKBOX\",\n \"AUTORADIOBUTTON\",\n \"CAPTION\",\n \"CHECKBOX\",\n \"CTEXT\",\n \"CONTROL\",\n \"DEFPUSHBUTTON\",\n \"GROUPBOX\",\n \"LTEXT\",\n \"PUSHBUTTON\",\n \"RADIOBUTTON\",\n \"RTEXT\",\n ) and (\n control.values_[0].startswith('\"')\n or control.values_[0].startswith(\"'\")\n ):\n\n # The first value without quoted chars.\n newtext = extract_text(control.values_)\n if newtext:\n newunit = rcunit(newtext)\n newunit.name = generate_dialog_control_name(\n statement.block_type,\n statement.block_id[0],\n control.id_control[0],\n extract_id(control.values_),\n )\n newunit.match = control\n self.addunit(newunit)\n\n continue\n\n if statement.block_type in (\"MENU\"):\n\n pre_name = generate_menu_pre_name(\n statement.block_type, statement.block_id[0]\n )\n\n for popup in statement.popups:\n\n self.add_popup_units(pre_name, popup)\n\n continue\n\n if statement.block_type in (\"STRINGTABLE\"):\n\n for text in statement.controls:\n if isinstance(text, str):\n # This is a comment\n continue\n\n newtext = extract_text(text.values_)\n if newtext:\n newunit = rcunit(newtext)\n newunit.name = generate_stringtable_name(text.id_control[0])\n newunit.match = text\n self.addunit(newunit)\n\n continue\n\n def serialize(self, out):\n \"\"\"Write the units back to file.\"\"\"\n out.write((\"\".join(self.blocks)).encode(self.encoding))\n", "path": "translate/storage/rc.py" } ]
diff --git a/translate/storage/rc.py b/translate/storage/rc.py index f3fd76a0c1..4c4a2e2f75 100644 --- a/translate/storage/rc.py +++ b/translate/storage/rc.py @@ -189,7 +189,7 @@ def rc_statement(): join_string=" ", ) - combined_constants = delimited_list(constant, "|") + combined_constants = delimited_list(constant, "|", min=2) concatenated_string = OneOrMore(quoted_string) diff --git a/translate/storage/test_rc.py b/translate/storage/test_rc.py index 463524e642..52a095f8f2 100644 --- a/translate/storage/test_rc.py +++ b/translate/storage/test_rc.py @@ -523,3 +523,22 @@ def test_textinclude_appstudio(self): assert len(rc_file.units) == 2 assert rc_file.units[0].source == "Copied" assert rc_file.units[1].source == "Other" + + def test_id_whitespace(self): + rc_source = """ +IDD_DIALOG DIALOG 0, 0, 340, 180 +CAPTION "Caption" +BEGIN + LTEXT "Right",IDC_STATIC_HEADER,7,0,258,8,NOT WS_GROUP + LTEXT "Wrong",IDC_STATIC_HEADER2 + ,7,0,258,8,NOT WS_GROUP +END +""" + rc_file = self.source_parse(rc_source, encoding="utf-16") + assert len(rc_file.units) == 3 + assert rc_file.units[0].source == "Caption" + assert rc_file.units[0].name == "DIALOG.IDD_DIALOG.CAPTION" + assert rc_file.units[1].source == "Right" + assert rc_file.units[1].name == "DIALOG.IDD_DIALOG.LTEXT.IDC_STATIC_HEADER" + assert rc_file.units[2].source == "Wrong" + assert rc_file.units[2].name == "DIALOG.IDD_DIALOG.LTEXT.IDC_STATIC_HEADER2"
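The one-line grammar change above passes `min=2` to pyparsing's `delimited_list`, so a lone identifier can no longer be consumed as a "combined constants" style expression and stays available for the control-ID part of the grammar (the case the new `test_id_whitespace` test exercises). Below is a minimal sketch of that behaviour; it assumes a pyparsing 3.x release whose `delimited_list` accepts the `min` keyword (which the patch itself relies on), and the `constant` expression here is a simplified stand-in for the real one in `rc_statement()`.

```python
# Sketch only: a simplified stand-in for the RC grammar, not the real rc_statement().
from pyparsing import ParseException, Word, alphanums, delimited_list

constant = Word(alphanums + "_")

# min=2 means "at least two constants joined by '|'"; a single identifier no
# longer matches this expression.
combined_constants = delimited_list(constant, "|", min=2)

print(combined_constants.parse_string("WS_CHILD | WS_VISIBLE").as_list())
# -> ['WS_CHILD', 'WS_VISIBLE']

try:
    combined_constants.parse_string("IDC_STATIC_HEADER2")
except ParseException as exc:
    print("single constant rejected:", exc)
```

With the minimum in place, an ID such as `IDC_STATIC_HEADER2` followed by a line break and a comma is parsed as a control ID rather than swallowed as a style expression, which is exactly what the added test asserts.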
streamlit__streamlit-1682
Error during exception handling in st.write - TypeError: exception() takes 3 positional arguments but 4 were given # Summary I'm trying to display a dataframe with `st.write` and it fails with a streamlit error while trying to handle an error from rendering the object. ```python TypeError: exception() takes 3 positional arguments but 4 were given Traceback: File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/ScriptRunner.py", line 322, in _run_script exec(code, module.__dict__) File "/scratch/bs3639/bosch-urban-sound/boschurbansnd/app.py", line 256, in <module> data_summary(dflabels, dffiles) File "/scratch/bs3639/bosch-urban-sound/boschurbansnd/app.py", line 130, in data_summary st.write(dffiles.reset_index(drop=True).head()) File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/__init__.py", line 411, in write exception(exc, exc_tb) # noqa: F821 File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/DeltaGenerator.py", line 122, in wrapped_method return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/DeltaGenerator.py", line 367, in _enqueue_new_element_delta rv = marshall_element(msg.delta.new_element) File "/home/bs3639/.conda/envs/bosch/lib/python3.6/site-packages/streamlit/DeltaGenerator.py", line 120, in marshall_element return method(dg, element, *args, **kwargs) ``` # Steps to reproduce It happens when you raise an exception during type conversion in `st.write`. Here's the simplest example I could think of that throws the same error. ```python import streamlit as st class breakstuff: def __str__(self): raise ValueError st.write(breakstuff()) ``` ## Expected behavior: It should display the `ValueError` ## Actual behavior: Instead it throws a streamlit internal `TypeError` error (see above traceback). It means that I can't actually debug the exception that is throwing that code. # Debug info - Streamlit version: `Streamlit, version 0.60.0` (get it with `$ streamlit version`) - Python version: `Python 3.6.10 :: Anaconda, Inc.` (get it with `$ python --version`) - Using Conda? PipEnv? PyEnv? Pex? - Conda - OS version: - Browser version: # Additional information From a quick spin following the traceback, I believe I can see the issue. The traceback says that the error originates here: https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/DeltaGenerator.py#L120 The wrapper provides 2 args and it says it's receiving 4 while expecting 3, so that means that the wrapped method is being called with 2 instead of an expected 1 argument. Earlier in the traceback, it says that it's being raised in `st.write` by `exception` (notice it's being called with 2 arguments): https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/__init__.py#L409-L411 Looking at its definition, `exception` is wrapped with `_with_element` and takes 3 arguments, 2 of which are provided by the wrapper. https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/DeltaGenerator.py#L788-L789 tl;dr - you probably shouldn't be passing the traceback into the exception function (or perhaps you meant to and you haven't finished implementing it on the other side.) Either way, it's broken rn. # Possible Solution? 
From a cursory glance, it looks like you should just simplify to this and this issue will go away: ```python # change here: https://github.com/streamlit/streamlit/blob/a9be6773eceba186c4094908c5403575810d760a/lib/streamlit/__init__.py#L409-L411 try: ... except Exception as exc: exception(exc) ```
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit.\n\nHow to use Streamlit in 3 seconds:\n\n 1. Write an app\n >>> import streamlit as st\n >>> st.write(anything_you_want)\n\n 2. Run your app\n $ streamlit run my_script.py\n\n 3. Use your app\n A new tab will open on your browser. That's your Streamlit app!\n\n 4. Modify your code, save it, and watch changes live on your browser.\n\nTake a look at the other commands in this module to find out what else\nStreamlit can do:\n\n >>> dir(streamlit)\n\nOr try running our \"Hello World\":\n\n $ streamlit hello\n\nFor more detailed info, see https://docs.streamlit.io.\n\"\"\"\n\n# IMPORTANT: Prefix with an underscore anything that the user shouldn't see.\n\n# NOTE: You'll see lots of \"noqa: F821\" in this file. That's because we\n# manually mess with the local namespace so the linter can't know that some\n# identifiers actually exist in the namespace.\n\n# Must be at the top, to avoid circular dependency.\nfrom streamlit import logger as _logger\nfrom streamlit import config as _config\n\n_LOGGER = _logger.get_logger(\"root\")\n\n# Give the package a version.\nimport pkg_resources as _pkg_resources\nimport uuid as _uuid\nimport subprocess\nimport platform\nimport os\nfrom typing import Any, List, Tuple, Type\n\n# This used to be pkg_resources.require('streamlit') but it would cause\n# pex files to fail. 
See #394 for more details.\n__version__ = _pkg_resources.get_distribution(\"streamlit\").version\n\n# Deterministic Unique Streamlit User ID\nif (\n platform.system() == \"Linux\"\n and os.path.isfile(\"/etc/machine-id\") == False\n and os.path.isfile(\"/var/lib/dbus/machine-id\") == False\n):\n print(\"Generate machine-id\")\n subprocess.run([\"sudo\", \"dbus-uuidgen\", \"--ensure\"])\n\nmachine_id = str(_uuid.getnode())\nif os.path.isfile(\"/etc/machine-id\"):\n with open(\"/etc/machine-id\", \"r\") as f:\n machine_id = f.read()\nelif os.path.isfile(\"/var/lib/dbus/machine-id\"):\n with open(\"/var/lib/dbus/machine-id\", \"r\") as f:\n machine_id = f.read()\n\n__installation_id__ = str(_uuid.uuid5(_uuid.NAMESPACE_DNS, machine_id))\n\n\nimport contextlib as _contextlib\nimport re as _re\nimport sys as _sys\nimport textwrap as _textwrap\nimport threading as _threading\nimport traceback as _traceback\nimport types as _types\nimport json as _json\nimport numpy as _np\n\nfrom streamlit import code_util as _code_util\nfrom streamlit import env_util as _env_util\nfrom streamlit import source_util as _source_util\nfrom streamlit import string_util as _string_util\nfrom streamlit import type_util as _type_util\nfrom streamlit.DeltaGenerator import DeltaGenerator as _DeltaGenerator\nfrom streamlit.ReportThread import add_report_ctx as _add_report_ctx\nfrom streamlit.ReportThread import get_report_ctx as _get_report_ctx\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto import BlockPath_pb2 as _BlockPath_pb2\nfrom streamlit.util import functools_wraps as _functools_wraps\n\n# Modules that the user should have access to. These are imported with \"as\"\n# syntax pass mypy checking with implicit_reexport disabled.\nfrom streamlit.caching import cache as cache # noqa: F401\n\n# This is set to True inside cli._main_run(), and is False otherwise.\n# If False, we should assume that DeltaGenerator functions are effectively\n# no-ops, and adapt gracefully.\n_is_running_with_streamlit = False\n\n\ndef _set_log_level():\n _logger.set_log_level(_config.get_option(\"global.logLevel\").upper())\n _logger.init_tornado_logs()\n\n\n# Make this file only depend on config option in an asynchronous manner. 
This\n# avoids a race condition when another file (such as a test file) tries to pass\n# in an alternative config.\n_config.on_config_parsed(_set_log_level, True)\n\n\n_main = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.MAIN)\nsidebar = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.SIDEBAR)\n\n# DeltaGenerator methods:\n\naltair_chart = _main.altair_chart # noqa: E221\narea_chart = _main.area_chart # noqa: E221\naudio = _main.audio # noqa: E221\nballoons = _main.balloons # noqa: E221\nbar_chart = _main.bar_chart # noqa: E221\nbokeh_chart = _main.bokeh_chart # noqa: E221\nbutton = _main.button # noqa: E221\ncheckbox = _main.checkbox # noqa: E221\ncode = _main.code # noqa: E221\ndataframe = _main.dataframe # noqa: E221\ndate_input = _main.date_input # noqa: E221\ndeck_gl_chart = _main.deck_gl_chart # noqa: E221\npydeck_chart = _main.pydeck_chart # noqa: E221\nempty = _main.empty # noqa: E221\nerror = _main.error # noqa: E221\nexception = _main.exception # noqa: E221\nbeta_set_favicon = _main.favicon # noqa: E221\nfile_uploader = _main.file_uploader # noqa: E221\ngraphviz_chart = _main.graphviz_chart # noqa: E221\nheader = _main.header # noqa: E221\nhelp = _main.help # noqa: E221\nimage = _main.image # noqa: E221\ninfo = _main.info # noqa: E221\njson = _main.json # noqa: E221\nlatex = _main.latex # noqa: E221\nline_chart = _main.line_chart # noqa: E221\nmap = _main.map # noqa: E221\nmarkdown = _main.markdown # noqa: E221\nmultiselect = _main.multiselect # noqa: E221\nnumber_input = _main.number_input # noqa: E221\nplotly_chart = _main.plotly_chart # noqa: E221\nprogress = _main.progress # noqa: E221\npyplot = _main.pyplot # noqa: E221\nradio = _main.radio # noqa: E221\nselectbox = _main.selectbox # noqa: E221\nslider = _main.slider # noqa: E221\nsubheader = _main.subheader # noqa: E221\nsuccess = _main.success # noqa: E221\ntable = _main.table # noqa: E221\ntext = _main.text # noqa: E221\ntext_area = _main.text_area # noqa: E221\ntext_input = _main.text_input # noqa: E221\ntime_input = _main.time_input # noqa: E221\ntitle = _main.title # noqa: E221\nvega_lite_chart = _main.vega_lite_chart # noqa: E221\nvideo = _main.video # noqa: E221\nwarning = _main.warning # noqa: E221\nbeta_color_picker = _main.beta_color_picker # noqa: E221\n\n# Config\n\nget_option = _config.get_option\n\n\ndef set_option(key, value):\n \"\"\"Set config option.\n\n Currently, only two config options can be set within the script itself:\n * client.caching\n * client.displayEnabled\n\n Calling with any other options will raise StreamlitAPIException.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\". To see all\n available options, run `streamlit config show` on a terminal.\n\n value\n The new value to assign to this config option.\n\n \"\"\"\n opt = _config._config_options[key]\n if opt.scriptable:\n _config.set_option(key, value)\n return\n\n raise StreamlitAPIException(\n \"{key} cannot be set on the fly. Set as command line option, e.g. 
streamlit run script.py --{key}, or in config.toml instead.\".format(\n key=key\n )\n )\n\n\n# Special methods:\n\n_HELP_TYPES = (\n _types.BuiltinFunctionType,\n _types.BuiltinMethodType,\n _types.FunctionType,\n _types.MethodType,\n _types.ModuleType,\n) # type: Tuple[Type[Any], ...]\n\n\ndef write(*args, **kwargs):\n \"\"\"Write arguments to the app.\n\n This is the Swiss Army knife of Streamlit commands: it does different\n things depending on what you throw at it. Unlike other Streamlit commands,\n write() has some unique properties:\n\n 1. You can pass in multiple arguments, all of which will be written.\n 2. Its behavior depends on the input types as follows.\n 3. It returns None, so it's \"slot\" in the App cannot be reused.\n\n Parameters\n ----------\n *args : any\n One or many objects to print to the App.\n\n Arguments are handled as follows:\n\n - write(string) : Prints the formatted Markdown string, with\n support for LaTeX expression and emoji shortcodes.\n See docs for st.markdown for more.\n - write(data_frame) : Displays the DataFrame as a table.\n - write(error) : Prints an exception specially.\n - write(func) : Displays information about a function.\n - write(module) : Displays information about the module.\n - write(dict) : Displays dict in an interactive widget.\n - write(obj) : The default is to print str(obj).\n - write(mpl_fig) : Displays a Matplotlib figure.\n - write(altair) : Displays an Altair chart.\n - write(keras) : Displays a Keras model.\n - write(graphviz) : Displays a Graphviz graph.\n - write(plotly_fig) : Displays a Plotly figure.\n - write(bokeh_fig) : Displays a Bokeh figure.\n - write(sympy_expr) : Prints SymPy expression using LaTeX.\n\n unsafe_allow_html : bool\n This is a keyword-only argument that defaults to False.\n\n By default, any HTML tags found in strings will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, *we strongly advise* against it*. It is hard to write secure\n HTML, so by using this argument you may be compromising your users'\n security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n **Also note that `unsafe_allow_html` is a temporary measure and may be\n removed from Streamlit at any time.**\n\n If you decide to turn on HTML anyway, we ask you to please tell us your\n exact use case here:\n https://discuss.streamlit.io/t/96 .\n\n This will help us come up with safe APIs that allow you to do what you\n want.\n\n Example\n -------\n\n Its simplest use case is to draw Markdown-formatted text, whenever the\n input is a string:\n\n >>> write('Hello, *World!* :sunglasses:')\n\n .. output::\n https://share.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE\n height: 50px\n\n As mentioned earlier, `st.write()` also accepts other data formats, such as\n numbers, data frames, styled data frames, and assorted objects:\n\n >>> st.write(1234)\n >>> st.write(pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... }))\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD\n height: 250px\n\n Finally, you can pass in multiple arguments to do things like:\n\n >>> st.write('1 + 1 = ', 2)\n >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1\n height: 300px\n\n Oh, one more thing: `st.write` accepts chart objects too! 
For example:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> import altair as alt\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(200, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> c = alt.Chart(df).mark_circle().encode(\n ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\n >>>\n >>> st.write(c)\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5\n height: 200px\n\n \"\"\"\n try:\n string_buffer = [] # type: List[str]\n unsafe_allow_html = kwargs.get(\"unsafe_allow_html\", False)\n\n def flush_buffer():\n if string_buffer:\n markdown(\n \" \".join(string_buffer), unsafe_allow_html=unsafe_allow_html,\n ) # noqa: F821\n string_buffer[:] = []\n\n for arg in args:\n # Order matters!\n if isinstance(arg, str):\n string_buffer.append(arg)\n elif _type_util.is_dataframe_like(arg):\n flush_buffer()\n if len(_np.shape(arg)) > 2:\n text(arg)\n else:\n dataframe(arg) # noqa: F821\n elif isinstance(arg, Exception):\n flush_buffer()\n exception(arg) # noqa: F821\n elif isinstance(arg, _HELP_TYPES):\n flush_buffer()\n help(arg)\n elif _type_util.is_altair_chart(arg):\n flush_buffer()\n altair_chart(arg)\n elif _type_util.is_type(arg, \"matplotlib.figure.Figure\"):\n flush_buffer()\n pyplot(arg)\n elif _type_util.is_plotly_chart(arg):\n flush_buffer()\n plotly_chart(arg)\n elif _type_util.is_type(arg, \"bokeh.plotting.figure.Figure\"):\n flush_buffer()\n bokeh_chart(arg)\n elif _type_util.is_graphviz_chart(arg):\n flush_buffer()\n graphviz_chart(arg)\n elif _type_util.is_sympy_expession(arg):\n flush_buffer()\n latex(arg)\n elif _type_util.is_keras_model(arg):\n from tensorflow.python.keras.utils import vis_utils\n\n flush_buffer()\n dot = vis_utils.model_to_dot(arg)\n graphviz_chart(dot.to_string())\n elif isinstance(arg, (dict, list)):\n flush_buffer()\n json(arg)\n elif _type_util.is_namedtuple(arg):\n flush_buffer()\n json(_json.dumps(arg._asdict()))\n elif _type_util.is_pydeck(arg):\n flush_buffer()\n pydeck_chart(arg)\n else:\n string_buffer.append(\"`%s`\" % str(arg).replace(\"`\", \"\\\\`\"))\n\n flush_buffer()\n\n except Exception:\n _, exc, exc_tb = _sys.exc_info()\n exception(exc, exc_tb) # noqa: F821\n\n\ndef experimental_show(*args):\n \"\"\"Write arguments and *argument names* to your app for debugging purposes.\n\n Show() has similar properties to write():\n\n 1. You can pass in multiple arguments, all of which will be debugged.\n 2. It returns None, so it's \"slot\" in the app cannot be reused.\n\n Note: This is an experimental feature. See\n https://docs.streamlit.io/en/latest/pre_release_features.html for more information.\n\n Parameters\n ----------\n *args : any\n One or many objects to debug in the App.\n\n Example\n -------\n\n >>> dataframe = pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... 
}))\n >>> st.experimental_show(dataframe)\n\n Notes\n -----\n\n This is an experimental feature with usage limitations:\n\n - The method must be called with the name `show`.\n - Must be called in one line of code, and only once per line.\n - When passing multiple arguments the inclusion of `,` or `)` in a string\n argument may cause an error.\n\n \"\"\"\n if not args:\n return\n\n try:\n import inspect\n\n # Get the calling line of code\n current_frame = inspect.currentframe()\n if current_frame is None:\n warning(\"`show` not enabled in the shell\")\n return\n lines = inspect.getframeinfo(current_frame.f_back)[3]\n\n if not lines:\n warning(\"`show` not enabled in the shell\")\n return\n\n # Parse arguments from the line\n line = lines[0].split(\"show\", 1)[1]\n inputs = _code_util.get_method_args_from_code(args, line)\n\n # Escape markdown and add deltas\n for idx, input in enumerate(inputs):\n escaped = _string_util.escape_markdown(input)\n\n markdown(\"**%s**\" % escaped)\n write(args[idx])\n\n except Exception:\n _, exc, exc_tb = _sys.exc_info()\n exception(exc, exc_tb) # noqa: F821\n\n\n@_contextlib.contextmanager\ndef spinner(text=\"In progress...\"):\n \"\"\"Temporarily displays a message while executing a block of code.\n\n Parameters\n ----------\n text : str\n A message to display while executing that block\n\n Example\n -------\n\n >>> with st.spinner('Wait for it...'):\n >>> time.sleep(5)\n >>> st.success('Done!')\n\n \"\"\"\n import streamlit.caching as caching\n\n # @st.cache optionally uses spinner for long-running computations.\n # Normally, streamlit warns the user when they call st functions\n # from within an @st.cache'd function. But we do *not* want to show\n # these warnings for spinner's message, so we create and mutate this\n # message delta within the \"suppress_cached_st_function_warning\"\n # context.\n with caching.suppress_cached_st_function_warning():\n message = empty()\n\n try:\n # Set the message 0.1 seconds in the future to avoid annoying\n # flickering if this spinner runs too quickly.\n DELAY_SECS = 0.1\n display_message = True\n display_message_lock = _threading.Lock()\n\n def set_message():\n with display_message_lock:\n if display_message:\n with caching.suppress_cached_st_function_warning():\n message.warning(str(text))\n\n _add_report_ctx(_threading.Timer(DELAY_SECS, set_message)).start()\n\n # Yield control back to the context.\n yield\n finally:\n if display_message_lock:\n with display_message_lock:\n display_message = False\n with caching.suppress_cached_st_function_warning():\n message.empty()\n\n\n_SPACES_RE = _re.compile(\"\\\\s*\")\n\n\n@_contextlib.contextmanager\ndef echo(code_location=\"above\"):\n \"\"\"Use in a `with` block to draw some code on the app, then execute it.\n\n Parameters\n ----------\n code_location : \"above\" or \"below\"\n Whether to show the echoed code before or after the results of the\n executed code block.\n\n Example\n -------\n\n >>> with st.echo():\n >>> st.write('This code will be printed')\n\n \"\"\"\n if code_location == \"below\":\n show_code = code\n show_warning = warning\n else:\n placeholder = empty() # noqa: F821\n show_code = placeholder.code\n show_warning = placeholder.warning\n\n try:\n frame = _traceback.extract_stack()[-3]\n filename, start_line = frame.filename, frame.lineno\n yield\n frame = _traceback.extract_stack()[-3]\n end_line = frame.lineno\n lines_to_display = [] # type: List[str]\n with _source_util.open_python_file(filename) as source_file:\n source_lines = source_file.readlines()\n 
lines_to_display.extend(source_lines[start_line:end_line])\n match = _SPACES_RE.match(lines_to_display[0])\n initial_spaces = match.end() if match else 0\n for line in source_lines[end_line:]:\n match = _SPACES_RE.match(line)\n indentation = match.end() if match else 0\n # The != 1 is because we want to allow '\\n' between sections.\n if indentation != 1 and indentation < initial_spaces:\n break\n lines_to_display.append(line)\n line_to_display = _textwrap.dedent(\"\".join(lines_to_display))\n\n show_code(line_to_display, \"python\")\n\n except FileNotFoundError as err:\n show_warning(\"Unable to display code. %s\" % err)\n\n\ndef _transparent_write(*args):\n \"\"\"This is just st.write, but returns the arguments you passed to it.\"\"\"\n write(*args)\n if len(args) == 1:\n return args[0]\n return args\n\n\n# We want to show a warning when the user runs a Streamlit script without\n# 'streamlit run', but we need to make sure the warning appears only once no\n# matter how many times __init__ gets loaded.\n_repl_warning_has_been_displayed = False\n\n\ndef _maybe_print_repl_warning():\n global _repl_warning_has_been_displayed\n\n if not _repl_warning_has_been_displayed:\n _repl_warning_has_been_displayed = True\n\n if _env_util.is_repl():\n _LOGGER.warning(\n _textwrap.dedent(\n \"\"\"\n\n Will not generate Streamlit app\n\n To generate an app, use Streamlit in a file and run it with:\n $ streamlit run [FILE_NAME] [ARGUMENTS]\n\n \"\"\"\n )\n )\n\n elif _config.get_option(\"global.showWarningOnDirectExecution\"):\n script_name = _sys.argv[0]\n\n _LOGGER.warning(\n _textwrap.dedent(\n \"\"\"\n\n Will not generate Streamlit App\n\n To generate an App, run this file with:\n $ streamlit run %s [ARGUMENTS]\n\n \"\"\"\n ),\n script_name,\n )\n", "path": "lib/streamlit/__init__.py" } ]
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit.\n\nHow to use Streamlit in 3 seconds:\n\n 1. Write an app\n >>> import streamlit as st\n >>> st.write(anything_you_want)\n\n 2. Run your app\n $ streamlit run my_script.py\n\n 3. Use your app\n A new tab will open on your browser. That's your Streamlit app!\n\n 4. Modify your code, save it, and watch changes live on your browser.\n\nTake a look at the other commands in this module to find out what else\nStreamlit can do:\n\n >>> dir(streamlit)\n\nOr try running our \"Hello World\":\n\n $ streamlit hello\n\nFor more detailed info, see https://docs.streamlit.io.\n\"\"\"\n\n# IMPORTANT: Prefix with an underscore anything that the user shouldn't see.\n\n# NOTE: You'll see lots of \"noqa: F821\" in this file. That's because we\n# manually mess with the local namespace so the linter can't know that some\n# identifiers actually exist in the namespace.\n\n# Must be at the top, to avoid circular dependency.\nfrom streamlit import logger as _logger\nfrom streamlit import config as _config\n\n_LOGGER = _logger.get_logger(\"root\")\n\n# Give the package a version.\nimport pkg_resources as _pkg_resources\nimport uuid as _uuid\nimport subprocess\nimport platform\nimport os\nfrom typing import Any, List, Tuple, Type\n\n# This used to be pkg_resources.require('streamlit') but it would cause\n# pex files to fail. 
See #394 for more details.\n__version__ = _pkg_resources.get_distribution(\"streamlit\").version\n\n# Deterministic Unique Streamlit User ID\nif (\n platform.system() == \"Linux\"\n and os.path.isfile(\"/etc/machine-id\") == False\n and os.path.isfile(\"/var/lib/dbus/machine-id\") == False\n):\n print(\"Generate machine-id\")\n subprocess.run([\"sudo\", \"dbus-uuidgen\", \"--ensure\"])\n\nmachine_id = str(_uuid.getnode())\nif os.path.isfile(\"/etc/machine-id\"):\n with open(\"/etc/machine-id\", \"r\") as f:\n machine_id = f.read()\nelif os.path.isfile(\"/var/lib/dbus/machine-id\"):\n with open(\"/var/lib/dbus/machine-id\", \"r\") as f:\n machine_id = f.read()\n\n__installation_id__ = str(_uuid.uuid5(_uuid.NAMESPACE_DNS, machine_id))\n\n\nimport contextlib as _contextlib\nimport re as _re\nimport sys as _sys\nimport textwrap as _textwrap\nimport threading as _threading\nimport traceback as _traceback\nimport types as _types\nimport json as _json\nimport numpy as _np\n\nfrom streamlit import code_util as _code_util\nfrom streamlit import env_util as _env_util\nfrom streamlit import source_util as _source_util\nfrom streamlit import string_util as _string_util\nfrom streamlit import type_util as _type_util\nfrom streamlit.DeltaGenerator import DeltaGenerator as _DeltaGenerator\nfrom streamlit.ReportThread import add_report_ctx as _add_report_ctx\nfrom streamlit.ReportThread import get_report_ctx as _get_report_ctx\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto import BlockPath_pb2 as _BlockPath_pb2\nfrom streamlit.util import functools_wraps as _functools_wraps\n\n# Modules that the user should have access to. These are imported with \"as\"\n# syntax pass mypy checking with implicit_reexport disabled.\nfrom streamlit.caching import cache as cache # noqa: F401\n\n# This is set to True inside cli._main_run(), and is False otherwise.\n# If False, we should assume that DeltaGenerator functions are effectively\n# no-ops, and adapt gracefully.\n_is_running_with_streamlit = False\n\n\ndef _set_log_level():\n _logger.set_log_level(_config.get_option(\"global.logLevel\").upper())\n _logger.init_tornado_logs()\n\n\n# Make this file only depend on config option in an asynchronous manner. 
This\n# avoids a race condition when another file (such as a test file) tries to pass\n# in an alternative config.\n_config.on_config_parsed(_set_log_level, True)\n\n\n_main = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.MAIN)\nsidebar = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.SIDEBAR)\n\n# DeltaGenerator methods:\n\naltair_chart = _main.altair_chart # noqa: E221\narea_chart = _main.area_chart # noqa: E221\naudio = _main.audio # noqa: E221\nballoons = _main.balloons # noqa: E221\nbar_chart = _main.bar_chart # noqa: E221\nbokeh_chart = _main.bokeh_chart # noqa: E221\nbutton = _main.button # noqa: E221\ncheckbox = _main.checkbox # noqa: E221\ncode = _main.code # noqa: E221\ndataframe = _main.dataframe # noqa: E221\ndate_input = _main.date_input # noqa: E221\ndeck_gl_chart = _main.deck_gl_chart # noqa: E221\npydeck_chart = _main.pydeck_chart # noqa: E221\nempty = _main.empty # noqa: E221\nerror = _main.error # noqa: E221\nexception = _main.exception # noqa: E221\nbeta_set_favicon = _main.favicon # noqa: E221\nfile_uploader = _main.file_uploader # noqa: E221\ngraphviz_chart = _main.graphviz_chart # noqa: E221\nheader = _main.header # noqa: E221\nhelp = _main.help # noqa: E221\nimage = _main.image # noqa: E221\ninfo = _main.info # noqa: E221\njson = _main.json # noqa: E221\nlatex = _main.latex # noqa: E221\nline_chart = _main.line_chart # noqa: E221\nmap = _main.map # noqa: E221\nmarkdown = _main.markdown # noqa: E221\nmultiselect = _main.multiselect # noqa: E221\nnumber_input = _main.number_input # noqa: E221\nplotly_chart = _main.plotly_chart # noqa: E221\nprogress = _main.progress # noqa: E221\npyplot = _main.pyplot # noqa: E221\nradio = _main.radio # noqa: E221\nselectbox = _main.selectbox # noqa: E221\nslider = _main.slider # noqa: E221\nsubheader = _main.subheader # noqa: E221\nsuccess = _main.success # noqa: E221\ntable = _main.table # noqa: E221\ntext = _main.text # noqa: E221\ntext_area = _main.text_area # noqa: E221\ntext_input = _main.text_input # noqa: E221\ntime_input = _main.time_input # noqa: E221\ntitle = _main.title # noqa: E221\nvega_lite_chart = _main.vega_lite_chart # noqa: E221\nvideo = _main.video # noqa: E221\nwarning = _main.warning # noqa: E221\nbeta_color_picker = _main.beta_color_picker # noqa: E221\n\n# Config\n\nget_option = _config.get_option\n\n\ndef set_option(key, value):\n \"\"\"Set config option.\n\n Currently, only two config options can be set within the script itself:\n * client.caching\n * client.displayEnabled\n\n Calling with any other options will raise StreamlitAPIException.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\". To see all\n available options, run `streamlit config show` on a terminal.\n\n value\n The new value to assign to this config option.\n\n \"\"\"\n opt = _config._config_options[key]\n if opt.scriptable:\n _config.set_option(key, value)\n return\n\n raise StreamlitAPIException(\n \"{key} cannot be set on the fly. Set as command line option, e.g. 
streamlit run script.py --{key}, or in config.toml instead.\".format(\n key=key\n )\n )\n\n\n# Special methods:\n\n_HELP_TYPES = (\n _types.BuiltinFunctionType,\n _types.BuiltinMethodType,\n _types.FunctionType,\n _types.MethodType,\n _types.ModuleType,\n) # type: Tuple[Type[Any], ...]\n\n\ndef write(*args, **kwargs):\n \"\"\"Write arguments to the app.\n\n This is the Swiss Army knife of Streamlit commands: it does different\n things depending on what you throw at it. Unlike other Streamlit commands,\n write() has some unique properties:\n\n 1. You can pass in multiple arguments, all of which will be written.\n 2. Its behavior depends on the input types as follows.\n 3. It returns None, so it's \"slot\" in the App cannot be reused.\n\n Parameters\n ----------\n *args : any\n One or many objects to print to the App.\n\n Arguments are handled as follows:\n\n - write(string) : Prints the formatted Markdown string, with\n support for LaTeX expression and emoji shortcodes.\n See docs for st.markdown for more.\n - write(data_frame) : Displays the DataFrame as a table.\n - write(error) : Prints an exception specially.\n - write(func) : Displays information about a function.\n - write(module) : Displays information about the module.\n - write(dict) : Displays dict in an interactive widget.\n - write(obj) : The default is to print str(obj).\n - write(mpl_fig) : Displays a Matplotlib figure.\n - write(altair) : Displays an Altair chart.\n - write(keras) : Displays a Keras model.\n - write(graphviz) : Displays a Graphviz graph.\n - write(plotly_fig) : Displays a Plotly figure.\n - write(bokeh_fig) : Displays a Bokeh figure.\n - write(sympy_expr) : Prints SymPy expression using LaTeX.\n\n unsafe_allow_html : bool\n This is a keyword-only argument that defaults to False.\n\n By default, any HTML tags found in strings will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, *we strongly advise* against it*. It is hard to write secure\n HTML, so by using this argument you may be compromising your users'\n security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n **Also note that `unsafe_allow_html` is a temporary measure and may be\n removed from Streamlit at any time.**\n\n If you decide to turn on HTML anyway, we ask you to please tell us your\n exact use case here:\n https://discuss.streamlit.io/t/96 .\n\n This will help us come up with safe APIs that allow you to do what you\n want.\n\n Example\n -------\n\n Its simplest use case is to draw Markdown-formatted text, whenever the\n input is a string:\n\n >>> write('Hello, *World!* :sunglasses:')\n\n .. output::\n https://share.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE\n height: 50px\n\n As mentioned earlier, `st.write()` also accepts other data formats, such as\n numbers, data frames, styled data frames, and assorted objects:\n\n >>> st.write(1234)\n >>> st.write(pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... }))\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD\n height: 250px\n\n Finally, you can pass in multiple arguments to do things like:\n\n >>> st.write('1 + 1 = ', 2)\n >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1\n height: 300px\n\n Oh, one more thing: `st.write` accepts chart objects too! 
For example:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> import altair as alt\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(200, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> c = alt.Chart(df).mark_circle().encode(\n ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\n >>>\n >>> st.write(c)\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5\n height: 200px\n\n \"\"\"\n try:\n string_buffer = [] # type: List[str]\n unsafe_allow_html = kwargs.get(\"unsafe_allow_html\", False)\n\n def flush_buffer():\n if string_buffer:\n markdown(\n \" \".join(string_buffer), unsafe_allow_html=unsafe_allow_html,\n ) # noqa: F821\n string_buffer[:] = []\n\n for arg in args:\n # Order matters!\n if isinstance(arg, str):\n string_buffer.append(arg)\n elif _type_util.is_dataframe_like(arg):\n flush_buffer()\n if len(_np.shape(arg)) > 2:\n text(arg)\n else:\n dataframe(arg) # noqa: F821\n elif isinstance(arg, Exception):\n flush_buffer()\n exception(arg) # noqa: F821\n elif isinstance(arg, _HELP_TYPES):\n flush_buffer()\n help(arg)\n elif _type_util.is_altair_chart(arg):\n flush_buffer()\n altair_chart(arg)\n elif _type_util.is_type(arg, \"matplotlib.figure.Figure\"):\n flush_buffer()\n pyplot(arg)\n elif _type_util.is_plotly_chart(arg):\n flush_buffer()\n plotly_chart(arg)\n elif _type_util.is_type(arg, \"bokeh.plotting.figure.Figure\"):\n flush_buffer()\n bokeh_chart(arg)\n elif _type_util.is_graphviz_chart(arg):\n flush_buffer()\n graphviz_chart(arg)\n elif _type_util.is_sympy_expession(arg):\n flush_buffer()\n latex(arg)\n elif _type_util.is_keras_model(arg):\n from tensorflow.python.keras.utils import vis_utils\n\n flush_buffer()\n dot = vis_utils.model_to_dot(arg)\n graphviz_chart(dot.to_string())\n elif isinstance(arg, (dict, list)):\n flush_buffer()\n json(arg)\n elif _type_util.is_namedtuple(arg):\n flush_buffer()\n json(_json.dumps(arg._asdict()))\n elif _type_util.is_pydeck(arg):\n flush_buffer()\n pydeck_chart(arg)\n else:\n string_buffer.append(\"`%s`\" % str(arg).replace(\"`\", \"\\\\`\"))\n\n flush_buffer()\n\n except Exception as exc:\n exception(exc)\n\n\ndef experimental_show(*args):\n \"\"\"Write arguments and *argument names* to your app for debugging purposes.\n\n Show() has similar properties to write():\n\n 1. You can pass in multiple arguments, all of which will be debugged.\n 2. It returns None, so it's \"slot\" in the app cannot be reused.\n\n Note: This is an experimental feature. See\n https://docs.streamlit.io/en/latest/pre_release_features.html for more information.\n\n Parameters\n ----------\n *args : any\n One or many objects to debug in the App.\n\n Example\n -------\n\n >>> dataframe = pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... 
}))\n >>> st.experimental_show(dataframe)\n\n Notes\n -----\n\n This is an experimental feature with usage limitations:\n\n - The method must be called with the name `show`.\n - Must be called in one line of code, and only once per line.\n - When passing multiple arguments the inclusion of `,` or `)` in a string\n argument may cause an error.\n\n \"\"\"\n if not args:\n return\n\n try:\n import inspect\n\n # Get the calling line of code\n current_frame = inspect.currentframe()\n if current_frame is None:\n warning(\"`show` not enabled in the shell\")\n return\n lines = inspect.getframeinfo(current_frame.f_back)[3]\n\n if not lines:\n warning(\"`show` not enabled in the shell\")\n return\n\n # Parse arguments from the line\n line = lines[0].split(\"show\", 1)[1]\n inputs = _code_util.get_method_args_from_code(args, line)\n\n # Escape markdown and add deltas\n for idx, input in enumerate(inputs):\n escaped = _string_util.escape_markdown(input)\n\n markdown(\"**%s**\" % escaped)\n write(args[idx])\n\n except Exception:\n _, exc, exc_tb = _sys.exc_info()\n exception(exc, exc_tb) # noqa: F821\n\n\n@_contextlib.contextmanager\ndef spinner(text=\"In progress...\"):\n \"\"\"Temporarily displays a message while executing a block of code.\n\n Parameters\n ----------\n text : str\n A message to display while executing that block\n\n Example\n -------\n\n >>> with st.spinner('Wait for it...'):\n >>> time.sleep(5)\n >>> st.success('Done!')\n\n \"\"\"\n import streamlit.caching as caching\n\n # @st.cache optionally uses spinner for long-running computations.\n # Normally, streamlit warns the user when they call st functions\n # from within an @st.cache'd function. But we do *not* want to show\n # these warnings for spinner's message, so we create and mutate this\n # message delta within the \"suppress_cached_st_function_warning\"\n # context.\n with caching.suppress_cached_st_function_warning():\n message = empty()\n\n try:\n # Set the message 0.1 seconds in the future to avoid annoying\n # flickering if this spinner runs too quickly.\n DELAY_SECS = 0.1\n display_message = True\n display_message_lock = _threading.Lock()\n\n def set_message():\n with display_message_lock:\n if display_message:\n with caching.suppress_cached_st_function_warning():\n message.warning(str(text))\n\n _add_report_ctx(_threading.Timer(DELAY_SECS, set_message)).start()\n\n # Yield control back to the context.\n yield\n finally:\n if display_message_lock:\n with display_message_lock:\n display_message = False\n with caching.suppress_cached_st_function_warning():\n message.empty()\n\n\n_SPACES_RE = _re.compile(\"\\\\s*\")\n\n\n@_contextlib.contextmanager\ndef echo(code_location=\"above\"):\n \"\"\"Use in a `with` block to draw some code on the app, then execute it.\n\n Parameters\n ----------\n code_location : \"above\" or \"below\"\n Whether to show the echoed code before or after the results of the\n executed code block.\n\n Example\n -------\n\n >>> with st.echo():\n >>> st.write('This code will be printed')\n\n \"\"\"\n if code_location == \"below\":\n show_code = code\n show_warning = warning\n else:\n placeholder = empty() # noqa: F821\n show_code = placeholder.code\n show_warning = placeholder.warning\n\n try:\n frame = _traceback.extract_stack()[-3]\n filename, start_line = frame.filename, frame.lineno\n yield\n frame = _traceback.extract_stack()[-3]\n end_line = frame.lineno\n lines_to_display = [] # type: List[str]\n with _source_util.open_python_file(filename) as source_file:\n source_lines = source_file.readlines()\n 
lines_to_display.extend(source_lines[start_line:end_line])\n match = _SPACES_RE.match(lines_to_display[0])\n initial_spaces = match.end() if match else 0\n for line in source_lines[end_line:]:\n match = _SPACES_RE.match(line)\n indentation = match.end() if match else 0\n # The != 1 is because we want to allow '\\n' between sections.\n if indentation != 1 and indentation < initial_spaces:\n break\n lines_to_display.append(line)\n line_to_display = _textwrap.dedent(\"\".join(lines_to_display))\n\n show_code(line_to_display, \"python\")\n\n except FileNotFoundError as err:\n show_warning(\"Unable to display code. %s\" % err)\n\n\ndef _transparent_write(*args):\n \"\"\"This is just st.write, but returns the arguments you passed to it.\"\"\"\n write(*args)\n if len(args) == 1:\n return args[0]\n return args\n\n\n# We want to show a warning when the user runs a Streamlit script without\n# 'streamlit run', but we need to make sure the warning appears only once no\n# matter how many times __init__ gets loaded.\n_repl_warning_has_been_displayed = False\n\n\ndef _maybe_print_repl_warning():\n global _repl_warning_has_been_displayed\n\n if not _repl_warning_has_been_displayed:\n _repl_warning_has_been_displayed = True\n\n if _env_util.is_repl():\n _LOGGER.warning(\n _textwrap.dedent(\n \"\"\"\n\n Will not generate Streamlit app\n\n To generate an app, use Streamlit in a file and run it with:\n $ streamlit run [FILE_NAME] [ARGUMENTS]\n\n \"\"\"\n )\n )\n\n elif _config.get_option(\"global.showWarningOnDirectExecution\"):\n script_name = _sys.argv[0]\n\n _LOGGER.warning(\n _textwrap.dedent(\n \"\"\"\n\n Will not generate Streamlit App\n\n To generate an App, run this file with:\n $ streamlit run %s [ARGUMENTS]\n\n \"\"\"\n ),\n script_name,\n )\n", "path": "lib/streamlit/__init__.py" } ]
diff --git a/lib/streamlit/__init__.py b/lib/streamlit/__init__.py index 1a306d74c095..8f5dbb534aec 100644 --- a/lib/streamlit/__init__.py +++ b/lib/streamlit/__init__.py @@ -407,9 +407,8 @@ def flush_buffer(): flush_buffer() - except Exception: - _, exc, exc_tb = _sys.exc_info() - exception(exc, exc_tb) # noqa: F821 + except Exception as exc: + exception(exc) def experimental_show(*args): diff --git a/lib/tests/streamlit/write_test.py b/lib/tests/streamlit/write_test.py index 85af335d8bed..69d1ab849234 100644 --- a/lib/tests/streamlit/write_test.py +++ b/lib/tests/streamlit/write_test.py @@ -184,7 +184,12 @@ def __str__(self): def test_exception(self): """Test st.write that raises an exception.""" - with patch("streamlit.markdown") as m, patch("streamlit.exception") as e: + # We patch streamlit.exception to observe it, but we also make sure + # it's still called (via side_effect). This ensures that it's called + # with the proper arguments. + with patch("streamlit.markdown") as m, patch( + "streamlit.exception", side_effect=st.exception + ) as e: m.side_effect = Exception("some exception") st.write("some text")
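The traceback in the issue comes from an argument-count mismatch: the decorator that wraps `DeltaGenerator.exception` already injects two positional arguments (the generator and the element proto), so the extra traceback passed by `st.write` leaves no free slot. Here is a minimal, self-contained sketch of that mechanism; `with_element` and `FakeDeltaGenerator` are hypothetical stand-ins, not Streamlit's real internals.

```python
import functools


def with_element(method):
    """Hypothetical stand-in for the element-marshalling decorator."""

    @functools.wraps(method)
    def wrapped(dg, *args, **kwargs):
        element = object()  # stand-in for the proto element the real wrapper builds
        return method(dg, element, *args, **kwargs)

    return wrapped


class FakeDeltaGenerator:
    @with_element
    def exception(self, element, exception_to_render):
        print("rendering:", exception_to_render)


dg = FakeDeltaGenerator()
dg.exception(ValueError("boom"))  # fine: self + element + one user-supplied argument

try:
    dg.exception(ValueError("boom"), "extra_traceback")
except TypeError as err:
    print(err)  # exception() takes 3 positional arguments but 4 were given
```

The patch above therefore just stops forwarding the traceback from `st.write`, so the call matches the signature the wrapper expects.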
Lightning-AI__torchmetrics-1384
module 'torchmetrics.classification' has no attribute 'AUC'

## 🐛 Bug

Importing all the classification metrics raises an `AttributeError`:

`from torchmetrics.classification import *`

`AttributeError: module 'torchmetrics.classification' has no attribute 'AUC'`

### Environment
- torchmetrics 0.11.0
- pytorch 1.13.0

To fix it, `AUC` should be removed from the `__all__` list in `src/torchmetrics/classification/__init__.py`.
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torchmetrics.classification.confusion_matrix import ( # isort:skip\n BinaryConfusionMatrix,\n ConfusionMatrix,\n MulticlassConfusionMatrix,\n MultilabelConfusionMatrix,\n)\nfrom torchmetrics.classification.precision_recall_curve import ( # isort:skip\n PrecisionRecallCurve,\n BinaryPrecisionRecallCurve,\n MulticlassPrecisionRecallCurve,\n MultilabelPrecisionRecallCurve,\n)\nfrom torchmetrics.classification.stat_scores import ( # isort:skip\n BinaryStatScores,\n MulticlassStatScores,\n MultilabelStatScores,\n StatScores,\n)\nfrom torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy\nfrom torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC\nfrom torchmetrics.classification.average_precision import (\n AveragePrecision,\n BinaryAveragePrecision,\n MulticlassAveragePrecision,\n MultilabelAveragePrecision,\n)\nfrom torchmetrics.classification.calibration_error import (\n BinaryCalibrationError,\n CalibrationError,\n MulticlassCalibrationError,\n)\nfrom torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa\nfrom torchmetrics.classification.dice import Dice\nfrom torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch\nfrom torchmetrics.classification.f_beta import (\n BinaryF1Score,\n BinaryFBetaScore,\n F1Score,\n FBetaScore,\n MulticlassF1Score,\n MulticlassFBetaScore,\n MultilabelF1Score,\n MultilabelFBetaScore,\n)\nfrom torchmetrics.classification.hamming import (\n BinaryHammingDistance,\n HammingDistance,\n MulticlassHammingDistance,\n MultilabelHammingDistance,\n)\nfrom torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss\nfrom torchmetrics.classification.jaccard import (\n BinaryJaccardIndex,\n JaccardIndex,\n MulticlassJaccardIndex,\n MultilabelJaccardIndex,\n)\nfrom torchmetrics.classification.matthews_corrcoef import (\n BinaryMatthewsCorrCoef,\n MatthewsCorrCoef,\n MulticlassMatthewsCorrCoef,\n MultilabelMatthewsCorrCoef,\n)\nfrom torchmetrics.classification.precision_recall import (\n BinaryPrecision,\n BinaryRecall,\n MulticlassPrecision,\n MulticlassRecall,\n MultilabelPrecision,\n MultilabelRecall,\n Precision,\n Recall,\n)\nfrom torchmetrics.classification.ranking import (\n MultilabelCoverageError,\n MultilabelRankingAveragePrecision,\n MultilabelRankingLoss,\n)\nfrom torchmetrics.classification.recall_at_fixed_precision import (\n BinaryRecallAtFixedPrecision,\n MulticlassRecallAtFixedPrecision,\n MultilabelRecallAtFixedPrecision,\n)\nfrom torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC\nfrom torchmetrics.classification.specificity import (\n BinarySpecificity,\n MulticlassSpecificity,\n MultilabelSpecificity,\n Specificity,\n)\n\n__all__ = [\n \"BinaryConfusionMatrix\",\n \"ConfusionMatrix\",\n 
\"MulticlassConfusionMatrix\",\n \"MultilabelConfusionMatrix\",\n \"PrecisionRecallCurve\",\n \"BinaryPrecisionRecallCurve\",\n \"MulticlassPrecisionRecallCurve\",\n \"MultilabelPrecisionRecallCurve\",\n \"BinaryStatScores\",\n \"MulticlassStatScores\",\n \"MultilabelStatScores\",\n \"StatScores\",\n \"Accuracy\",\n \"BinaryAccuracy\",\n \"MulticlassAccuracy\",\n \"MultilabelAccuracy\",\n \"AUC\",\n \"AUROC\",\n \"BinaryAUROC\",\n \"MulticlassAUROC\",\n \"MultilabelAUROC\",\n \"AveragePrecision\",\n \"BinaryAveragePrecision\",\n \"MulticlassAveragePrecision\",\n \"MultilabelAveragePrecision\",\n \"BinnedAveragePrecision\",\n \"BinnedPrecisionRecallCurve\",\n \"BinnedRecallAtFixedPrecision\",\n \"BinaryCalibrationError\",\n \"CalibrationError\",\n \"MulticlassCalibrationError\",\n \"BinaryCohenKappa\",\n \"CohenKappa\",\n \"MulticlassCohenKappa\",\n \"Dice\",\n \"ExactMatch\",\n \"MulticlassExactMatch\",\n \"MultilabelExactMatch\",\n \"BinaryF1Score\",\n \"BinaryFBetaScore\",\n \"F1Score\",\n \"FBetaScore\",\n \"MulticlassF1Score\",\n \"MulticlassFBetaScore\",\n \"MultilabelF1Score\",\n \"MultilabelFBetaScore\",\n \"BinaryHammingDistance\",\n \"HammingDistance\",\n \"MulticlassHammingDistance\",\n \"MultilabelHammingDistance\",\n \"BinaryHingeLoss\",\n \"HingeLoss\",\n \"MulticlassHingeLoss\",\n \"BinaryJaccardIndex\",\n \"JaccardIndex\",\n \"MulticlassJaccardIndex\",\n \"MultilabelJaccardIndex\",\n \"BinaryMatthewsCorrCoef\",\n \"MatthewsCorrCoef\",\n \"MulticlassMatthewsCorrCoef\",\n \"MultilabelMatthewsCorrCoef\",\n \"BinaryPrecision\",\n \"BinaryRecall\",\n \"MulticlassPrecision\",\n \"MulticlassRecall\",\n \"MultilabelPrecision\",\n \"MultilabelRecall\",\n \"Precision\",\n \"Recall\",\n \"CoverageError\",\n \"LabelRankingAveragePrecision\",\n \"LabelRankingLoss\",\n \"MultilabelCoverageError\",\n \"MultilabelRankingAveragePrecision\",\n \"MultilabelRankingLoss\",\n \"BinaryRecallAtFixedPrecision\",\n \"MulticlassRecallAtFixedPrecision\",\n \"MultilabelRecallAtFixedPrecision\",\n \"ROC\",\n \"BinaryROC\",\n \"MulticlassROC\",\n \"MultilabelROC\",\n \"BinarySpecificity\",\n \"MulticlassSpecificity\",\n \"MultilabelSpecificity\",\n \"Specificity\",\n]\n", "path": "src/torchmetrics/classification/__init__.py" } ]
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torchmetrics.classification.confusion_matrix import ( # isort:skip\n BinaryConfusionMatrix,\n ConfusionMatrix,\n MulticlassConfusionMatrix,\n MultilabelConfusionMatrix,\n)\nfrom torchmetrics.classification.precision_recall_curve import ( # isort:skip\n PrecisionRecallCurve,\n BinaryPrecisionRecallCurve,\n MulticlassPrecisionRecallCurve,\n MultilabelPrecisionRecallCurve,\n)\nfrom torchmetrics.classification.stat_scores import ( # isort:skip\n BinaryStatScores,\n MulticlassStatScores,\n MultilabelStatScores,\n StatScores,\n)\nfrom torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy\nfrom torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC\nfrom torchmetrics.classification.average_precision import (\n AveragePrecision,\n BinaryAveragePrecision,\n MulticlassAveragePrecision,\n MultilabelAveragePrecision,\n)\nfrom torchmetrics.classification.calibration_error import (\n BinaryCalibrationError,\n CalibrationError,\n MulticlassCalibrationError,\n)\nfrom torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa\nfrom torchmetrics.classification.dice import Dice\nfrom torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch\nfrom torchmetrics.classification.f_beta import (\n BinaryF1Score,\n BinaryFBetaScore,\n F1Score,\n FBetaScore,\n MulticlassF1Score,\n MulticlassFBetaScore,\n MultilabelF1Score,\n MultilabelFBetaScore,\n)\nfrom torchmetrics.classification.hamming import (\n BinaryHammingDistance,\n HammingDistance,\n MulticlassHammingDistance,\n MultilabelHammingDistance,\n)\nfrom torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss\nfrom torchmetrics.classification.jaccard import (\n BinaryJaccardIndex,\n JaccardIndex,\n MulticlassJaccardIndex,\n MultilabelJaccardIndex,\n)\nfrom torchmetrics.classification.matthews_corrcoef import (\n BinaryMatthewsCorrCoef,\n MatthewsCorrCoef,\n MulticlassMatthewsCorrCoef,\n MultilabelMatthewsCorrCoef,\n)\nfrom torchmetrics.classification.precision_recall import (\n BinaryPrecision,\n BinaryRecall,\n MulticlassPrecision,\n MulticlassRecall,\n MultilabelPrecision,\n MultilabelRecall,\n Precision,\n Recall,\n)\nfrom torchmetrics.classification.ranking import (\n MultilabelCoverageError,\n MultilabelRankingAveragePrecision,\n MultilabelRankingLoss,\n)\nfrom torchmetrics.classification.recall_at_fixed_precision import (\n BinaryRecallAtFixedPrecision,\n MulticlassRecallAtFixedPrecision,\n MultilabelRecallAtFixedPrecision,\n)\nfrom torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC\nfrom torchmetrics.classification.specificity import (\n BinarySpecificity,\n MulticlassSpecificity,\n MultilabelSpecificity,\n Specificity,\n)\n\n__all__ = [\n \"BinaryConfusionMatrix\",\n \"ConfusionMatrix\",\n 
\"MulticlassConfusionMatrix\",\n \"MultilabelConfusionMatrix\",\n \"PrecisionRecallCurve\",\n \"BinaryPrecisionRecallCurve\",\n \"MulticlassPrecisionRecallCurve\",\n \"MultilabelPrecisionRecallCurve\",\n \"BinaryStatScores\",\n \"MulticlassStatScores\",\n \"MultilabelStatScores\",\n \"StatScores\",\n \"Accuracy\",\n \"BinaryAccuracy\",\n \"MulticlassAccuracy\",\n \"MultilabelAccuracy\",\n \"AUROC\",\n \"BinaryAUROC\",\n \"MulticlassAUROC\",\n \"MultilabelAUROC\",\n \"AveragePrecision\",\n \"BinaryAveragePrecision\",\n \"MulticlassAveragePrecision\",\n \"MultilabelAveragePrecision\",\n \"BinnedAveragePrecision\",\n \"BinnedPrecisionRecallCurve\",\n \"BinnedRecallAtFixedPrecision\",\n \"BinaryCalibrationError\",\n \"CalibrationError\",\n \"MulticlassCalibrationError\",\n \"BinaryCohenKappa\",\n \"CohenKappa\",\n \"MulticlassCohenKappa\",\n \"Dice\",\n \"ExactMatch\",\n \"MulticlassExactMatch\",\n \"MultilabelExactMatch\",\n \"BinaryF1Score\",\n \"BinaryFBetaScore\",\n \"F1Score\",\n \"FBetaScore\",\n \"MulticlassF1Score\",\n \"MulticlassFBetaScore\",\n \"MultilabelF1Score\",\n \"MultilabelFBetaScore\",\n \"BinaryHammingDistance\",\n \"HammingDistance\",\n \"MulticlassHammingDistance\",\n \"MultilabelHammingDistance\",\n \"BinaryHingeLoss\",\n \"HingeLoss\",\n \"MulticlassHingeLoss\",\n \"BinaryJaccardIndex\",\n \"JaccardIndex\",\n \"MulticlassJaccardIndex\",\n \"MultilabelJaccardIndex\",\n \"BinaryMatthewsCorrCoef\",\n \"MatthewsCorrCoef\",\n \"MulticlassMatthewsCorrCoef\",\n \"MultilabelMatthewsCorrCoef\",\n \"BinaryPrecision\",\n \"BinaryRecall\",\n \"MulticlassPrecision\",\n \"MulticlassRecall\",\n \"MultilabelPrecision\",\n \"MultilabelRecall\",\n \"Precision\",\n \"Recall\",\n \"CoverageError\",\n \"LabelRankingAveragePrecision\",\n \"LabelRankingLoss\",\n \"MultilabelCoverageError\",\n \"MultilabelRankingAveragePrecision\",\n \"MultilabelRankingLoss\",\n \"BinaryRecallAtFixedPrecision\",\n \"MulticlassRecallAtFixedPrecision\",\n \"MultilabelRecallAtFixedPrecision\",\n \"ROC\",\n \"BinaryROC\",\n \"MulticlassROC\",\n \"MultilabelROC\",\n \"BinarySpecificity\",\n \"MulticlassSpecificity\",\n \"MultilabelSpecificity\",\n \"Specificity\",\n]\n", "path": "src/torchmetrics/classification/__init__.py" } ]
diff --git a/src/torchmetrics/classification/__init__.py b/src/torchmetrics/classification/__init__.py index ad9c1e39e36..60069403e8a 100644 --- a/src/torchmetrics/classification/__init__.py +++ b/src/torchmetrics/classification/__init__.py @@ -119,7 +119,6 @@ "BinaryAccuracy", "MulticlassAccuracy", "MultilabelAccuracy", - "AUC", "AUROC", "BinaryAUROC", "MulticlassAUROC",
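The failure is a general property of star imports: every name listed in `__all__` is looked up on the module, so one stale entry breaks `from ... import *` even for users who never touch that name. A minimal sketch with a synthetic module (not torchmetrics itself):

```python
import sys
import types

# Build a throwaway module whose __all__ advertises a name it never defines.
mod = types.ModuleType("fake_classification")
mod.Accuracy = type("Accuracy", (), {})
mod.__all__ = ["Accuracy", "AUC"]  # "AUC" was removed from the code but not from __all__
sys.modules["fake_classification"] = mod

try:
    from fake_classification import *  # noqa: F403
except AttributeError as err:
    print(err)  # module 'fake_classification' has no attribute 'AUC'
```

The patch simply drops the stale `"AUC"` entry so that `__all__` again matches what the package actually exports.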
vllm-project__vllm-1212
[v0.2.0] Release Tracker ## Major changes * Up to 60% performance improvement by optimizing de-tokenization and sampler * Initial support for AWQ (performance not optimized) * Support for RoPE scaling and LongChat * Support for Mistral-7B ## PRs to be merged before the release - [x] Vectorized sampler: #1048, #820 - [x] LongChat: #555 - [x] `TORCH_CUDA_ARCH_LIST` build option: #1074 - [x] Support for Mistral-7B: #1196 - [x] #1198 - ~~[ ] FP32 RoPE kernel: #1061~~ (deferred to the next PR)
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.1.7\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py" } ]
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.2.0\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py" } ]
diff --git a/vllm/__init__.py b/vllm/__init__.py index b7b019f57b2..6a8b7c8fb9b 100644 --- a/vllm/__init__.py +++ b/vllm/__init__.py @@ -8,7 +8,7 @@ from vllm.outputs import CompletionOutput, RequestOutput from vllm.sampling_params import SamplingParams -__version__ = "0.1.7" +__version__ = "0.2.0" __all__ = [ "LLM",
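As a small, hypothetical illustration of what this record's version bump means downstream: scripts that depend on the v0.2.0 features listed in the tracker (e.g. AWQ or Mistral-7B support) can gate on `vllm.__version__`, which is the string edited in the diff above. A minimal sketch, assuming the `packaging` helper library is available:

```python
# Sketch of a hypothetical downstream guard; "0.2.0" comes from the diff above.
from packaging.version import Version

import vllm

if Version(vllm.__version__) < Version("0.2.0"):
    raise RuntimeError(
        f"vLLM {vllm.__version__} found; 0.2.0+ is required for AWQ/Mistral support"
    )
```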
python-pillow__Pillow-3912
Crash on trying to load a corrupted font as a file handle

### What did you do?

When loading a corrupted file by handle, PIL crashes:

```console
# python -c "from PIL import ImageFont; print(ImageFont.truetype(open('setup.py', 'rb')))"
double free or corruption (top)
Aborted
```

(Originally observed on an actually corrupt TTF file, but it is just as easily triggered by a non-font file.)

### What did you expect to happen?

Raise an exception.

### What actually happened?

Crash (SIGABRT).

### What are your OS, Python and Pillow versions?

* OS: Linux
* Python: 3.7.3
* Pillow: 6.0.0, reproduced with current git master
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# PIL raster font management\n#\n# History:\n# 1996-08-07 fl created (experimental)\n# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3\n# 1999-02-06 fl rewrote most font management stuff in C\n# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)\n# 2001-02-17 fl added freetype support\n# 2001-05-09 fl added TransposedFont wrapper class\n# 2002-03-04 fl make sure we have a \"L\" or \"1\" font\n# 2002-12-04 fl skip non-directory entries in the system path\n# 2003-04-29 fl add embedded default font\n# 2003-09-27 fl added support for truetype charmap encodings\n#\n# Todo:\n# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)\n#\n# Copyright (c) 1997-2003 by Secret Labs AB\n# Copyright (c) 1996-2003 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom . import Image\nfrom ._util import isDirectory, isPath, py3\nimport os\nimport sys\n\nLAYOUT_BASIC = 0\nLAYOUT_RAQM = 1\n\n\nclass _imagingft_not_installed(object):\n # module placeholder\n def __getattr__(self, id):\n raise ImportError(\"The _imagingft C module is not installed\")\n\n\ntry:\n from . import _imagingft as core\nexcept ImportError:\n core = _imagingft_not_installed()\n\n\n# FIXME: add support for pilfont2 format (see FontFile.py)\n\n# --------------------------------------------------------------------\n# Font metrics format:\n# \"PILfont\" LF\n# fontdescriptor LF\n# (optional) key=value... LF\n# \"DATA\" LF\n# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)\n#\n# To place a character, cut out srcbox and paste at dstbox,\n# relative to the character position. Then move the character\n# position according to dx, dy.\n# --------------------------------------------------------------------\n\n\nclass ImageFont(object):\n \"PIL font wrapper\"\n\n def _load_pilfont(self, filename):\n\n with open(filename, \"rb\") as fp:\n for ext in (\".png\", \".gif\", \".pbm\"):\n try:\n fullname = os.path.splitext(filename)[0] + ext\n image = Image.open(fullname)\n except Exception:\n pass\n else:\n if image and image.mode in (\"1\", \"L\"):\n break\n else:\n raise IOError(\"cannot find glyph data file\")\n\n self.file = fullname\n\n return self._load_pilfont_data(fp, image)\n\n def _load_pilfont_data(self, file, image):\n\n # read PILfont header\n if file.readline() != b\"PILfont\\n\":\n raise SyntaxError(\"Not a PILfont file\")\n file.readline().split(b\";\")\n self.info = [] # FIXME: should be a dictionary\n while True:\n s = file.readline()\n if not s or s == b\"DATA\\n\":\n break\n self.info.append(s)\n\n # read PILfont metrics\n data = file.read(256 * 20)\n\n # check image\n if image.mode not in (\"1\", \"L\"):\n raise TypeError(\"invalid font image mode\")\n\n image.load()\n\n self.font = Image.core.font(image.im, data)\n\n def getsize(self, text, *args, **kwargs):\n return self.font.getsize(text)\n\n def getmask(self, text, mode=\"\", *args, **kwargs):\n return self.font.getmask(text, mode)\n\n\n##\n# Wrapper for FreeType fonts. 
Application code should use the\n# <b>truetype</b> factory function to create font objects.\n\n\nclass FreeTypeFont(object):\n \"FreeType font wrapper (requires _imagingft service)\"\n\n def __init__(self, font=None, size=10, index=0, encoding=\"\", layout_engine=None):\n # FIXME: use service provider instead\n\n self.path = font\n self.size = size\n self.index = index\n self.encoding = encoding\n\n if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):\n layout_engine = LAYOUT_BASIC\n if core.HAVE_RAQM:\n layout_engine = LAYOUT_RAQM\n elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:\n layout_engine = LAYOUT_BASIC\n\n self.layout_engine = layout_engine\n\n def load_from_bytes(f):\n self.font_bytes = f.read()\n self.font = core.getfont(\n \"\", size, index, encoding, self.font_bytes, layout_engine\n )\n\n if isPath(font):\n if sys.platform == \"win32\":\n font_bytes_path = font if isinstance(font, bytes) else font.encode()\n try:\n font_bytes_path.decode(\"ascii\")\n except UnicodeDecodeError:\n # FreeType cannot load fonts with non-ASCII characters on Windows\n # So load it into memory first\n with open(font, \"rb\") as f:\n load_from_bytes(f)\n return\n self.font = core.getfont(\n font, size, index, encoding, layout_engine=layout_engine\n )\n else:\n load_from_bytes(font)\n\n def _multiline_split(self, text):\n split_character = \"\\n\" if isinstance(text, str) else b\"\\n\"\n return text.split(split_character)\n\n def getname(self):\n \"\"\"\n :return: A tuple of the font family (e.g. Helvetica) and the font style\n (e.g. Bold)\n \"\"\"\n return self.font.family, self.font.style\n\n def getmetrics(self):\n \"\"\"\n :return: A tuple of the font ascent (the distance from the baseline to\n the highest outline point) and descent (the distance from the\n baseline to the lowest outline point, a negative value)\n \"\"\"\n return self.font.ascent, self.font.descent\n\n def getsize(self, text, direction=None, features=None, language=None):\n \"\"\"\n Returns width and height (in pixels) of given text if rendered in font with\n provided direction, features, and language.\n\n :param text: Text to measure.\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param language: Language of the text. Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. 
versionadded:: 6.0.0\n\n :return: (width, height)\n \"\"\"\n size, offset = self.font.getsize(text, direction, features, language)\n return (size[0] + offset[0], size[1] + offset[1])\n\n def getsize_multiline(\n self, text, direction=None, spacing=4, features=None, language=None\n ):\n \"\"\"\n Returns width and height (in pixels) of given text if rendered in font\n with provided direction, features, and language, while respecting\n newline characters.\n\n :param text: Text to measure.\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n :param spacing: The vertical gap between lines, defaulting to 4 pixels.\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n :param language: Language of the text. Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. versionadded:: 6.0.0\n\n :return: (width, height)\n \"\"\"\n max_width = 0\n lines = self._multiline_split(text)\n line_spacing = self.getsize(\"A\")[1] + spacing\n for line in lines:\n line_width, line_height = self.getsize(line, direction, features, language)\n max_width = max(max_width, line_width)\n\n return max_width, len(lines) * line_spacing - spacing\n\n def getoffset(self, text):\n \"\"\"\n Returns the offset of given text. This is the gap between the\n starting coordinate and the first marking. Note that this gap is\n included in the result of :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`.\n\n :param text: Text to measure.\n\n :return: A tuple of the x and y offset\n \"\"\"\n return self.font.getsize(text)[1]\n\n def getmask(self, text, mode=\"\", direction=None, features=None, language=None):\n \"\"\"\n Create a bitmap for the text.\n\n If the font uses antialiasing, the bitmap should have mode ``L`` and use a\n maximum value of 255. Otherwise, it should have mode ``1``.\n\n :param text: Text to render.\n :param mode: Used by some graphics drivers to indicate what mode the\n driver prefers; if empty, the renderer may return either\n mode. Note that the mode is always a string, to simplify\n C-level implementations.\n\n .. versionadded:: 1.1.5\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param language: Language of the text. 
Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. versionadded:: 6.0.0\n\n :return: An internal PIL storage memory instance as defined by the\n :py:mod:`PIL.Image.core` interface module.\n \"\"\"\n return self.getmask2(\n text, mode, direction=direction, features=features, language=language\n )[0]\n\n def getmask2(\n self,\n text,\n mode=\"\",\n fill=Image.core.fill,\n direction=None,\n features=None,\n language=None,\n *args,\n **kwargs\n ):\n \"\"\"\n Create a bitmap for the text.\n\n If the font uses antialiasing, the bitmap should have mode ``L`` and use a\n maximum value of 255. Otherwise, it should have mode ``1``.\n\n :param text: Text to render.\n :param mode: Used by some graphics drivers to indicate what mode the\n driver prefers; if empty, the renderer may return either\n mode. Note that the mode is always a string, to simplify\n C-level implementations.\n\n .. versionadded:: 1.1.5\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param language: Language of the text. Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. 
versionadded:: 6.0.0\n\n :return: A tuple of an internal PIL storage memory instance as defined by the\n :py:mod:`PIL.Image.core` interface module, and the text offset, the\n gap between the starting coordinate and the first marking\n \"\"\"\n size, offset = self.font.getsize(text, direction, features, language)\n im = fill(\"L\", size, 0)\n self.font.render(text, im.id, mode == \"1\", direction, features, language)\n return im, offset\n\n def font_variant(\n self, font=None, size=None, index=None, encoding=None, layout_engine=None\n ):\n \"\"\"\n Create a copy of this FreeTypeFont object,\n using any specified arguments to override the settings.\n\n Parameters are identical to the parameters used to initialize this\n object.\n\n :return: A FreeTypeFont object.\n \"\"\"\n return FreeTypeFont(\n font=self.path if font is None else font,\n size=self.size if size is None else size,\n index=self.index if index is None else index,\n encoding=self.encoding if encoding is None else encoding,\n layout_engine=layout_engine or self.layout_engine,\n )\n\n def get_variation_names(self):\n \"\"\"\n :returns: A list of the named styles in a variation font.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n try:\n names = self.font.getvarnames()\n except AttributeError:\n raise NotImplementedError(\"FreeType 2.9.1 or greater is required\")\n return [name.replace(b\"\\x00\", b\"\") for name in names]\n\n def set_variation_by_name(self, name):\n \"\"\"\n :param name: The name of the style.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n names = self.get_variation_names()\n if not isinstance(name, bytes):\n name = name.encode()\n index = names.index(name)\n\n if index == getattr(self, \"_last_variation_index\", None):\n # When the same name is set twice in a row,\n # there is an 'unknown freetype error'\n # https://savannah.nongnu.org/bugs/?56186\n return\n self._last_variation_index = index\n\n self.font.setvarname(index)\n\n def get_variation_axes(self):\n \"\"\"\n :returns: A list of the axes in a variation font.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n try:\n axes = self.font.getvaraxes()\n except AttributeError:\n raise NotImplementedError(\"FreeType 2.9.1 or greater is required\")\n for axis in axes:\n axis[\"name\"] = axis[\"name\"].replace(b\"\\x00\", b\"\")\n return axes\n\n def set_variation_by_axes(self, axes):\n \"\"\"\n :param axes: A list of values for each axis.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n try:\n self.font.setvaraxes(axes)\n except AttributeError:\n raise NotImplementedError(\"FreeType 2.9.1 or greater is required\")\n\n\nclass TransposedFont(object):\n \"Wrapper for writing rotated or mirrored text\"\n\n def __init__(self, font, orientation=None):\n \"\"\"\n Wrapper that creates a transposed font from any existing font\n object.\n\n :param font: A font object.\n :param orientation: An optional orientation. 
If given, this should\n be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,\n Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.\n \"\"\"\n self.font = font\n self.orientation = orientation # any 'transpose' argument, or None\n\n def getsize(self, text, *args, **kwargs):\n w, h = self.font.getsize(text)\n if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):\n return h, w\n return w, h\n\n def getmask(self, text, mode=\"\", *args, **kwargs):\n im = self.font.getmask(text, mode, *args, **kwargs)\n if self.orientation is not None:\n return im.transpose(self.orientation)\n return im\n\n\ndef load(filename):\n \"\"\"\n Load a font file. This function loads a font object from the given\n bitmap font file, and returns the corresponding font object.\n\n :param filename: Name of font file.\n :return: A font object.\n :exception IOError: If the file could not be read.\n \"\"\"\n f = ImageFont()\n f._load_pilfont(filename)\n return f\n\n\ndef truetype(font=None, size=10, index=0, encoding=\"\", layout_engine=None):\n \"\"\"\n Load a TrueType or OpenType font from a file or file-like object,\n and create a font object.\n This function loads a font object from the given file or file-like\n object, and creates a font object for a font of the given size.\n\n This function requires the _imagingft service.\n\n :param font: A filename or file-like object containing a TrueType font.\n Under Windows, if the file is not found in this filename,\n the loader also looks in Windows :file:`fonts/` directory.\n :param size: The requested size, in points.\n :param index: Which font face to load (default is first available face).\n :param encoding: Which font encoding to use (default is Unicode). Common\n encodings are \"unic\" (Unicode), \"symb\" (Microsoft\n Symbol), \"ADOB\" (Adobe Standard), \"ADBE\" (Adobe Expert),\n and \"armn\" (Apple Roman). 
See the FreeType documentation\n for more information.\n :param layout_engine: Which layout engine to use, if available:\n `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`.\n :return: A font object.\n :exception IOError: If the file could not be read.\n \"\"\"\n\n def freetype(font):\n return FreeTypeFont(font, size, index, encoding, layout_engine)\n\n try:\n return freetype(font)\n except IOError:\n ttf_filename = os.path.basename(font)\n\n dirs = []\n if sys.platform == \"win32\":\n # check the windows font repository\n # NOTE: must use uppercase WINDIR, to work around bugs in\n # 1.5.2's os.environ.get()\n windir = os.environ.get(\"WINDIR\")\n if windir:\n dirs.append(os.path.join(windir, \"fonts\"))\n elif sys.platform in (\"linux\", \"linux2\"):\n lindirs = os.environ.get(\"XDG_DATA_DIRS\", \"\")\n if not lindirs:\n # According to the freedesktop spec, XDG_DATA_DIRS should\n # default to /usr/share\n lindirs = \"/usr/share\"\n dirs += [os.path.join(lindir, \"fonts\") for lindir in lindirs.split(\":\")]\n elif sys.platform == \"darwin\":\n dirs += [\n \"/Library/Fonts\",\n \"/System/Library/Fonts\",\n os.path.expanduser(\"~/Library/Fonts\"),\n ]\n\n ext = os.path.splitext(ttf_filename)[1]\n first_font_with_a_different_extension = None\n for directory in dirs:\n for walkroot, walkdir, walkfilenames in os.walk(directory):\n for walkfilename in walkfilenames:\n if ext and walkfilename == ttf_filename:\n return freetype(os.path.join(walkroot, walkfilename))\n elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename:\n fontpath = os.path.join(walkroot, walkfilename)\n if os.path.splitext(fontpath)[1] == \".ttf\":\n return freetype(fontpath)\n if not ext and first_font_with_a_different_extension is None:\n first_font_with_a_different_extension = fontpath\n if first_font_with_a_different_extension:\n return freetype(first_font_with_a_different_extension)\n raise\n\n\ndef load_path(filename):\n \"\"\"\n Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a\n bitmap font along the Python path.\n\n :param filename: Name of font file.\n :return: A font object.\n :exception IOError: If the file could not be read.\n \"\"\"\n for directory in sys.path:\n if isDirectory(directory):\n if not isinstance(filename, str):\n if py3:\n filename = filename.decode(\"utf-8\")\n else:\n filename = filename.encode(\"utf-8\")\n try:\n return load(os.path.join(directory, filename))\n except IOError:\n pass\n raise IOError(\"cannot find font file\")\n\n\ndef load_default():\n \"\"\"Load a \"better than nothing\" default font.\n\n .. 
versionadded:: 1.1.4\n\n :return: A font object.\n \"\"\"\n from io import BytesIO\n import base64\n\n f = ImageFont()\n f._load_pilfont_data(\n # courB08\n BytesIO(\n base64.b64decode(\n b\"\"\"\nUElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA\nBgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL\nAAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA\nAAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB\nACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A\nBAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB\n//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA\nAAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH\nAAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA\nZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv\nAAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/\n/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5\nAAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA\nAP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG\nAAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA\nBgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA\nAMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA\n2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF\nAAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////\n+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA\n////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA\nBgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv\nAAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA\nAAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA\nAUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA\nBQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//\n//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA\nAP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF\nAAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB\nmwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn\nAAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA\nAAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7\nAAYA
AgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA\nAv/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB\n//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA\nAAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ\nAAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC\nDgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ\nAAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/\n+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5\nAAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/\n///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG\nAAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA\nBQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA\nAm0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC\neQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG\nAAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////\n+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA\n////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA\nBgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT\nAAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A\nAALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA\nAu4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA\nBf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//\n//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA\nAP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ\nAAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA\nLQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5\nAAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA\nAABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5\nAAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA\nAP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG\nAAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA\nEgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK\nAJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA\npQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gAB
gAAALMACgC6ABIABgAA////+QAG\nAAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////\n+QAGAAIAzgAKANUAEw==\n\"\"\"\n )\n ),\n Image.open(\n BytesIO(\n base64.b64decode(\n b\"\"\"\niVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u\nMc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9\nM43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g\nLeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F\nIUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA\nBu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791\nNAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx\nin0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9\nSjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY\nAYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt\ny8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG\nABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY\nlODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H\n/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3\nAAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47\nc4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/\n/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw\npEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv\noJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR\nevta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA\nAAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//\nGc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR\nw7IkEbzhVQAAAABJRU5ErkJggg==\n\"\"\"\n )\n )\n ),\n )\n return f\n", "path": "src/PIL/ImageFont.py" } ]
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# PIL raster font management\n#\n# History:\n# 1996-08-07 fl created (experimental)\n# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3\n# 1999-02-06 fl rewrote most font management stuff in C\n# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)\n# 2001-02-17 fl added freetype support\n# 2001-05-09 fl added TransposedFont wrapper class\n# 2002-03-04 fl make sure we have a \"L\" or \"1\" font\n# 2002-12-04 fl skip non-directory entries in the system path\n# 2003-04-29 fl add embedded default font\n# 2003-09-27 fl added support for truetype charmap encodings\n#\n# Todo:\n# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)\n#\n# Copyright (c) 1997-2003 by Secret Labs AB\n# Copyright (c) 1996-2003 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom . import Image\nfrom ._util import isDirectory, isPath, py3\nimport os\nimport sys\n\nLAYOUT_BASIC = 0\nLAYOUT_RAQM = 1\n\n\nclass _imagingft_not_installed(object):\n # module placeholder\n def __getattr__(self, id):\n raise ImportError(\"The _imagingft C module is not installed\")\n\n\ntry:\n from . import _imagingft as core\nexcept ImportError:\n core = _imagingft_not_installed()\n\n\n# FIXME: add support for pilfont2 format (see FontFile.py)\n\n# --------------------------------------------------------------------\n# Font metrics format:\n# \"PILfont\" LF\n# fontdescriptor LF\n# (optional) key=value... LF\n# \"DATA\" LF\n# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)\n#\n# To place a character, cut out srcbox and paste at dstbox,\n# relative to the character position. Then move the character\n# position according to dx, dy.\n# --------------------------------------------------------------------\n\n\nclass ImageFont(object):\n \"PIL font wrapper\"\n\n def _load_pilfont(self, filename):\n\n with open(filename, \"rb\") as fp:\n for ext in (\".png\", \".gif\", \".pbm\"):\n try:\n fullname = os.path.splitext(filename)[0] + ext\n image = Image.open(fullname)\n except Exception:\n pass\n else:\n if image and image.mode in (\"1\", \"L\"):\n break\n else:\n raise IOError(\"cannot find glyph data file\")\n\n self.file = fullname\n\n return self._load_pilfont_data(fp, image)\n\n def _load_pilfont_data(self, file, image):\n\n # read PILfont header\n if file.readline() != b\"PILfont\\n\":\n raise SyntaxError(\"Not a PILfont file\")\n file.readline().split(b\";\")\n self.info = [] # FIXME: should be a dictionary\n while True:\n s = file.readline()\n if not s or s == b\"DATA\\n\":\n break\n self.info.append(s)\n\n # read PILfont metrics\n data = file.read(256 * 20)\n\n # check image\n if image.mode not in (\"1\", \"L\"):\n raise TypeError(\"invalid font image mode\")\n\n image.load()\n\n self.font = Image.core.font(image.im, data)\n\n def getsize(self, text, *args, **kwargs):\n return self.font.getsize(text)\n\n def getmask(self, text, mode=\"\", *args, **kwargs):\n return self.font.getmask(text, mode)\n\n\n##\n# Wrapper for FreeType fonts. 
Application code should use the\n# <b>truetype</b> factory function to create font objects.\n\n\nclass FreeTypeFont(object):\n \"FreeType font wrapper (requires _imagingft service)\"\n\n def __init__(self, font=None, size=10, index=0, encoding=\"\", layout_engine=None):\n # FIXME: use service provider instead\n\n self.path = font\n self.size = size\n self.index = index\n self.encoding = encoding\n\n if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):\n layout_engine = LAYOUT_BASIC\n if core.HAVE_RAQM:\n layout_engine = LAYOUT_RAQM\n elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:\n layout_engine = LAYOUT_BASIC\n\n self.layout_engine = layout_engine\n\n def load_from_bytes(f):\n self.font_bytes = f.read()\n self.font = core.getfont(\n \"\", size, index, encoding, self.font_bytes, layout_engine\n )\n\n if isPath(font):\n if sys.platform == \"win32\":\n font_bytes_path = font if isinstance(font, bytes) else font.encode()\n try:\n font_bytes_path.decode(\"ascii\")\n except UnicodeDecodeError:\n # FreeType cannot load fonts with non-ASCII characters on Windows\n # So load it into memory first\n with open(font, \"rb\") as f:\n load_from_bytes(f)\n return\n self.font = core.getfont(\n font, size, index, encoding, layout_engine=layout_engine\n )\n else:\n load_from_bytes(font)\n\n def _multiline_split(self, text):\n split_character = \"\\n\" if isinstance(text, str) else b\"\\n\"\n return text.split(split_character)\n\n def getname(self):\n \"\"\"\n :return: A tuple of the font family (e.g. Helvetica) and the font style\n (e.g. Bold)\n \"\"\"\n return self.font.family, self.font.style\n\n def getmetrics(self):\n \"\"\"\n :return: A tuple of the font ascent (the distance from the baseline to\n the highest outline point) and descent (the distance from the\n baseline to the lowest outline point, a negative value)\n \"\"\"\n return self.font.ascent, self.font.descent\n\n def getsize(self, text, direction=None, features=None, language=None):\n \"\"\"\n Returns width and height (in pixels) of given text if rendered in font with\n provided direction, features, and language.\n\n :param text: Text to measure.\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param language: Language of the text. Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. 
versionadded:: 6.0.0\n\n :return: (width, height)\n \"\"\"\n size, offset = self.font.getsize(text, direction, features, language)\n return (size[0] + offset[0], size[1] + offset[1])\n\n def getsize_multiline(\n self, text, direction=None, spacing=4, features=None, language=None\n ):\n \"\"\"\n Returns width and height (in pixels) of given text if rendered in font\n with provided direction, features, and language, while respecting\n newline characters.\n\n :param text: Text to measure.\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n :param spacing: The vertical gap between lines, defaulting to 4 pixels.\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n :param language: Language of the text. Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. versionadded:: 6.0.0\n\n :return: (width, height)\n \"\"\"\n max_width = 0\n lines = self._multiline_split(text)\n line_spacing = self.getsize(\"A\")[1] + spacing\n for line in lines:\n line_width, line_height = self.getsize(line, direction, features, language)\n max_width = max(max_width, line_width)\n\n return max_width, len(lines) * line_spacing - spacing\n\n def getoffset(self, text):\n \"\"\"\n Returns the offset of given text. This is the gap between the\n starting coordinate and the first marking. Note that this gap is\n included in the result of :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`.\n\n :param text: Text to measure.\n\n :return: A tuple of the x and y offset\n \"\"\"\n return self.font.getsize(text)[1]\n\n def getmask(self, text, mode=\"\", direction=None, features=None, language=None):\n \"\"\"\n Create a bitmap for the text.\n\n If the font uses antialiasing, the bitmap should have mode ``L`` and use a\n maximum value of 255. Otherwise, it should have mode ``1``.\n\n :param text: Text to render.\n :param mode: Used by some graphics drivers to indicate what mode the\n driver prefers; if empty, the renderer may return either\n mode. Note that the mode is always a string, to simplify\n C-level implementations.\n\n .. versionadded:: 1.1.5\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param language: Language of the text. 
Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. versionadded:: 6.0.0\n\n :return: An internal PIL storage memory instance as defined by the\n :py:mod:`PIL.Image.core` interface module.\n \"\"\"\n return self.getmask2(\n text, mode, direction=direction, features=features, language=language\n )[0]\n\n def getmask2(\n self,\n text,\n mode=\"\",\n fill=Image.core.fill,\n direction=None,\n features=None,\n language=None,\n *args,\n **kwargs\n ):\n \"\"\"\n Create a bitmap for the text.\n\n If the font uses antialiasing, the bitmap should have mode ``L`` and use a\n maximum value of 255. Otherwise, it should have mode ``1``.\n\n :param text: Text to render.\n :param mode: Used by some graphics drivers to indicate what mode the\n driver prefers; if empty, the renderer may return either\n mode. Note that the mode is always a string, to simplify\n C-level implementations.\n\n .. versionadded:: 1.1.5\n\n :param direction: Direction of the text. It can be 'rtl' (right to\n left), 'ltr' (left to right) or 'ttb' (top to bottom).\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param features: A list of OpenType font features to be used during text\n layout. This is usually used to turn on optional\n font features that are not enabled by default,\n for example 'dlig' or 'ss01', but can be also\n used to turn off default font features for\n example '-liga' to disable ligatures or '-kern'\n to disable kerning. To get all supported\n features, see\n https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist\n Requires libraqm.\n\n .. versionadded:: 4.2.0\n\n :param language: Language of the text. Different languages may use\n different glyph shapes or ligatures. This parameter tells\n the font which language the text is in, and to apply the\n correct substitutions as appropriate, if available.\n It should be a `BCP 47 language code\n <https://www.w3.org/International/articles/language-tags/>`\n Requires libraqm.\n\n .. 
versionadded:: 6.0.0\n\n :return: A tuple of an internal PIL storage memory instance as defined by the\n :py:mod:`PIL.Image.core` interface module, and the text offset, the\n gap between the starting coordinate and the first marking\n \"\"\"\n size, offset = self.font.getsize(text, direction, features, language)\n im = fill(\"L\", size, 0)\n self.font.render(text, im.id, mode == \"1\", direction, features, language)\n return im, offset\n\n def font_variant(\n self, font=None, size=None, index=None, encoding=None, layout_engine=None\n ):\n \"\"\"\n Create a copy of this FreeTypeFont object,\n using any specified arguments to override the settings.\n\n Parameters are identical to the parameters used to initialize this\n object.\n\n :return: A FreeTypeFont object.\n \"\"\"\n return FreeTypeFont(\n font=self.path if font is None else font,\n size=self.size if size is None else size,\n index=self.index if index is None else index,\n encoding=self.encoding if encoding is None else encoding,\n layout_engine=layout_engine or self.layout_engine,\n )\n\n def get_variation_names(self):\n \"\"\"\n :returns: A list of the named styles in a variation font.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n try:\n names = self.font.getvarnames()\n except AttributeError:\n raise NotImplementedError(\"FreeType 2.9.1 or greater is required\")\n return [name.replace(b\"\\x00\", b\"\") for name in names]\n\n def set_variation_by_name(self, name):\n \"\"\"\n :param name: The name of the style.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n names = self.get_variation_names()\n if not isinstance(name, bytes):\n name = name.encode()\n index = names.index(name)\n\n if index == getattr(self, \"_last_variation_index\", None):\n # When the same name is set twice in a row,\n # there is an 'unknown freetype error'\n # https://savannah.nongnu.org/bugs/?56186\n return\n self._last_variation_index = index\n\n self.font.setvarname(index)\n\n def get_variation_axes(self):\n \"\"\"\n :returns: A list of the axes in a variation font.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n try:\n axes = self.font.getvaraxes()\n except AttributeError:\n raise NotImplementedError(\"FreeType 2.9.1 or greater is required\")\n for axis in axes:\n axis[\"name\"] = axis[\"name\"].replace(b\"\\x00\", b\"\")\n return axes\n\n def set_variation_by_axes(self, axes):\n \"\"\"\n :param axes: A list of values for each axis.\n :exception IOError: If the font is not a variation font.\n \"\"\"\n try:\n self.font.setvaraxes(axes)\n except AttributeError:\n raise NotImplementedError(\"FreeType 2.9.1 or greater is required\")\n\n\nclass TransposedFont(object):\n \"Wrapper for writing rotated or mirrored text\"\n\n def __init__(self, font, orientation=None):\n \"\"\"\n Wrapper that creates a transposed font from any existing font\n object.\n\n :param font: A font object.\n :param orientation: An optional orientation. 
If given, this should\n be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,\n Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.\n \"\"\"\n self.font = font\n self.orientation = orientation # any 'transpose' argument, or None\n\n def getsize(self, text, *args, **kwargs):\n w, h = self.font.getsize(text)\n if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):\n return h, w\n return w, h\n\n def getmask(self, text, mode=\"\", *args, **kwargs):\n im = self.font.getmask(text, mode, *args, **kwargs)\n if self.orientation is not None:\n return im.transpose(self.orientation)\n return im\n\n\ndef load(filename):\n \"\"\"\n Load a font file. This function loads a font object from the given\n bitmap font file, and returns the corresponding font object.\n\n :param filename: Name of font file.\n :return: A font object.\n :exception IOError: If the file could not be read.\n \"\"\"\n f = ImageFont()\n f._load_pilfont(filename)\n return f\n\n\ndef truetype(font=None, size=10, index=0, encoding=\"\", layout_engine=None):\n \"\"\"\n Load a TrueType or OpenType font from a file or file-like object,\n and create a font object.\n This function loads a font object from the given file or file-like\n object, and creates a font object for a font of the given size.\n\n This function requires the _imagingft service.\n\n :param font: A filename or file-like object containing a TrueType font.\n Under Windows, if the file is not found in this filename,\n the loader also looks in Windows :file:`fonts/` directory.\n :param size: The requested size, in points.\n :param index: Which font face to load (default is first available face).\n :param encoding: Which font encoding to use (default is Unicode). Common\n encodings are \"unic\" (Unicode), \"symb\" (Microsoft\n Symbol), \"ADOB\" (Adobe Standard), \"ADBE\" (Adobe Expert),\n and \"armn\" (Apple Roman). 
See the FreeType documentation\n for more information.\n :param layout_engine: Which layout engine to use, if available:\n `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`.\n :return: A font object.\n :exception IOError: If the file could not be read.\n \"\"\"\n\n def freetype(font):\n return FreeTypeFont(font, size, index, encoding, layout_engine)\n\n try:\n return freetype(font)\n except IOError:\n if not isPath(font):\n raise\n ttf_filename = os.path.basename(font)\n\n dirs = []\n if sys.platform == \"win32\":\n # check the windows font repository\n # NOTE: must use uppercase WINDIR, to work around bugs in\n # 1.5.2's os.environ.get()\n windir = os.environ.get(\"WINDIR\")\n if windir:\n dirs.append(os.path.join(windir, \"fonts\"))\n elif sys.platform in (\"linux\", \"linux2\"):\n lindirs = os.environ.get(\"XDG_DATA_DIRS\", \"\")\n if not lindirs:\n # According to the freedesktop spec, XDG_DATA_DIRS should\n # default to /usr/share\n lindirs = \"/usr/share\"\n dirs += [os.path.join(lindir, \"fonts\") for lindir in lindirs.split(\":\")]\n elif sys.platform == \"darwin\":\n dirs += [\n \"/Library/Fonts\",\n \"/System/Library/Fonts\",\n os.path.expanduser(\"~/Library/Fonts\"),\n ]\n\n ext = os.path.splitext(ttf_filename)[1]\n first_font_with_a_different_extension = None\n for directory in dirs:\n for walkroot, walkdir, walkfilenames in os.walk(directory):\n for walkfilename in walkfilenames:\n if ext and walkfilename == ttf_filename:\n return freetype(os.path.join(walkroot, walkfilename))\n elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename:\n fontpath = os.path.join(walkroot, walkfilename)\n if os.path.splitext(fontpath)[1] == \".ttf\":\n return freetype(fontpath)\n if not ext and first_font_with_a_different_extension is None:\n first_font_with_a_different_extension = fontpath\n if first_font_with_a_different_extension:\n return freetype(first_font_with_a_different_extension)\n raise\n\n\ndef load_path(filename):\n \"\"\"\n Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a\n bitmap font along the Python path.\n\n :param filename: Name of font file.\n :return: A font object.\n :exception IOError: If the file could not be read.\n \"\"\"\n for directory in sys.path:\n if isDirectory(directory):\n if not isinstance(filename, str):\n if py3:\n filename = filename.decode(\"utf-8\")\n else:\n filename = filename.encode(\"utf-8\")\n try:\n return load(os.path.join(directory, filename))\n except IOError:\n pass\n raise IOError(\"cannot find font file\")\n\n\ndef load_default():\n \"\"\"Load a \"better than nothing\" default font.\n\n .. 
versionadded:: 1.1.4\n\n :return: A font object.\n \"\"\"\n from io import BytesIO\n import base64\n\n f = ImageFont()\n f._load_pilfont_data(\n # courB08\n BytesIO(\n base64.b64decode(\n b\"\"\"\nUElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA\nBgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL\nAAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA\nAAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB\nACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A\nBAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB\n//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA\nAAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH\nAAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA\nZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv\nAAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/\n/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5\nAAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA\nAP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG\nAAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA\nBgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA\nAMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA\n2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF\nAAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////\n+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA\n////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA\nBgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv\nAAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA\nAAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA\nAUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA\nBQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//\n//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA\nAP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF\nAAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB\nmwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn\nAAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA\nAAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7\nAAYA
AgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA\nAv/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB\n//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA\nAAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ\nAAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC\nDgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ\nAAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/\n+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5\nAAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/\n///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG\nAAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA\nBQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA\nAm0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC\neQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG\nAAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////\n+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA\n////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA\nBgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT\nAAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A\nAALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA\nAu4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA\nBf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//\n//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA\nAP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ\nAAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA\nLQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5\nAAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA\nAABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5\nAAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA\nAP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG\nAAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA\nEgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK\nAJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA\npQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gAB
gAAALMACgC6ABIABgAA////+QAG\nAAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////\n+QAGAAIAzgAKANUAEw==\n\"\"\"\n )\n ),\n Image.open(\n BytesIO(\n base64.b64decode(\n b\"\"\"\niVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u\nMc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9\nM43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g\nLeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F\nIUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA\nBu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791\nNAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx\nin0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9\nSjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY\nAYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt\ny8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG\nABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY\nlODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H\n/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3\nAAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47\nc4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/\n/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw\npEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv\noJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR\nevta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA\nAAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//\nGc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR\nw7IkEbzhVQAAAABJRU5ErkJggg==\n\"\"\"\n )\n )\n ),\n )\n return f\n", "path": "src/PIL/ImageFont.py" } ]
diff --git a/Tests/test_imagefont.py b/Tests/test_imagefont.py
index 3388c205579..0ee3b979e83 100644
--- a/Tests/test_imagefont.py
+++ b/Tests/test_imagefont.py
@@ -420,6 +420,10 @@ def test_load_path_not_found(self):
         self.assertRaises(IOError, ImageFont.load_path, filename)
         self.assertRaises(IOError, ImageFont.truetype, filename)
 
+    def test_load_non_font_bytes(self):
+        with open("Tests/images/hopper.jpg", "rb") as f:
+            self.assertRaises(IOError, ImageFont.truetype, f)
+
     def test_default_font(self):
         # Arrange
         txt = 'This is a "better than nothing" default font.'
diff --git a/src/PIL/ImageFont.py b/src/PIL/ImageFont.py
index 7074a70c01a..f43f95b9ac9 100644
--- a/src/PIL/ImageFont.py
+++ b/src/PIL/ImageFont.py
@@ -545,6 +545,8 @@ def freetype(font):
     try:
         return freetype(font)
     except IOError:
+        if not isPath(font):
+            raise
         ttf_filename = os.path.basename(font)
 
         dirs = []
diff --git a/src/_imagingft.c b/src/_imagingft.c
index f6bd787ef5c..28e6d2b5e01 100644
--- a/src/_imagingft.c
+++ b/src/_imagingft.c
@@ -315,6 +315,7 @@ getfont(PyObject* self_, PyObject* args, PyObject* kw)
     if (error) {
        if (self->font_bytes) {
            PyMem_Free(self->font_bytes);
+           self->font_bytes = NULL;
        }
        Py_DECREF(self);
        return geterror(error);
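For context on the patch above: after this change, handing `ImageFont.truetype` an open file object that is not a valid font re-raises the original `IOError` instead of falling through to a filesystem search on a non-path argument. A minimal sketch of the behaviour the new test exercises, assuming a Pillow install that includes this patch; the file name below is only an illustration (the test suite itself uses `Tests/images/hopper.jpg`):

```python
# Sketch of the behaviour covered by test_load_non_font_bytes above.
# "some_image.jpg" stands in for any binary file that is not a font.
from PIL import ImageFont

try:
    with open("some_image.jpg", "rb") as f:
        ImageFont.truetype(f)  # file object, not a path
except IOError as exc:
    # With the patch, the freetype error is re-raised directly rather than
    # attempting os.path.basename() on a file object.
    print("Rejected non-font bytes:", exc)
```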
vllm-project__vllm-2337
[v0.2.7] Release Tracker

**ETA**: Jan 3rd - 4th

## Major changes

TBD

## PRs to be merged before the release

- [x] #2221
- [ ] ~~#2293~~ (deferred)
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.2.6\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py" } ]
[ { "content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.2.7\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py" } ]
diff --git a/vllm/__init__.py b/vllm/__init__.py
index e5cd1c2f333..327dfad0635 100644
--- a/vllm/__init__.py
+++ b/vllm/__init__.py
@@ -8,7 +8,7 @@
 from vllm.outputs import CompletionOutput, RequestOutput
 from vllm.sampling_params import SamplingParams
 
-__version__ = "0.2.6"
+__version__ = "0.2.7"
 
 __all__ = [
     "LLM",
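This record is release bookkeeping only: the patch bumps the package version string. A trivial check of the bump (my own illustration, not part of the record), assuming vLLM is installed at the 0.2.7 release:

```python
# Confirms the version bump shown in the diff above.
import vllm

print(vllm.__version__)
assert vllm.__version__ == "0.2.7"
```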
meltano__meltano-7022
bug: Integration tests failing on main

### Meltano Version

N/A

### Python Version

N/A

### Bug scope

Other

### Operating System

N/A

### Description

Example failures on `main`:

- https://github.com/meltano/meltano/actions/runs/3534445738
- https://github.com/meltano/meltano/actions/runs/3534480620

Example success on `release/2.10.0`:

- https://github.com/meltano/meltano/actions/runs/3534468951

Affects all integration tests, and may be responsible for failures in the Pytest workflow:

- https://github.com/meltano/meltano/actions/runs/3534001638/jobs/5930358463
- https://github.com/meltano/meltano/actions/runs/3534001638/jobs/5930359021
- https://github.com/meltano/meltano/actions/runs/3534001638/jobs/5930359587

### Code

_No response_
[ { "content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.project import Project\n\nlogger = logging.getLogger(__name__)\n\nVenvSpecs = namedtuple(\"VenvSpecs\", (\"lib_dir\", \"bin_dir\", \"site_packages_dir\"))\n\nPOSIX = VenvSpecs(\n lib_dir=\"lib\",\n bin_dir=\"bin\",\n site_packages_dir=os.path.join(\n \"lib\",\n f\"python{'.'.join(str(part) for part in sys.version_info[:2])}\",\n \"site-packages\",\n ),\n)\n\nNT = VenvSpecs(\n lib_dir=\"Lib\",\n bin_dir=\"Scripts\",\n site_packages_dir=os.path.join(\"Lib\", \"site-packages\"),\n)\n\nPLATFORM_SPECS = {\"Linux\": POSIX, \"Darwin\": POSIX, \"Windows\": NT}\n\n\ndef venv_platform_specs():\n \"\"\"Get virtual environment sub-path info for the current platform.\n\n Raises:\n Exception: This platform is not supported.\n\n Returns:\n Virtual environment sub-path info for the current platform.\n \"\"\"\n system = platform.system()\n try:\n return PLATFORM_SPECS[system]\n except KeyError as ex:\n raise Exception(f\"Platform {system!r} not supported.\") from ex\n\n\nPIP_PACKAGES = (\"pip\", \"setuptools\", \"wheel\")\n\n\nclass VirtualEnv:\n \"\"\"Info about a single virtual environment.\"\"\"\n\n def __init__(self, root: Path):\n \"\"\"Initialize the `VirtualEnv` instance.\n\n Args:\n root: The root directory of the virtual environment.\n \"\"\"\n self.root = root.resolve()\n self.specs = venv_platform_specs()\n\n def __getattr__(self, key: str):\n \"\"\"Get a specific attribute from this instance.\n\n Used to provide `VenvSpecs` attributes for this specific virtual environment.\n\n Args:\n key: The attribute name. 
Must be one of the `VenvSpecs` attributes.\n\n Returns:\n The root directory of this virtual environment joined to the requested\n platform-specific path using this platform's `VenvSpecs` instance.\n \"\"\"\n return self.root / getattr(self.specs, key)\n\n def __str__(self):\n \"\"\"_summary_.\n\n Returns:\n _description_.\n \"\"\"\n return str(self.root)\n\n\nasync def exec_async(*args, **kwargs) -> Process:\n \"\"\"Run an executable asyncronously in a subprocess.\n\n Args:\n args: Positional arguments for `asyncio.create_subprocess_exec`.\n kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The subprocess.\n \"\"\"\n run = await asyncio.create_subprocess_exec(\n *args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs,\n )\n await run.wait()\n\n if run.returncode != 0:\n raise AsyncSubprocessError(\"Command failed\", run)\n\n return run\n\n\ndef fingerprint(pip_install_args: Iterable[str]) -> str:\n \"\"\"Generate a hash identifying pip install args.\n\n Arguments are sorted and deduplicated before the hash is generated.\n\n Args:\n pip_install_args: Arguments for `pip install`.\n\n Returns:\n The SHA256 hash hex digest of the sorted set of pip install args.\n \"\"\"\n return hashlib.sha256(\" \".join(sorted(set(pip_install_args))).encode()).hexdigest()\n\n\nclass VenvService: # noqa: WPS214\n \"\"\"Manages virtual environments.\n\n The methods in this class are not threadsafe.\n \"\"\"\n\n def __init__(self, project: Project, namespace: str = \"\", name: str = \"\"):\n \"\"\"Initialize the `VenvService`.\n\n Args:\n project: The Meltano project.\n namespace: The namespace for the venv, e.g. a Plugin type.\n name: The name of the venv, e.g. a Plugin name.\n \"\"\"\n self.project = project\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n\n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n \"\"\"Configure a virtual environment, then run pip install with the given args.\n\n Args:\n pip_install_args: Arguments passed to `pip install`.\n clean: Whether to not attempt to use an existing virtual environment.\n \"\"\"\n if not clean and self.requires_clean_install(pip_install_args):\n logger.debug(\n f\"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install.\"\n )\n clean = True\n\n self.clean_run_files()\n await self._pip_install(pip_install_args=pip_install_args, clean=clean)\n self.write_fingerprint(pip_install_args)\n\n def requires_clean_install(self, pip_install_args: list[str]) -> bool:\n \"\"\"Determine whether a clean install is needed.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`, used\n for fingerprinting the installation.\n\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n # A generator function is used to perform the checks lazily\n def checks():\n # The Python installation used to create this venv no longer exists\n yield not self.exec_path(\"python\").exists()\n # The deprecated `meltano_venv.pth` feature is used by this venv\n yield self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists()\n # The fingerprint of the venv does not match the pip install args\n existing_fingerprint = self.read_fingerprint()\n yield existing_fingerprint is None\n yield existing_fingerprint != 
fingerprint(pip_install_args)\n\n return any(checks())\n\n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n try:\n shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))\n except FileNotFoundError:\n logger.debug(\"No cached configuration files to remove\")\n\n def clean(self) -> None:\n \"\"\"Destroy the virtual environment, if it exists.\"\"\"\n try:\n shutil.rmtree(self.venv.root)\n logger.debug(\n \"Removed old virtual environment for '%s/%s'\", # noqa: WPS323\n self.namespace,\n self.name,\n )\n except FileNotFoundError:\n # If the VirtualEnv has never been created before do nothing\n logger.debug(\"No old virtual environment to remove\")\n\n async def create(self) -> Process:\n \"\"\"Create a new virtual environment.\n\n Raises:\n AsyncSubprocessError: The virtual environment could not be created.\n\n Returns:\n The Python process creating the virtual environment.\n \"\"\"\n logger.debug(f\"Creating virtual environment for '{self.namespace}/{self.name}'\")\n try:\n return await exec_async(sys.executable, \"-m\", \"venv\", str(self.venv))\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Could not create the virtualenv for '{self.namespace}/{self.name}'\",\n err.process,\n ) from err\n\n async def upgrade_pip(self) -> Process:\n \"\"\"Upgrade the `pip` package to the latest version in the virtual environment.\n\n Raises:\n AsyncSubprocessError: Failed to upgrade pip to the latest version.\n\n Returns:\n The process running `pip install --upgrade ...`.\n \"\"\"\n logger.debug(f\"Upgrading pip for '{self.namespace}/{self.name}'\")\n try:\n return await self._pip_install([\"--upgrade\", *PIP_PACKAGES])\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n \"Failed to upgrade pip to the latest version.\", err.process\n ) from err\n\n def read_fingerprint(self) -> str | None:\n \"\"\"Get the fingerprint of the existing virtual environment.\n\n Returns:\n The fingerprint of the existing virtual environment if it exists.\n `None` otherwise.\n \"\"\"\n if not self.plugin_fingerprint_path.exists():\n return None\n with open(self.plugin_fingerprint_path) as fingerprint_file:\n return fingerprint_file.read()\n\n def write_fingerprint(self, pip_install_args: list[str]) -> None:\n \"\"\"Save the fingerprint for this installation.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`.\n \"\"\"\n with open(self.plugin_fingerprint_path, \"wt\") as fingerprint_file:\n fingerprint_file.write(fingerprint(pip_install_args))\n\n def exec_path(self, executable: str) -> Path:\n \"\"\"Return the absolute path for the given executable in the virtual environment.\n\n Args:\n executable: The path to the executable relative to the venv bin directory.\n\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n absolute_executable = self.venv.bin_dir / executable\n if platform.system() != \"Windows\":\n return absolute_executable\n\n # On Windows, try using the '.exe' suffixed version if it exists. 
Use the\n # regular executable path as a fallback (and for backwards compatibility).\n absolute_executable_windows = absolute_executable.with_suffix(\".exe\")\n return (\n absolute_executable_windows\n if absolute_executable_windows.exists()\n else absolute_executable\n )\n\n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n ) -> Process:\n \"\"\"Install a package using `pip` in the proper virtual environment.\n\n Args:\n pip_install_args: The arguments to pass to `pip install`.\n clean: Whether the installation should be done in a clean venv.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The process running `pip install` with the provided args.\n \"\"\"\n if clean:\n self.clean()\n await self.create()\n await self.upgrade_pip()\n\n pip_install_args_str = \" \".join(pip_install_args)\n log_msg_prefix = (\n f\"Upgrading with args {pip_install_args_str!r} in existing\"\n if \"--upgrade\" in pip_install_args\n else f\"Installing with args {pip_install_args_str!r} into\"\n )\n logger.debug(\n f\"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'\"\n )\n\n try:\n return await exec_async(\n str(self.exec_path(\"python\")), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Failed to install plugin '{self.name}'.\", err.process\n ) from err\n", "path": "src/meltano/core/venv_service.py" } ]
[ { "content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.project import Project\n\nlogger = logging.getLogger(__name__)\n\nVenvSpecs = namedtuple(\"VenvSpecs\", (\"lib_dir\", \"bin_dir\", \"site_packages_dir\"))\n\nPOSIX = VenvSpecs(\n lib_dir=\"lib\",\n bin_dir=\"bin\",\n site_packages_dir=os.path.join(\n \"lib\",\n f\"python{'.'.join(str(part) for part in sys.version_info[:2])}\",\n \"site-packages\",\n ),\n)\n\nNT = VenvSpecs(\n lib_dir=\"Lib\",\n bin_dir=\"Scripts\",\n site_packages_dir=os.path.join(\"Lib\", \"site-packages\"),\n)\n\nPLATFORM_SPECS = {\"Linux\": POSIX, \"Darwin\": POSIX, \"Windows\": NT}\n\n\ndef venv_platform_specs():\n \"\"\"Get virtual environment sub-path info for the current platform.\n\n Raises:\n Exception: This platform is not supported.\n\n Returns:\n Virtual environment sub-path info for the current platform.\n \"\"\"\n system = platform.system()\n try:\n return PLATFORM_SPECS[system]\n except KeyError as ex:\n raise Exception(f\"Platform {system!r} not supported.\") from ex\n\n\nPIP_PACKAGES = (\"pip\", \"setuptools==57.5.0\", \"wheel\")\n\n\nclass VirtualEnv:\n \"\"\"Info about a single virtual environment.\"\"\"\n\n def __init__(self, root: Path):\n \"\"\"Initialize the `VirtualEnv` instance.\n\n Args:\n root: The root directory of the virtual environment.\n \"\"\"\n self.root = root.resolve()\n self.specs = venv_platform_specs()\n\n def __getattr__(self, key: str):\n \"\"\"Get a specific attribute from this instance.\n\n Used to provide `VenvSpecs` attributes for this specific virtual environment.\n\n Args:\n key: The attribute name. 
Must be one of the `VenvSpecs` attributes.\n\n Returns:\n The root directory of this virtual environment joined to the requested\n platform-specific path using this platform's `VenvSpecs` instance.\n \"\"\"\n return self.root / getattr(self.specs, key)\n\n def __str__(self):\n \"\"\"_summary_.\n\n Returns:\n _description_.\n \"\"\"\n return str(self.root)\n\n\nasync def exec_async(*args, **kwargs) -> Process:\n \"\"\"Run an executable asyncronously in a subprocess.\n\n Args:\n args: Positional arguments for `asyncio.create_subprocess_exec`.\n kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The subprocess.\n \"\"\"\n run = await asyncio.create_subprocess_exec(\n *args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs,\n )\n await run.wait()\n\n if run.returncode != 0:\n raise AsyncSubprocessError(\"Command failed\", run)\n\n return run\n\n\ndef fingerprint(pip_install_args: Iterable[str]) -> str:\n \"\"\"Generate a hash identifying pip install args.\n\n Arguments are sorted and deduplicated before the hash is generated.\n\n Args:\n pip_install_args: Arguments for `pip install`.\n\n Returns:\n The SHA256 hash hex digest of the sorted set of pip install args.\n \"\"\"\n return hashlib.sha256(\" \".join(sorted(set(pip_install_args))).encode()).hexdigest()\n\n\nclass VenvService: # noqa: WPS214\n \"\"\"Manages virtual environments.\n\n The methods in this class are not threadsafe.\n \"\"\"\n\n def __init__(self, project: Project, namespace: str = \"\", name: str = \"\"):\n \"\"\"Initialize the `VenvService`.\n\n Args:\n project: The Meltano project.\n namespace: The namespace for the venv, e.g. a Plugin type.\n name: The name of the venv, e.g. a Plugin name.\n \"\"\"\n self.project = project\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n\n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n \"\"\"Configure a virtual environment, then run pip install with the given args.\n\n Args:\n pip_install_args: Arguments passed to `pip install`.\n clean: Whether to not attempt to use an existing virtual environment.\n \"\"\"\n if not clean and self.requires_clean_install(pip_install_args):\n logger.debug(\n f\"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install.\"\n )\n clean = True\n\n self.clean_run_files()\n await self._pip_install(pip_install_args=pip_install_args, clean=clean)\n self.write_fingerprint(pip_install_args)\n\n def requires_clean_install(self, pip_install_args: list[str]) -> bool:\n \"\"\"Determine whether a clean install is needed.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`, used\n for fingerprinting the installation.\n\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n # A generator function is used to perform the checks lazily\n def checks():\n # The Python installation used to create this venv no longer exists\n yield not self.exec_path(\"python\").exists()\n # The deprecated `meltano_venv.pth` feature is used by this venv\n yield self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists()\n # The fingerprint of the venv does not match the pip install args\n existing_fingerprint = self.read_fingerprint()\n yield existing_fingerprint is None\n yield existing_fingerprint != 
fingerprint(pip_install_args)\n\n return any(checks())\n\n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n try:\n shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))\n except FileNotFoundError:\n logger.debug(\"No cached configuration files to remove\")\n\n def clean(self) -> None:\n \"\"\"Destroy the virtual environment, if it exists.\"\"\"\n try:\n shutil.rmtree(self.venv.root)\n logger.debug(\n \"Removed old virtual environment for '%s/%s'\", # noqa: WPS323\n self.namespace,\n self.name,\n )\n except FileNotFoundError:\n # If the VirtualEnv has never been created before do nothing\n logger.debug(\"No old virtual environment to remove\")\n\n async def create(self) -> Process:\n \"\"\"Create a new virtual environment.\n\n Raises:\n AsyncSubprocessError: The virtual environment could not be created.\n\n Returns:\n The Python process creating the virtual environment.\n \"\"\"\n logger.debug(f\"Creating virtual environment for '{self.namespace}/{self.name}'\")\n try:\n return await exec_async(sys.executable, \"-m\", \"venv\", str(self.venv))\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Could not create the virtualenv for '{self.namespace}/{self.name}'\",\n err.process,\n ) from err\n\n async def upgrade_pip(self) -> Process:\n \"\"\"Upgrade the `pip` package to the latest version in the virtual environment.\n\n Raises:\n AsyncSubprocessError: Failed to upgrade pip to the latest version.\n\n Returns:\n The process running `pip install --upgrade ...`.\n \"\"\"\n logger.debug(f\"Upgrading pip for '{self.namespace}/{self.name}'\")\n try:\n return await self._pip_install([\"--upgrade\", *PIP_PACKAGES])\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n \"Failed to upgrade pip to the latest version.\", err.process\n ) from err\n\n def read_fingerprint(self) -> str | None:\n \"\"\"Get the fingerprint of the existing virtual environment.\n\n Returns:\n The fingerprint of the existing virtual environment if it exists.\n `None` otherwise.\n \"\"\"\n if not self.plugin_fingerprint_path.exists():\n return None\n with open(self.plugin_fingerprint_path) as fingerprint_file:\n return fingerprint_file.read()\n\n def write_fingerprint(self, pip_install_args: list[str]) -> None:\n \"\"\"Save the fingerprint for this installation.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`.\n \"\"\"\n with open(self.plugin_fingerprint_path, \"wt\") as fingerprint_file:\n fingerprint_file.write(fingerprint(pip_install_args))\n\n def exec_path(self, executable: str) -> Path:\n \"\"\"Return the absolute path for the given executable in the virtual environment.\n\n Args:\n executable: The path to the executable relative to the venv bin directory.\n\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n absolute_executable = self.venv.bin_dir / executable\n if platform.system() != \"Windows\":\n return absolute_executable\n\n # On Windows, try using the '.exe' suffixed version if it exists. 
Use the\n # regular executable path as a fallback (and for backwards compatibility).\n absolute_executable_windows = absolute_executable.with_suffix(\".exe\")\n return (\n absolute_executable_windows\n if absolute_executable_windows.exists()\n else absolute_executable\n )\n\n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n ) -> Process:\n \"\"\"Install a package using `pip` in the proper virtual environment.\n\n Args:\n pip_install_args: The arguments to pass to `pip install`.\n clean: Whether the installation should be done in a clean venv.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The process running `pip install` with the provided args.\n \"\"\"\n if clean:\n self.clean()\n await self.create()\n await self.upgrade_pip()\n\n pip_install_args_str = \" \".join(pip_install_args)\n log_msg_prefix = (\n f\"Upgrading with args {pip_install_args_str!r} in existing\"\n if \"--upgrade\" in pip_install_args\n else f\"Installing with args {pip_install_args_str!r} into\"\n )\n logger.debug(\n f\"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'\"\n )\n\n try:\n return await exec_async(\n str(self.exec_path(\"python\")), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Failed to install plugin '{self.name}'.\", err.process\n ) from err\n", "path": "src/meltano/core/venv_service.py" } ]
diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py
index cec36f627d..956bdc1315 100644
--- a/src/meltano/core/venv_service.py
+++ b/src/meltano/core/venv_service.py
@@ -57,7 +57,7 @@ def venv_platform_specs():
         raise Exception(f"Platform {system!r} not supported.") from ex
 
 
-PIP_PACKAGES = ("pip", "setuptools", "wheel")
+PIP_PACKAGES = ("pip", "setuptools==57.5.0", "wheel")
 
 
 class VirtualEnv:
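The patch pins `setuptools` to 57.5.0 in the seed packages (`PIP_PACKAGES`) that `upgrade_pip()` installs into every plugin virtual environment, presumably to avoid breakage from newer setuptools releases when installing older plugins; the record itself states no rationale. A rough illustration (my own, not part of the patch) of the equivalent command after the change, run here against the current interpreter instead of a Meltano-managed venv:

```python
# upgrade_pip() effectively runs, inside each plugin venv:
#   <venv>/bin/python -m pip install --upgrade pip setuptools==57.5.0 wheel
import subprocess
import sys

PIP_PACKAGES = ("pip", "setuptools==57.5.0", "wheel")

subprocess.run(
    [sys.executable, "-m", "pip", "install", "--upgrade", *PIP_PACKAGES],
    check=True,
)
```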
ivy-llc__ivy-13924
atan2

Implementing atan2 functionality for the TensorFlow frontend. Solves https://github.com/unifyai/ivy/issues/1545
[ { "content": "# global\nimport ivy\nfrom ivy import with_supported_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_tf_dtype,\n to_ivy_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef accumulate_n(inputs, input_type=None, shape=None, dtype=None, name=None):\n return ivy.astype(ivy.sum(ivy.array(inputs)), ivy.int64)\n\n\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.add(x, y)\n\n\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef negative(x, name=None):\n return ivy.negative(x)\n\n\n@to_ivy_arrays_and_back\ndef argmax(input, axis, output_type=None, name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"uint16\", \"int16\", \"int32\", \"int64\"]:\n return ivy.astype(ivy.argmax(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmax(input, axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef asinh(x, name=\"asinh\"):\n return ivy.asinh(x)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef confusion_matrix(\n labels, predictions, num_classes=None, weights=None, dtype=ivy.int32, name=None\n):\n labels = ivy.astype(\n ivy.squeeze(ivy.array(labels), axis=None), ivy.int64, copy=False\n )\n predictions = ivy.astype(\n ivy.squeeze(ivy.array(predictions), axis=None), ivy.int64, copy=False\n )\n # failsafe for (1,) array will be squeeze to 0-dim\n labels = ivy.expand_dims(labels, axis=-1) if labels.ndim == 0 else labels\n predictions = (\n ivy.expand_dims(predictions, axis=-1) if predictions.ndim == 0 else predictions\n )\n\n # Sanity check (potential optimization)\n ivy.utils.assertions.check_greater(\n labels, 0, allow_equal=True, message=\"labels contains negative values\"\n )\n ivy.utils.assertions.check_greater(\n predictions, 0, allow_equal=True, message=\"predictions contains negative values\"\n )\n\n if num_classes is None:\n num_classes = max(ivy.max(labels), ivy.max(predictions)) + 1\n else:\n num_classes_int64 = ivy.astype(ivy.array(num_classes), ivy.int64, copy=False)\n ivy.utils.assertions.check_less(\n labels, num_classes_int64, message=\"labels out of bound\"\n )\n ivy.utils.assertions.check_less(\n predictions, num_classes_int64, message=\"predictions out of bound\"\n )\n\n if weights is not None:\n weights = ivy.array(weights)\n ivy.utils.assertions.check_equal(\n ivy.shape(predictions),\n ivy.shape(weights),\n message=\"weights shape do not match predictions\",\n )\n weights = ivy.astype(weights, dtype, copy=False)\n\n shape = ivy.stack([num_classes, num_classes])\n indices = ivy.stack([labels, predictions], axis=1)\n values = ivy.ones_like(predictions, dtype=dtype) if weights is None else weights\n return ivy.scatter_nd(indices, values, shape=shape)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef count_nonzero(input, axis=None, keepdims=None, dtype=ivy.int64, name=None):\n x = ivy.array(input)\n if keepdims is None:\n keepdims = False\n zero = ivy.zeros(ivy.shape(x), dtype=x.dtype)\n return ivy.astype(\n ivy.sum(\n ivy.astype(ivy.not_equal(x, zero), ivy.int64),\n axis=axis,\n keepdims=keepdims,\n ),\n dtype,\n copy=False,\n )\n\n\ndef cumprod(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumprod(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\ndef 
cumsum(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumsum(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef divide_no_nan(x, y, name=\"divide_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x / y,\n )\n\n\n@to_ivy_arrays_and_back\ndef maximum(x, y, name=None):\n return ivy.maximum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef erfcinv(x, name=\"erfcinv\"):\n return 1 / (1 - ivy.erf(x))\n\n\n@to_ivy_arrays_and_back\ndef is_non_decreasing(x, name=\"is_non_decreasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array([x[0] <= x[1]])\n return ivy.all(ivy.less_equal(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef is_strictly_increasing(x, name=\"is_strictly_increasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array(x[0] < x[1])\n return ivy.all(ivy.less(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef log_sigmoid(x, name=None):\n return -ivy.softplus(-x)\n\n\n@to_ivy_arrays_and_back\ndef logical_and(x, y, name=\"LogicalAnd\"):\n return ivy.logical_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x, y, name=\"LogicalXor\"):\n return ivy.logical_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x, y, name=\"logical_or\"):\n return ivy.logical_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.multiply(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply_no_nan(x, y, name=\"multiply_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x * y,\n )\n\n\n@to_ivy_arrays_and_back\ndef polyval(coeffs, x, name=None):\n ivy.utils.assertions.check_isinstance(coeffs, list)\n x = ivy.array(x)\n if len(coeffs) < 1:\n return ivy.zeros_like(x, dtype=x.dtype)\n coeffs = [ivy.array(_) for _ in coeffs]\n p = coeffs[0]\n for c in coeffs[1:]:\n p = c + p * x\n return p\n\n\n@to_ivy_arrays_and_back\ndef pow(x, y, name=\"pow\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.pow(x, y)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal_no_nan(x, name=\"reciprocal_no_nan\"):\n return ivy.where(\n x == 0,\n ivy.array(0.0, dtype=x.dtype),\n ivy.ones_like(x, dtype=x.dtype) / x,\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_all(input_tensor, axis=None, keepdims=False, name=\"reduce_all\"):\n return ivy.all(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_any(input_tensor, axis=None, keepdims=False, name=\"reduce_any\"):\n return ivy.any(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_euclidean_norm(\n input_tensor, axis=None, keepdims=False, name=\"reduce_euclidean_norm\"\n):\n return ivy.vector_norm(\n input_tensor, axis=axis, keepdims=keepdims, ord=2\n ) # ord = '2' is the euclidean norm\n\n\n@to_ivy_arrays_and_back\ndef reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=\"reduce_logsumexp\"):\n # stable logsumexp trick\n max_input_tensor = ivy.max(input_tensor, axis=axis, keepdims=True)\n return (\n ivy.log(\n ivy.sum(\n ivy.exp(input_tensor - max_input_tensor),\n axis=axis,\n keepdims=keepdims,\n )\n )\n + max_input_tensor\n 
).astype(input_tensor.dtype)\n\n\n@to_ivy_arrays_and_back\ndef reduce_max(input_tensor, axis=None, keepdims=False, name=\"reduce_max\"):\n return ivy.max(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_mean(input_tensor, axis=None, keepdims=False, name=\"reduce_mean\"):\n if ivy.exists(axis):\n axis = ivy.to_list(axis)\n return ivy.mean(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_min(input_tensor, axis=None, keepdims=False, name=\"reduce_min\"):\n return ivy.min(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_prod(input_tensor, axis=None, keepdims=False, name=\"reduce_prod\"):\n return ivy.prod(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_std(input_tensor, axis=None, keepdims=False, name=\"reduce_std\"):\n return ivy.std(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_sum(input_tensor, axis=None, keepdims=False, name=\"reduce_sum\"):\n return ivy.sum(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_variance(input_tensor, axis=None, keepdims=False, name=\"reduce_variance\"):\n return ivy.var(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef scalar_mul(scalar, x, name=\"scalar_mul\"):\n scalar, x = check_tensorflow_casting(scalar, x)\n return ivy.multiply(x, scalar).astype(x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.subtract(x, y)\n\n\n@to_ivy_arrays_and_back\ndef squared_difference(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.square(ivy.subtract(x, y))\n\n\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_mean(\n data, segment_ids, num_segments, name=\"unsorted_segment_mean\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], count[j])\n return x\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_sqrt_n(\n data, segment_ids, num_segments, name=\"unsorted_segement_sqrt_n\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], ivy.sqrt(count[j]))\n return x\n\n\n@to_ivy_arrays_and_back\ndef zero_fraction(value, name=\"zero_fraction\"):\n zero = ivy.zeros(tuple(list(value.shape)), dtype=ivy.float32)\n x = ivy.array(value, dtype=ivy.float32)\n count_zero = ivy.sum(ivy.equal(x, zero))\n count_nonzero = ivy.sum(ivy.not_equal(x, zero))\n return ivy.divide(count_zero, ivy.add(count_zero, count_nonzero))\n\n\n@to_ivy_arrays_and_back\ndef argmin(input, axis=None, output_type=\"int64\", name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"int32\", \"int64\"]:\n return ivy.astype(ivy.argmin(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmin(input, 
axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef truediv(x, y, name=\"truediv\"):\n x, y = check_tensorflow_casting(x, y)\n x_dtype = ivy.dtype(x)\n\n if ivy.current_backend_str() == \"torch\":\n if x_dtype in [ivy.int8, ivy.int16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.int64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n else:\n if x_dtype in [ivy.int8, ivy.uint8, ivy.int16, ivy.uint16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.uint32, ivy.int64, ivy.uint64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.not_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef minimum(x, y, name=None):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef sigmoid(x, name=None):\n return ivy.sigmoid(x)\n\n\n@with_supported_dtypes(\n {\"2.9.0 and below\": (\"float16\", \"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@to_ivy_arrays_and_back\ndef rsqrt(x, name=None):\n return ivy.reciprocal(ivy.sqrt(x))\n\n\n@to_ivy_arrays_and_back\ndef nextafter(x1, x2, name=None):\n return ivy.nextafter(x1, x2)\n\n\n@with_unsupported_dtypes(\n {\n \"1.2.0\": (\"float16\", \"complex64\", \"complex128\"),\n \"1.8.0 and below\": (\"float16\"),\n \"2.9.0 and below\": (\"int8\", \"int16\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"),\n },\n \"tensorflow\",\n)\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@to_ivy_arrays_and_back\ndef log_softmax(logits, axis=None):\n return ivy.log_softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@to_ivy_arrays_and_back\ndef acos(x, name=\"acos\"):\n return ivy.acos(x)\n\n\n@to_ivy_arrays_and_back\ndef acosh(x, name=\"acosh\"):\n return ivy.acosh(x)\n\n\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef is_nan(x, name=None):\n return ivy.isnan(x)\n\n\n@with_supported_dtypes(\n {\n \"2.11.0 and below\": (\"bfloat16\", \"half\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef is_finite(x, name=None):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef add_n(inputs, name=None):\n return ivy.sum(inputs, dtype=inputs.dtype, axis=0)\n\n\n@to_ivy_arrays_and_back\ndef floormod(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x, y, name=\"LessEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef greater(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef 
softmax(logits, axis=-1):\n return ivy.softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef xlogy(x, y, name=None):\n return ivy.xlogy(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n", "path": "ivy/functional/frontends/tensorflow/math.py" } ]
[ { "content": "# global\nimport ivy\nfrom ivy import with_supported_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.tensorflow import check_tensorflow_casting\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_tf_dtype,\n to_ivy_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef accumulate_n(inputs, input_type=None, shape=None, dtype=None, name=None):\n return ivy.astype(ivy.sum(ivy.array(inputs)), ivy.int64)\n\n\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.add(x, y)\n\n\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef negative(x, name=None):\n return ivy.negative(x)\n\n\n@to_ivy_arrays_and_back\ndef argmax(input, axis, output_type=None, name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"uint16\", \"int16\", \"int32\", \"int64\"]:\n return ivy.astype(ivy.argmax(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmax(input, axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef asinh(x, name=\"asinh\"):\n return ivy.asinh(x)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef confusion_matrix(\n labels, predictions, num_classes=None, weights=None, dtype=ivy.int32, name=None\n):\n labels = ivy.astype(\n ivy.squeeze(ivy.array(labels), axis=None), ivy.int64, copy=False\n )\n predictions = ivy.astype(\n ivy.squeeze(ivy.array(predictions), axis=None), ivy.int64, copy=False\n )\n # failsafe for (1,) array will be squeeze to 0-dim\n labels = ivy.expand_dims(labels, axis=-1) if labels.ndim == 0 else labels\n predictions = (\n ivy.expand_dims(predictions, axis=-1) if predictions.ndim == 0 else predictions\n )\n\n # Sanity check (potential optimization)\n ivy.utils.assertions.check_greater(\n labels, 0, allow_equal=True, message=\"labels contains negative values\"\n )\n ivy.utils.assertions.check_greater(\n predictions, 0, allow_equal=True, message=\"predictions contains negative values\"\n )\n\n if num_classes is None:\n num_classes = max(ivy.max(labels), ivy.max(predictions)) + 1\n else:\n num_classes_int64 = ivy.astype(ivy.array(num_classes), ivy.int64, copy=False)\n ivy.utils.assertions.check_less(\n labels, num_classes_int64, message=\"labels out of bound\"\n )\n ivy.utils.assertions.check_less(\n predictions, num_classes_int64, message=\"predictions out of bound\"\n )\n\n if weights is not None:\n weights = ivy.array(weights)\n ivy.utils.assertions.check_equal(\n ivy.shape(predictions),\n ivy.shape(weights),\n message=\"weights shape do not match predictions\",\n )\n weights = ivy.astype(weights, dtype, copy=False)\n\n shape = ivy.stack([num_classes, num_classes])\n indices = ivy.stack([labels, predictions], axis=1)\n values = ivy.ones_like(predictions, dtype=dtype) if weights is None else weights\n return ivy.scatter_nd(indices, values, shape=shape)\n\n\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef count_nonzero(input, axis=None, keepdims=None, dtype=ivy.int64, name=None):\n x = ivy.array(input)\n if keepdims is None:\n keepdims = False\n zero = ivy.zeros(ivy.shape(x), dtype=x.dtype)\n return ivy.astype(\n ivy.sum(\n ivy.astype(ivy.not_equal(x, zero), ivy.int64),\n axis=axis,\n keepdims=keepdims,\n ),\n dtype,\n copy=False,\n )\n\n\ndef cumprod(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumprod(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\ndef 
cumsum(x, axis, exclusive=False, reverse=False, name=None):\n return ivy.astype(\n ivy.cumsum(x, axis=axis, exclusive=exclusive, reverse=reverse), x.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef divide_no_nan(x, y, name=\"divide_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x / y,\n )\n\n\n@to_ivy_arrays_and_back\ndef maximum(x, y, name=None):\n return ivy.maximum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef erfcinv(x, name=\"erfcinv\"):\n return 1 / (1 - ivy.erf(x))\n\n\n@to_ivy_arrays_and_back\ndef is_non_decreasing(x, name=\"is_non_decreasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array([x[0] <= x[1]])\n return ivy.all(ivy.less_equal(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef is_strictly_increasing(x, name=\"is_strictly_increasing\"):\n if ivy.array(x).size < 2:\n return ivy.array(True)\n if ivy.array(x).size == 2:\n return ivy.array(x[0] < x[1])\n return ivy.all(ivy.less(x, ivy.roll(x, -1)))\n\n\n@to_ivy_arrays_and_back\ndef log_sigmoid(x, name=None):\n return -ivy.softplus(-x)\n\n\n@to_ivy_arrays_and_back\ndef logical_and(x, y, name=\"LogicalAnd\"):\n return ivy.logical_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x, y, name=\"LogicalXor\"):\n return ivy.logical_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x, y, name=\"logical_or\"):\n return ivy.logical_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.multiply(x, y)\n\n\n@to_ivy_arrays_and_back\ndef multiply_no_nan(x, y, name=\"multiply_no_nan\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.where(\n y == 0,\n ivy.array(0.0, dtype=ivy.promote_types(x.dtype, y.dtype)),\n x * y,\n )\n\n\n@to_ivy_arrays_and_back\ndef polyval(coeffs, x, name=None):\n ivy.utils.assertions.check_isinstance(coeffs, list)\n x = ivy.array(x)\n if len(coeffs) < 1:\n return ivy.zeros_like(x, dtype=x.dtype)\n coeffs = [ivy.array(_) for _ in coeffs]\n p = coeffs[0]\n for c in coeffs[1:]:\n p = c + p * x\n return p\n\n\n@to_ivy_arrays_and_back\ndef pow(x, y, name=\"pow\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.pow(x, y)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal_no_nan(x, name=\"reciprocal_no_nan\"):\n return ivy.where(\n x == 0,\n ivy.array(0.0, dtype=x.dtype),\n ivy.ones_like(x, dtype=x.dtype) / x,\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_all(input_tensor, axis=None, keepdims=False, name=\"reduce_all\"):\n return ivy.all(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_any(input_tensor, axis=None, keepdims=False, name=\"reduce_any\"):\n return ivy.any(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_euclidean_norm(\n input_tensor, axis=None, keepdims=False, name=\"reduce_euclidean_norm\"\n):\n return ivy.vector_norm(\n input_tensor, axis=axis, keepdims=keepdims, ord=2\n ) # ord = '2' is the euclidean norm\n\n\n@to_ivy_arrays_and_back\ndef reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=\"reduce_logsumexp\"):\n # stable logsumexp trick\n max_input_tensor = ivy.max(input_tensor, axis=axis, keepdims=True)\n return (\n ivy.log(\n ivy.sum(\n ivy.exp(input_tensor - max_input_tensor),\n axis=axis,\n keepdims=keepdims,\n )\n )\n + max_input_tensor\n 
).astype(input_tensor.dtype)\n\n\n@to_ivy_arrays_and_back\ndef reduce_max(input_tensor, axis=None, keepdims=False, name=\"reduce_max\"):\n return ivy.max(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_mean(input_tensor, axis=None, keepdims=False, name=\"reduce_mean\"):\n if ivy.exists(axis):\n axis = ivy.to_list(axis)\n return ivy.mean(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_min(input_tensor, axis=None, keepdims=False, name=\"reduce_min\"):\n return ivy.min(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_prod(input_tensor, axis=None, keepdims=False, name=\"reduce_prod\"):\n return ivy.prod(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_std(input_tensor, axis=None, keepdims=False, name=\"reduce_std\"):\n return ivy.std(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef reduce_sum(input_tensor, axis=None, keepdims=False, name=\"reduce_sum\"):\n return ivy.sum(input_tensor, axis=axis, keepdims=keepdims).astype(\n input_tensor.dtype\n )\n\n\n@to_ivy_arrays_and_back\ndef reduce_variance(input_tensor, axis=None, keepdims=False, name=\"reduce_variance\"):\n return ivy.var(input_tensor, axis=axis, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef scalar_mul(scalar, x, name=\"scalar_mul\"):\n scalar, x = check_tensorflow_casting(scalar, x)\n return ivy.multiply(x, scalar).astype(x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.subtract(x, y)\n\n\n@to_ivy_arrays_and_back\ndef squared_difference(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.square(ivy.subtract(x, y))\n\n\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_mean(\n data, segment_ids, num_segments, name=\"unsorted_segment_mean\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], count[j])\n return x\n\n\n@to_ivy_arrays_and_back\ndef unsorted_segment_sqrt_n(\n data, segment_ids, num_segments, name=\"unsorted_segement_sqrt_n\"\n):\n ivy.utils.assertions.check_equal(list(segment_ids.shape), [list(data.shape)[0]])\n x = ivy.zeros(tuple([num_segments] + (list(data.shape))[1:]))\n count = ivy.zeros((num_segments,))\n for i in range((segment_ids).shape[0]):\n x[segment_ids[i]] = x[segment_ids[i]] + data[i]\n count[segment_ids[i]] += 1\n for j in range(num_segments):\n x[j] = ivy.divide(x[j], ivy.sqrt(count[j]))\n return x\n\n\n@to_ivy_arrays_and_back\ndef zero_fraction(value, name=\"zero_fraction\"):\n zero = ivy.zeros(tuple(list(value.shape)), dtype=ivy.float32)\n x = ivy.array(value, dtype=ivy.float32)\n count_zero = ivy.sum(ivy.equal(x, zero))\n count_nonzero = ivy.sum(ivy.not_equal(x, zero))\n return ivy.divide(count_zero, ivy.add(count_zero, count_nonzero))\n\n\n@to_ivy_arrays_and_back\ndef argmin(input, axis=None, output_type=\"int64\", name=None):\n output_type = to_ivy_dtype(output_type)\n if output_type in [\"int32\", \"int64\"]:\n return ivy.astype(ivy.argmin(input, axis=axis), output_type)\n else:\n return ivy.astype(ivy.argmin(input, 
axis=axis), \"int64\")\n\n\n@to_ivy_arrays_and_back\ndef truediv(x, y, name=\"truediv\"):\n x, y = check_tensorflow_casting(x, y)\n x_dtype = ivy.dtype(x)\n\n if ivy.current_backend_str() == \"torch\":\n if x_dtype in [ivy.int8, ivy.int16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.int64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n else:\n if x_dtype in [ivy.int8, ivy.uint8, ivy.int16, ivy.uint16]:\n return ivy.divide(ivy.astype(x, ivy.float32), ivy.astype(y, ivy.float32))\n elif x_dtype in [ivy.int32, ivy.uint32, ivy.int64, ivy.uint64]:\n return ivy.divide(ivy.astype(x, ivy.float64), ivy.astype(y, ivy.float64))\n return ivy.divide(x, y)\n\n\n@to_ivy_arrays_and_back\ndef equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.not_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef minimum(x, y, name=None):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef sigmoid(x, name=None):\n return ivy.sigmoid(x)\n\n\n@with_supported_dtypes(\n {\"2.9.0 and below\": (\"float16\", \"float32\", \"float64\", \"complex64\", \"complex128\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@to_ivy_arrays_and_back\ndef rsqrt(x, name=None):\n return ivy.reciprocal(ivy.sqrt(x))\n\n\n@to_ivy_arrays_and_back\ndef nextafter(x1, x2, name=None):\n return ivy.nextafter(x1, x2)\n\n\n@with_unsupported_dtypes(\n {\n \"1.2.0\": (\"float16\", \"complex64\", \"complex128\"),\n \"1.8.0 and below\": (\"float16\"),\n \"2.9.0 and below\": (\"int8\", \"int16\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"),\n },\n \"tensorflow\",\n)\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@to_ivy_arrays_and_back\ndef log_softmax(logits, axis=None):\n return ivy.log_softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@to_ivy_arrays_and_back\ndef acos(x, name=\"acos\"):\n return ivy.acos(x)\n\n\n@to_ivy_arrays_and_back\ndef acosh(x, name=\"acosh\"):\n return ivy.acosh(x)\n\n\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef is_nan(x, name=None):\n return ivy.isnan(x)\n\n\n@with_supported_dtypes(\n {\n \"2.11.0 and below\": (\"bfloat16\", \"half\", \"float32\", \"float64\"),\n },\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef is_finite(x, name=None):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@to_ivy_arrays_and_back\ndef atan2(y, x, name=None):\n return ivy.atan2(y, x)\n\n\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef add_n(inputs, name=None):\n return ivy.sum(inputs, dtype=inputs.dtype, axis=0)\n\n\n@to_ivy_arrays_and_back\ndef floormod(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x, y, name=\"LessEqual\"):\n x, y = check_tensorflow_casting(x, y)\n return ivy.less_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef greater(x, y, name=None):\n x, y = check_tensorflow_casting(x, y)\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@to_ivy_arrays_and_back\ndef 
sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef softmax(logits, axis=-1):\n return ivy.softmax(logits, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef xlogy(x, y, name=None):\n return ivy.xlogy(x, y)\n\n\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n", "path": "ivy/functional/frontends/tensorflow/math.py" } ]
diff --git a/ivy/functional/frontends/tensorflow/math.py b/ivy/functional/frontends/tensorflow/math.py
index 71aa26155de7f..27eaf525ed1cd 100644
--- a/ivy/functional/frontends/tensorflow/math.py
+++ b/ivy/functional/frontends/tensorflow/math.py
@@ -512,6 +512,11 @@ def atan(x, name=None):
     return ivy.atan(x)
 
 
+@to_ivy_arrays_and_back
+def atan2(y, x, name=None):
+    return ivy.atan2(y, x)
+
+
 @to_ivy_arrays_and_back
 def log(x, name=None):
     return ivy.log(x)
diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py
index 6752026cce8bd..35c44ccce1a3d 100644
--- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py
+++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py
@@ -2082,6 +2082,35 @@ def test_tensorflow_cosh(
     )
 
 
+# atan2
+@handle_frontend_test(
+    fn_tree="tensorflow.math.atan2",
+    dtype_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
+    ),
+    test_with_out=st.just(False),
+)
+def test_tensorflow_atan2(
+    *,
+    dtype_and_x,
+    frontend,
+    test_flags,
+    fn_tree,
+    on_device,
+):
+    input_dtype, x = dtype_and_x
+    assume(not np.any(np.isclose(x[1], 0)))
+    helpers.test_frontend_function(
+        input_dtypes=input_dtype,
+        frontend=frontend,
+        test_flags=test_flags,
+        fn_tree=fn_tree,
+        on_device=on_device,
+        y=x[0],
+        x=x[1],
+    )
+
+
 # less_equal
 @handle_frontend_test(
     fn_tree="tensorflow.math.less_equal",
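A minimal usage sketch of the new frontend function (my own example, not from the record); it assumes an Ivy build containing the patch above, and the backend choice is arbitrary:

```python
# tf-frontend atan2 mirrors tf.math.atan2(y, x) and delegates to ivy.atan2.
import ivy
from ivy.functional.frontends.tensorflow import math as tf_math

ivy.set_backend("numpy")

y = ivy.array([1.0, -1.0])
x = ivy.array([1.0, 1.0])

print(tf_math.atan2(y, x))  # approximately [0.785, -0.785]
```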
encode__uvicorn-324
TypeError: __init__() when running "uvicorn app:App"

I'm working on macOS Sierra 10.12.6 with Python 3.7.2 and uvicorn 0.5.1 installed via pip3. When I run the example `uvicorn app:App`, I get the following error:

Traceback (most recent call last):
  File "/usr/local/bin/uvicorn", line 11, in <module>
    load_entry_point('uvicorn==0.5.1', 'console_scripts', 'uvicorn')()
  File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 489, in load_entry_point
    return get_distribution(dist).load_entry_point(group, name)
  File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2793, in load_entry_point
    return ep.load()
  File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2411, in load
    return self.resolve()
  File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2417, in resolve
    module = __import__(self.module_name, fromlist=['__name__'], level=0)
  File "/usr/local/lib/python3.7/site-packages/uvicorn/__init__.py", line 2, in <module>
    from uvicorn.main import Server, main, run
  File "/usr/local/lib/python3.7/site-packages/uvicorn/main.py", line 212, in <module>
    ssl_ciphers: str,
  File "/usr/local/lib/python3.7/site-packages/click/decorators.py", line 170, in decorator
    _param_memo(f, OptionClass(param_decls, **attrs))
  File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1460, in __init__
    Parameter.__init__(self, param_decls, type=type, **attrs)
TypeError: __init__() got an unexpected keyword argument 'hidden'

Thank you
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init__.py')\n init_py = open(path, 'r', encoding='utf8').read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open('README.md', 'r', encoding='utf8').read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nenv_marker = (\n \"sys_platform != 'win32'\"\n \" and sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'pypy'\"\n)\n\nrequirements = [\n \"click\",\n \"h11\",\n \"websockets>=6.0\",\n \"httptools;\" + env_marker,\n \"uvloop;\" + env_marker,\n]\n\n\nsetup(\n name='uvicorn',\n version=get_version('uvicorn'),\n url='https://github.com/encode/uvicorn',\n license='BSD',\n description='The lightning-fast ASGI server.',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]',\n packages=get_packages('uvicorn'),\n install_requires=requirements,\n data_files = [(\"\", [\"LICENSE.md\"])],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\"\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init__.py')\n init_py = open(path, 'r', encoding='utf8').read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open('README.md', 'r', encoding='utf8').read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nenv_marker = (\n \"sys_platform != 'win32'\"\n \" and sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'pypy'\"\n)\n\nrequirements = [\n \"click==7.*\",\n \"h11==0.8.*\",\n \"websockets==7.*\",\n \"httptools==0.0.13 ;\" + env_marker,\n \"uvloop==0.12.* ;\" + env_marker,\n]\n\n\nsetup(\n name='uvicorn',\n version=get_version('uvicorn'),\n url='https://github.com/encode/uvicorn',\n license='BSD',\n description='The lightning-fast ASGI server.',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]',\n packages=get_packages('uvicorn'),\n install_requires=requirements,\n data_files = [(\"\", [\"LICENSE.md\"])],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\"\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 3123b4a70..802cda43d 100755 --- a/setup.py +++ b/setup.py @@ -41,11 +41,11 @@ def get_packages(package): ) requirements = [ - "click", - "h11", - "websockets>=6.0", - "httptools;" + env_marker, - "uvloop;" + env_marker, + "click==7.*", + "h11==0.8.*", + "websockets==7.*", + "httptools==0.0.13 ;" + env_marker, + "uvloop==0.12.* ;" + env_marker, ]
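A small diagnostic to go with the pins above: the traceback in this report means the installed click predates the `hidden=` option parameter, which click introduced in 7.0 and uvicorn's CLI uses. A hedged sketch for checking a local environment (the script name is made up):

```python
# check_click.py -- is the locally installed click new enough for uvicorn 0.5.x's CLI?
import click

major = int(click.__version__.split(".")[0])
if major < 7:
    # click < 7.0 has no Option(hidden=...), which is exactly what the traceback reports
    print(f"click {click.__version__} is too old; try: pip install 'click==7.*'")
else:
    print(f"click {click.__version__} should be fine")
```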
django-cms__django-cms-2207
Fixture loading in Postgres

I get the following error when loading JSON fixtures with Postgres and Django 1.3.1:

IntegrityError: duplicate key value violates unique constraint "cms_placeholder_pkey"

I forked the repository and created a test case for this at https://github.com/mthornhill/django-cms. To recreate:

1. Clone the repository:
   git clone https://[email protected]/mthornhill/django-cms.git
2. Make a virtual environment:
   cd django-cms
   virtualenv . --no-site-packages
3. Run FixtureTestCase:
   ./runtests.sh -d 13 --rebuild-env FixtureTestCase
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.utils.conf import get_cms_setting\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import signals\nfrom django.dispatch import Signal\n\nfrom cms.cache.permissions import clear_user_permission_cache, clear_permission_cache\nfrom cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, PageUser, PageUserGroup\n\nfrom menus.menu_pool import menu_pool\n\n# fired after page location is changed - is moved from one node to other\npage_moved = Signal(providing_args=[\"instance\"])\n\n# fired when some of nodes (Title) with applications gets saved\napplication_post_changed = Signal(providing_args=[\"instance\"])\n\n# fired after page gets published - copied to public model - there may be more\n# than one instances published before this signal gets called\npost_publish = Signal(providing_args=[\"instance\"])\n\n\ndef update_plugin_positions(**kwargs):\n plugin = kwargs['instance']\n plugins = CMSPlugin.objects.filter(language=plugin.language, placeholder=plugin.placeholder).order_by(\"position\")\n last = 0\n for p in plugins:\n if p.position != last:\n p.position = last\n p.save()\n last += 1\n\n\nsignals.post_delete.connect(update_plugin_positions, sender=CMSPlugin, dispatch_uid=\"cms.plugin.update_position\")\n\n\ndef update_title_paths(instance, **kwargs):\n \"\"\"Update child pages paths in case when page was moved.\n \"\"\"\n for title in instance.title_set.all():\n title.save()\n\n\npage_moved.connect(update_title_paths, sender=Page, dispatch_uid=\"cms.title.update_path\")\n\n\ndef update_title(title):\n slug = u'%s' % title.slug\n\n if title.page.is_home():\n title.path = ''\n elif not title.has_url_overwrite:\n title.path = u'%s' % slug\n parent_page_id = title.page.parent_id\n\n if parent_page_id:\n parent_title = Title.objects.get_title(parent_page_id,\n language=title.language, language_fallback=True)\n if parent_title:\n title.path = (u'%s/%s' % (parent_title.path, slug)).lstrip(\"/\")\n\n\ndef pre_save_title(instance, raw, **kwargs):\n \"\"\"Save old state to instance and setup path\n \"\"\"\n if not instance.page.publisher_is_draft:\n menu_pool.clear(instance.page.site_id)\n if instance.id and not hasattr(instance, \"tmp_path\"):\n instance.tmp_path = None\n instance.tmp_application_urls = None\n try:\n instance.tmp_path, instance.tmp_application_urls = \\\n Title.objects.filter(pk=instance.id).values_list('path', 'application_urls')[0]\n except IndexError:\n pass # no Titles exist for this page yet\n\n # Build path from parent page's path and slug\n if instance.has_url_overwrite and instance.path:\n instance.path = instance.path.strip(\" /\")\n else:\n update_title(instance)\n\n\nsignals.pre_save.connect(pre_save_title, sender=Title, dispatch_uid=\"cms.title.presave\")\n\n\ndef post_save_title(instance, raw, created, **kwargs):\n # Update descendants only if path changed\n application_changed = False\n prevent_descendants = hasattr(instance, 'tmp_prevent_descendant_update')\n if instance.path != getattr(instance, 'tmp_path', None) and not prevent_descendants:\n descendant_titles = Title.objects.filter(\n page__lft__gt=instance.page.lft,\n page__rght__lt=instance.page.rght,\n page__tree_id__exact=instance.page.tree_id,\n language=instance.language,\n has_url_overwrite=False, # TODO: what if child has no url overwrite?\n ).order_by('page__tree_id', 'page__parent', 'page__lft')\n\n for descendant_title in descendant_titles:\n descendant_title.path = '' # just reset path\n 
descendant_title.tmp_prevent_descendant_update = True\n if descendant_title.application_urls:\n application_changed = True\n descendant_title.save()\n\n if not prevent_descendants and \\\n (instance.application_urls != getattr(instance, 'tmp_application_urls', None) or application_changed):\n # fire it if we have some application linked to this page or some descendant\n application_post_changed.send(sender=Title, instance=instance)\n\n # remove temporary attributes\n if hasattr(instance, 'tmp_path'):\n del instance.tmp_path\n if hasattr(instance, 'tmp_application_urls'):\n del instance.tmp_application_urls\n if prevent_descendants:\n del instance.tmp_prevent_descendant_update\n\n\nsignals.post_save.connect(post_save_title, sender=Title, dispatch_uid=\"cms.title.postsave\")\n\n\ndef post_save_user(instance, raw, created, **kwargs):\n \"\"\"Signal called when new user is created, required only when CMS_PERMISSION.\n Assigns creator of the user to PageUserInfo model, so we know who had created\n this user account.\n \n requires: CurrentUserMiddleware\n \"\"\"\n from cms.utils.permissions import get_current_user\n # read current user from thread locals\n creator = get_current_user()\n if not creator or not created or creator.is_anonymous():\n return\n\n page_user = PageUser(user_ptr_id=instance.pk, created_by=creator)\n page_user.__dict__.update(instance.__dict__)\n page_user.save()\n\n\ndef post_save_user_group(instance, raw, created, **kwargs):\n \"\"\"The same like post_save_user, but for Group, required only when \n CMS_PERMISSION.\n Assigns creator of the group to PageUserGroupInfo model, so we know who had\n created this user account.\n \n requires: CurrentUserMiddleware\n \"\"\"\n from cms.utils.permissions import get_current_user\n # read current user from thread locals\n creator = get_current_user()\n if not creator or not created or creator.is_anonymous():\n return\n page_user = PageUserGroup(group_ptr_id=instance.pk, created_by=creator)\n page_user.__dict__.update(instance.__dict__)\n page_user.save()\n\n\nif get_cms_setting('PERMISSION'):\n # only if permissions are in use\n from django.contrib.auth.models import User, Group\n # register signals to user related models\n signals.post_save.connect(post_save_user, User)\n signals.post_save.connect(post_save_user_group, Group)\n\n\ndef pre_save_page(instance, raw, **kwargs):\n \"\"\"Assigns old_page attribute, so we can compare changes.\n \"\"\"\n instance.old_page = None\n try:\n instance.old_page = Page.objects.get(pk=instance.pk)\n except ObjectDoesNotExist:\n pass\n\n\ndef post_save_page_moderator(instance, raw, created, **kwargs):\n \"\"\"Helper post save signal.\n \"\"\"\n old_page = instance.old_page\n\n # tell moderator something was happen with this page\n from cms.utils.moderator import page_changed\n\n if not old_page:\n page_changed(instance, old_page)\n\n\ndef post_save_page(instance, **kwargs):\n if instance.old_page is None or instance.old_page.parent_id != instance.parent_id:\n for page in instance.get_descendants(include_self=True):\n for title in page.title_set.all():\n update_title(title)\n title.save()\n\n\ndef update_placeholders(instance, **kwargs):\n instance.rescan_placeholders()\n\n\ndef invalidate_menu_cache(instance, **kwargs):\n menu_pool.clear(instance.site_id)\n\n# tell moderator, there is something happening with this page\nsignals.pre_save.connect(pre_save_page, sender=Page, dispatch_uid=\"cms.page.presave\")\nsignals.post_save.connect(post_save_page_moderator, sender=Page, 
dispatch_uid=\"cms.page.postsave\")\nsignals.post_save.connect(post_save_page, sender=Page)\nsignals.post_save.connect(update_placeholders, sender=Page)\nsignals.pre_save.connect(invalidate_menu_cache, sender=Page)\nsignals.pre_delete.connect(invalidate_menu_cache, sender=Page)\n\n\ndef pre_save_user(instance, raw, **kwargs):\n clear_user_permission_cache(instance)\n\n\ndef pre_delete_user(instance, **kwargs):\n clear_user_permission_cache(instance)\n\n\ndef pre_save_group(instance, raw, **kwargs):\n if instance.pk:\n for user in instance.user_set.all():\n clear_user_permission_cache(user)\n\n\ndef pre_delete_group(instance, **kwargs):\n for user in instance.user_set.all():\n clear_user_permission_cache(user)\n\n\ndef _clear_users_permissions(instance):\n if instance.user:\n clear_user_permission_cache(instance.user)\n if instance.group:\n for user in instance.group.user_set.all():\n clear_user_permission_cache(user)\n\n\ndef pre_save_pagepermission(instance, raw, **kwargs):\n _clear_users_permissions(instance)\n\n\ndef pre_delete_pagepermission(instance, **kwargs):\n _clear_users_permissions(instance)\n\n\ndef pre_save_globalpagepermission(instance, raw, **kwargs):\n _clear_users_permissions(instance)\n menu_pool.clear(all=True)\n\n\ndef pre_delete_globalpagepermission(instance, **kwargs):\n _clear_users_permissions(instance)\n\n\ndef pre_save_delete_page(instance, **kwargs):\n clear_permission_cache()\n\n\nif get_cms_setting('PERMISSION'):\n signals.pre_save.connect(pre_save_user, sender=User)\n signals.pre_delete.connect(pre_delete_user, sender=User)\n\n signals.pre_save.connect(pre_save_user, sender=PageUser)\n signals.pre_delete.connect(pre_delete_user, sender=PageUser)\n\n signals.pre_save.connect(pre_save_group, sender=Group)\n signals.pre_delete.connect(pre_delete_group, sender=Group)\n\n signals.pre_save.connect(pre_save_group, sender=PageUserGroup)\n signals.pre_delete.connect(pre_delete_group, sender=PageUserGroup)\n\n signals.pre_save.connect(pre_save_pagepermission, sender=PagePermission)\n signals.pre_delete.connect(pre_delete_pagepermission, sender=PagePermission)\n\n signals.pre_save.connect(pre_save_globalpagepermission, sender=GlobalPagePermission)\n signals.pre_delete.connect(pre_delete_globalpagepermission, sender=GlobalPagePermission)\n\n signals.pre_save.connect(pre_save_delete_page, sender=Page)\n signals.pre_delete.connect(pre_save_delete_page, sender=Page)\n", "path": "cms/signals.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.utils.conf import get_cms_setting\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import signals\nfrom django.dispatch import Signal\n\nfrom cms.cache.permissions import clear_user_permission_cache, clear_permission_cache\nfrom cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, PageUser, PageUserGroup\n\nfrom menus.menu_pool import menu_pool\n\n# fired after page location is changed - is moved from one node to other\npage_moved = Signal(providing_args=[\"instance\"])\n\n# fired when some of nodes (Title) with applications gets saved\napplication_post_changed = Signal(providing_args=[\"instance\"])\n\n# fired after page gets published - copied to public model - there may be more\n# than one instances published before this signal gets called\npost_publish = Signal(providing_args=[\"instance\"])\n\n\ndef update_plugin_positions(**kwargs):\n plugin = kwargs['instance']\n plugins = CMSPlugin.objects.filter(language=plugin.language, placeholder=plugin.placeholder).order_by(\"position\")\n last = 0\n for p in plugins:\n if p.position != last:\n p.position = last\n p.save()\n last += 1\n\n\nsignals.post_delete.connect(update_plugin_positions, sender=CMSPlugin, dispatch_uid=\"cms.plugin.update_position\")\n\n\ndef update_title_paths(instance, **kwargs):\n \"\"\"Update child pages paths in case when page was moved.\n \"\"\"\n for title in instance.title_set.all():\n title.save()\n\n\npage_moved.connect(update_title_paths, sender=Page, dispatch_uid=\"cms.title.update_path\")\n\n\ndef update_title(title):\n slug = u'%s' % title.slug\n\n if title.page.is_home():\n title.path = ''\n elif not title.has_url_overwrite:\n title.path = u'%s' % slug\n parent_page_id = title.page.parent_id\n\n if parent_page_id:\n parent_title = Title.objects.get_title(parent_page_id,\n language=title.language, language_fallback=True)\n if parent_title:\n title.path = (u'%s/%s' % (parent_title.path, slug)).lstrip(\"/\")\n\n\ndef pre_save_title(instance, raw, **kwargs):\n \"\"\"Save old state to instance and setup path\n \"\"\"\n if not instance.page.publisher_is_draft:\n menu_pool.clear(instance.page.site_id)\n if instance.id and not hasattr(instance, \"tmp_path\"):\n instance.tmp_path = None\n instance.tmp_application_urls = None\n try:\n instance.tmp_path, instance.tmp_application_urls = \\\n Title.objects.filter(pk=instance.id).values_list('path', 'application_urls')[0]\n except IndexError:\n pass # no Titles exist for this page yet\n\n # Build path from parent page's path and slug\n if instance.has_url_overwrite and instance.path:\n instance.path = instance.path.strip(\" /\")\n else:\n update_title(instance)\n\n\nsignals.pre_save.connect(pre_save_title, sender=Title, dispatch_uid=\"cms.title.presave\")\n\n\ndef post_save_title(instance, raw, created, **kwargs):\n # Update descendants only if path changed\n application_changed = False\n prevent_descendants = hasattr(instance, 'tmp_prevent_descendant_update')\n if instance.path != getattr(instance, 'tmp_path', None) and not prevent_descendants:\n descendant_titles = Title.objects.filter(\n page__lft__gt=instance.page.lft,\n page__rght__lt=instance.page.rght,\n page__tree_id__exact=instance.page.tree_id,\n language=instance.language,\n has_url_overwrite=False, # TODO: what if child has no url overwrite?\n ).order_by('page__tree_id', 'page__parent', 'page__lft')\n\n for descendant_title in descendant_titles:\n descendant_title.path = '' # just reset path\n 
descendant_title.tmp_prevent_descendant_update = True\n if descendant_title.application_urls:\n application_changed = True\n descendant_title.save()\n\n if not prevent_descendants and \\\n (instance.application_urls != getattr(instance, 'tmp_application_urls', None) or application_changed):\n # fire it if we have some application linked to this page or some descendant\n application_post_changed.send(sender=Title, instance=instance)\n\n # remove temporary attributes\n if hasattr(instance, 'tmp_path'):\n del instance.tmp_path\n if hasattr(instance, 'tmp_application_urls'):\n del instance.tmp_application_urls\n if prevent_descendants:\n del instance.tmp_prevent_descendant_update\n\n\nsignals.post_save.connect(post_save_title, sender=Title, dispatch_uid=\"cms.title.postsave\")\n\n\ndef post_save_user(instance, raw, created, **kwargs):\n \"\"\"Signal called when new user is created, required only when CMS_PERMISSION.\n Assigns creator of the user to PageUserInfo model, so we know who had created\n this user account.\n \n requires: CurrentUserMiddleware\n \"\"\"\n from cms.utils.permissions import get_current_user\n # read current user from thread locals\n creator = get_current_user()\n if not creator or not created or creator.is_anonymous():\n return\n\n page_user = PageUser(user_ptr_id=instance.pk, created_by=creator)\n page_user.__dict__.update(instance.__dict__)\n page_user.save()\n\n\ndef post_save_user_group(instance, raw, created, **kwargs):\n \"\"\"The same like post_save_user, but for Group, required only when \n CMS_PERMISSION.\n Assigns creator of the group to PageUserGroupInfo model, so we know who had\n created this user account.\n \n requires: CurrentUserMiddleware\n \"\"\"\n from cms.utils.permissions import get_current_user\n # read current user from thread locals\n creator = get_current_user()\n if not creator or not created or creator.is_anonymous():\n return\n page_user = PageUserGroup(group_ptr_id=instance.pk, created_by=creator)\n page_user.__dict__.update(instance.__dict__)\n page_user.save()\n\n\nif get_cms_setting('PERMISSION'):\n # only if permissions are in use\n from django.contrib.auth.models import User, Group\n # register signals to user related models\n signals.post_save.connect(post_save_user, User)\n signals.post_save.connect(post_save_user_group, Group)\n\n\ndef pre_save_page(instance, raw, **kwargs):\n \"\"\"Assigns old_page attribute, so we can compare changes.\n \"\"\"\n instance.old_page = None\n try:\n instance.old_page = Page.objects.get(pk=instance.pk)\n except ObjectDoesNotExist:\n pass\n\n\ndef post_save_page_moderator(instance, raw, created, **kwargs):\n \"\"\"Helper post save signal.\n \"\"\"\n old_page = instance.old_page\n\n # tell moderator something was happen with this page\n from cms.utils.moderator import page_changed\n\n if not old_page:\n page_changed(instance, old_page)\n\n\ndef post_save_page(instance, **kwargs):\n if instance.old_page is None or instance.old_page.parent_id != instance.parent_id:\n for page in instance.get_descendants(include_self=True):\n for title in page.title_set.all():\n update_title(title)\n title.save()\n\n\ndef update_placeholders(instance, **kwargs):\n if not kwargs.get('raw'):\n instance.rescan_placeholders()\n\n\ndef invalidate_menu_cache(instance, **kwargs):\n menu_pool.clear(instance.site_id)\n\n# tell moderator, there is something happening with this page\nsignals.pre_save.connect(pre_save_page, sender=Page, dispatch_uid=\"cms.page.presave\")\nsignals.post_save.connect(post_save_page_moderator, sender=Page, 
dispatch_uid=\"cms.page.postsave\")\nsignals.post_save.connect(post_save_page, sender=Page)\nsignals.post_save.connect(update_placeholders, sender=Page)\nsignals.pre_save.connect(invalidate_menu_cache, sender=Page)\nsignals.pre_delete.connect(invalidate_menu_cache, sender=Page)\n\n\ndef pre_save_user(instance, raw, **kwargs):\n clear_user_permission_cache(instance)\n\n\ndef pre_delete_user(instance, **kwargs):\n clear_user_permission_cache(instance)\n\n\ndef pre_save_group(instance, raw, **kwargs):\n if instance.pk:\n for user in instance.user_set.all():\n clear_user_permission_cache(user)\n\n\ndef pre_delete_group(instance, **kwargs):\n for user in instance.user_set.all():\n clear_user_permission_cache(user)\n\n\ndef _clear_users_permissions(instance):\n if instance.user:\n clear_user_permission_cache(instance.user)\n if instance.group:\n for user in instance.group.user_set.all():\n clear_user_permission_cache(user)\n\n\ndef pre_save_pagepermission(instance, raw, **kwargs):\n _clear_users_permissions(instance)\n\n\ndef pre_delete_pagepermission(instance, **kwargs):\n _clear_users_permissions(instance)\n\n\ndef pre_save_globalpagepermission(instance, raw, **kwargs):\n _clear_users_permissions(instance)\n menu_pool.clear(all=True)\n\n\ndef pre_delete_globalpagepermission(instance, **kwargs):\n _clear_users_permissions(instance)\n\n\ndef pre_save_delete_page(instance, **kwargs):\n clear_permission_cache()\n\n\nif get_cms_setting('PERMISSION'):\n signals.pre_save.connect(pre_save_user, sender=User)\n signals.pre_delete.connect(pre_delete_user, sender=User)\n\n signals.pre_save.connect(pre_save_user, sender=PageUser)\n signals.pre_delete.connect(pre_delete_user, sender=PageUser)\n\n signals.pre_save.connect(pre_save_group, sender=Group)\n signals.pre_delete.connect(pre_delete_group, sender=Group)\n\n signals.pre_save.connect(pre_save_group, sender=PageUserGroup)\n signals.pre_delete.connect(pre_delete_group, sender=PageUserGroup)\n\n signals.pre_save.connect(pre_save_pagepermission, sender=PagePermission)\n signals.pre_delete.connect(pre_delete_pagepermission, sender=PagePermission)\n\n signals.pre_save.connect(pre_save_globalpagepermission, sender=GlobalPagePermission)\n signals.pre_delete.connect(pre_delete_globalpagepermission, sender=GlobalPagePermission)\n\n signals.pre_save.connect(pre_save_delete_page, sender=Page)\n signals.pre_delete.connect(pre_save_delete_page, sender=Page)\n", "path": "cms/signals.py" } ]
diff --git a/cms/signals.py b/cms/signals.py index 9bfd860128d..8b7ade88b31 100644 --- a/cms/signals.py +++ b/cms/signals.py @@ -196,7 +196,8 @@ def post_save_page(instance, **kwargs): def update_placeholders(instance, **kwargs): - instance.rescan_placeholders() + if not kwargs.get('raw'): + instance.rescan_placeholders() def invalidate_menu_cache(instance, **kwargs): diff --git a/cms/tests/__init__.py b/cms/tests/__init__.py index 6abf5811f45..f78fa4e62f4 100644 --- a/cms/tests/__init__.py +++ b/cms/tests/__init__.py @@ -31,8 +31,9 @@ from cms.tests.urlutils import * from cms.tests.views import * from cms.tests.management import * +from cms.tests.fixture_loading import * from cms.tests.menu_page_viewperm import * from cms.tests.menu_page_viewperm_staff import * from cms.tests.nested_plugins import * from cms.tests.check import * -from cms.tests.no_i18n import * \ No newline at end of file +from cms.tests.no_i18n import * diff --git a/cms/tests/fixture_loading.py b/cms/tests/fixture_loading.py new file mode 100644 index 00000000000..b5aa7db1623 --- /dev/null +++ b/cms/tests/fixture_loading.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +import tempfile +import codecs + +try: + from cStringIO import StringIO +except: + from io import StringIO + +from django.core.management import call_command + +from cms.test_utils.fixtures.navextenders import NavextendersFixture +from cms.test_utils.testcases import SettingsOverrideTestCase +from cms.models import Page + + +class FixtureTestCase(NavextendersFixture, SettingsOverrideTestCase): + + def test_fixture_load(self): + """ + This test dumps a live set of pages, cleanup the database and load it + again. + This makes fixtures unnecessary and it's easier to maintain. + """ + output = StringIO() + dump = tempfile.mkstemp(".json") + call_command('dumpdata', 'cms', indent=3, stdout=output) + Page.objects.all().delete() + output.seek(0) + with codecs.open(dump[1], 'w', 'utf-8') as dumpfile: + dumpfile.write(output.read()) + + self.assertEqual(0, Page.objects.count()) + # Transaction disable, otherwise the connection it the test would be + # isolated from the data loaded in the different command connection + call_command('loaddata', dump[1], commit=False, stdout=output) + self.assertEqual(10, Page.objects.count())
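The one-line guard in this patch is the general Django idiom for keeping `post_save` side effects out of `loaddata`: the serialization framework calls signal handlers with `raw=True`, so anything that creates related rows (here, placeholders) should return early, otherwise it collides with the primary keys the fixture already contains. A generic sketch of that idiom — the `Article`/`Revision` models are hypothetical, not part of django CMS:

```python
from django.db.models.signals import post_save
from django.dispatch import receiver

from myapp.models import Article, Revision  # hypothetical models for illustration


@receiver(post_save, sender=Article)
def create_initial_revision(sender, instance, created, raw=False, **kwargs):
    # raw=True means the instance is being deserialized by loaddata; creating
    # extra rows here would duplicate what the fixture is about to insert.
    if raw:
        return
    if created:
        Revision.objects.create(article=instance, number=1)
```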
kubeflow__pipelines-2610
kfp 0.1.35 tar.gz on pypi.org is missing diagnose_me directory

**What happened:**
The 0.1.35 release of kfp available on PyPI (i.e. what is installed via `pip3 install kfp`) seems to be missing the `kfp/cli/diagnose_me` directory containing the diagnose_me modules required by the CLI. The release hosted on GitHub contains these files.

This is the tar.gz file hosted on PyPI: https://files.pythonhosted.org/packages/e8/02/51dbeae211ddf1c931b2d1613db90856b7d94a53c1d9f704593dfa6253ae/kfp-0.1.35.tar.gz

If you try to install and run kfp 0.1.35 via pip, it fails with an error:
```
Traceback (most recent call last):
  File "/Users/shenderson/venvs/kubeflow/bin/kfp", line 5, in <module>
    from kfp.__main__ import main
  File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/__main__.py", line 15, in <module>
    from .cli.cli import main
  File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/cli.py", line 21, in <module>
    from .diagnose_me_cli import diagnose_me
  File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/diagnose_me_cli.py", line 6, in <module>
    from .diagnose_me import dev_env
ModuleNotFoundError: No module named 'kfp.cli.diagnose_me'
```

**What did you expect to happen:**
All kfp modules, including the diagnose_me package, to be installed.

**What steps did you take:**
* Run `pip3 install --upgrade --force --no-cache-dir kfp`
* Run `kfp`
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... Change the version in kfp/__init__.py\n\nREQUIRES = [\n 'urllib3>=1.15,<1.25', #Fixing the version conflict with the \"requests\" package\n 'six >= 1.10',\n 'certifi',\n 'python-dateutil',\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <=9.0.0',\n 'PyJWT>=1.6.4',\n 'cryptography>=2.4.2',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle==1.1.1',\n 'kfp-server-api >= 0.1.18, <= 0.1.25', #Update the upper version whenever a new version of the kfp-server-api package is released. Update the lower version when there is a breaking change in kfp-server-api.\n 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1\n 'jsonschema >= 3.0.1',\n 'tabulate == 0.8.3',\n 'click == 7.0',\n 'Deprecated',\n]\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n name=NAME,\n version=find_version(\"kfp\", \"__init__.py\"),\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... Change the version in kfp/__init__.py\n\nREQUIRES = [\n 'urllib3>=1.15,<1.25', #Fixing the version conflict with the \"requests\" package\n 'six >= 1.10',\n 'certifi',\n 'python-dateutil',\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <=9.0.0',\n 'PyJWT>=1.6.4',\n 'cryptography>=2.4.2',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle==1.1.1',\n 'kfp-server-api >= 0.1.18, <= 0.1.25', #Update the upper version whenever a new version of the kfp-server-api package is released. Update the lower version when there is a breaking change in kfp-server-api.\n 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1\n 'jsonschema >= 3.0.1',\n 'tabulate == 0.8.3',\n 'click == 7.0',\n 'Deprecated',\n]\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n name=NAME,\n version=find_version(\"kfp\", \"__init__.py\"),\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py" } ]
diff --git a/sdk/python/setup.py b/sdk/python/setup.py index 46f51e5c08c..f656fa72808 100644 --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -64,6 +64,7 @@ def find_version(*file_path_parts): packages=[ 'kfp', 'kfp.cli', + 'kfp.cli.diagnose_me', 'kfp.compiler', 'kfp.components', 'kfp.components.structures',
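The underlying cause is the hand-maintained `packages=[...]` list in setup.py: any new subpackage that nobody remembers to add ships without its modules, which is exactly what happened to `kfp.cli.diagnose_me`. One common way to rule out this class of bug is `setuptools.find_packages()`; a minimal sketch of that approach (not the layout the kfp SDK actually uses):

```python
# setup.py sketch: let setuptools discover subpackages instead of listing them by hand
from setuptools import find_packages, setup

setup(
    name="example-sdk",  # hypothetical project name
    version="0.1.0",
    # picks up kfp, kfp.cli, kfp.cli.diagnose_me, ... automatically
    packages=find_packages(include=["kfp", "kfp.*"]),
)
```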
pyqtgraph__pyqtgraph-1242
Bugfix: PlotCurveItem.sigClicked emits MouseClickEvent

Hi all,

currently `PlotCurveItem.sigClicked` emits `self`, a `PlotCurveItem`:

```
    def mouseClickEvent(self, ev):
        if not self.clickable or ev.button() != QtCore.Qt.LeftButton:
            return
        if self.mouseShape().contains(ev.pos()):
            ev.accept()
            self.sigClicked.emit(self)
```

Since it can be useful to get the MouseClickEvent in the GraphicsView, I suggest emitting either `self.sigClicked.emit(ev)` or `self.sigClicked.emit(self, ev)`.

Cheers!
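To make the second suggestion concrete, here is a small sketch of what a click handler could look like if the signal carried both the curve and the event. This assumes `sigClicked` is redeclared as a two-argument signal and `mouseClickEvent` calls `self.sigClicked.emit(self, ev)`; it is not how the released PlotCurveItem behaves at the time of this report:

```python
# Assumes: sigClicked = QtCore.Signal(object, object) and
#          self.sigClicked.emit(self, ev) inside PlotCurveItem.mouseClickEvent.
import pyqtgraph as pg

def on_curve_clicked(curve, ev):
    # ev would be the MouseClickEvent, so click position and modifiers are available
    print("clicked", curve, "at scene pos", ev.scenePos())

app = pg.mkQApp()
plot_widget = pg.PlotWidget()
curve = pg.PlotCurveItem([0, 1, 4, 9, 16], clickable=True)
plot_widget.addItem(curve)
curve.sigClicked.connect(on_curve_clicked)
plot_widget.show()
app.exec_()
```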
[ { "content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\ntry:\n from ..Qt import QtOpenGL\n HAVE_OPENGL = True\nexcept:\n HAVE_OPENGL = False\n\nimport numpy as np\nfrom .GraphicsObject import GraphicsObject\nfrom .. import functions as fn\nfrom ..Point import Point\nimport struct, sys\nfrom .. import getConfigOption\nfrom .. import debug\n\n__all__ = ['PlotCurveItem']\nclass PlotCurveItem(GraphicsObject):\n\n\n \"\"\"\n Class representing a single plot curve. Instances of this class are created\n automatically as part of PlotDataItem; these rarely need to be instantiated\n directly.\n\n Features:\n\n - Fast data update\n - Fill under curve\n - Mouse interaction\n\n ==================== ===============================================\n **Signals:**\n sigPlotChanged(self) Emitted when the data being plotted has changed\n sigClicked(self) Emitted when the curve is clicked\n ==================== ===============================================\n \"\"\"\n\n sigPlotChanged = QtCore.Signal(object)\n sigClicked = QtCore.Signal(object)\n\n def __init__(self, *args, **kargs):\n \"\"\"\n Forwards all arguments to :func:`setData <pyqtgraph.PlotCurveItem.setData>`.\n\n Some extra arguments are accepted as well:\n\n ============== =======================================================\n **Arguments:**\n parent The parent GraphicsObject (optional)\n clickable If True, the item will emit sigClicked when it is\n clicked on. Defaults to False.\n ============== =======================================================\n \"\"\"\n GraphicsObject.__init__(self, kargs.get('parent', None))\n self.clear()\n\n ## this is disastrous for performance.\n #self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)\n\n self.metaData = {}\n self.opts = {\n 'shadowPen': None,\n 'fillLevel': None,\n 'fillOutline': False,\n 'brush': None,\n 'stepMode': False,\n 'name': None,\n 'antialias': getConfigOption('antialias'),\n 'connect': 'all',\n 'mouseWidth': 8, # width of shape responding to mouse click\n 'compositionMode': None,\n }\n if 'pen' not in kargs:\n self.opts['pen'] = fn.mkPen('w')\n self.setClickable(kargs.get('clickable', False))\n self.setData(*args, **kargs)\n\n def implements(self, interface=None):\n ints = ['plotData']\n if interface is None:\n return ints\n return interface in ints\n\n def name(self):\n return self.opts.get('name', None)\n\n def setClickable(self, s, width=None):\n \"\"\"Sets whether the item responds to mouse clicks.\n\n The *width* argument specifies the width in pixels orthogonal to the\n curve that will respond to a mouse click.\n \"\"\"\n self.clickable = s\n if width is not None:\n self.opts['mouseWidth'] = width\n self._mouseShape = None\n self._boundingRect = None\n\n def setCompositionMode(self, mode):\n \"\"\"Change the composition mode of the item (see QPainter::CompositionMode\n in the Qt documentation). This is useful when overlaying multiple items.\n\n ============================================ ============================================================\n **Most common arguments:**\n QtGui.QPainter.CompositionMode_SourceOver Default; image replaces the background if it\n is opaque. 
Otherwise, it uses the alpha channel to blend\n the image with the background.\n QtGui.QPainter.CompositionMode_Overlay The image color is mixed with the background color to\n reflect the lightness or darkness of the background.\n QtGui.QPainter.CompositionMode_Plus Both the alpha and color of the image and background pixels\n are added together.\n QtGui.QPainter.CompositionMode_Multiply The output is the image color multiplied by the background.\n ============================================ ============================================================\n \"\"\"\n self.opts['compositionMode'] = mode\n self.update()\n\n def getData(self):\n return self.xData, self.yData\n\n def dataBounds(self, ax, frac=1.0, orthoRange=None):\n ## Need this to run as fast as possible.\n ## check cache first:\n cache = self._boundsCache[ax]\n if cache is not None and cache[0] == (frac, orthoRange):\n return cache[1]\n\n (x, y) = self.getData()\n if x is None or len(x) == 0:\n return (None, None)\n\n if ax == 0:\n d = x\n d2 = y\n elif ax == 1:\n d = y\n d2 = x\n\n ## If an orthogonal range is specified, mask the data now\n if orthoRange is not None:\n mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])\n d = d[mask]\n #d2 = d2[mask]\n\n if len(d) == 0:\n return (None, None)\n\n ## Get min/max (or percentiles) of the requested data range\n if frac >= 1.0:\n # include complete data range\n # first try faster nanmin/max function, then cut out infs if needed.\n b = (np.nanmin(d), np.nanmax(d))\n if any(np.isinf(b)):\n mask = np.isfinite(d)\n d = d[mask]\n if len(d) == 0:\n return (None, None)\n b = (d.min(), d.max())\n\n elif frac <= 0.0:\n raise Exception(\"Value for parameter 'frac' must be > 0. (got %s)\" % str(frac))\n else:\n # include a percentile of data range\n mask = np.isfinite(d)\n d = d[mask]\n b = np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])\n\n ## adjust for fill level\n if ax == 1 and self.opts['fillLevel'] not in [None, 'enclosed']:\n b = (min(b[0], self.opts['fillLevel']), max(b[1], self.opts['fillLevel']))\n\n ## Add pen width only if it is non-cosmetic.\n pen = self.opts['pen']\n spen = self.opts['shadowPen']\n if not pen.isCosmetic():\n b = (b[0] - pen.widthF()*0.7072, b[1] + pen.widthF()*0.7072)\n if spen is not None and not spen.isCosmetic() and spen.style() != QtCore.Qt.NoPen:\n b = (b[0] - spen.widthF()*0.7072, b[1] + spen.widthF()*0.7072)\n\n self._boundsCache[ax] = [(frac, orthoRange), b]\n return b\n\n def pixelPadding(self):\n pen = self.opts['pen']\n spen = self.opts['shadowPen']\n w = 0\n if pen.isCosmetic():\n w += pen.widthF()*0.7072\n if spen is not None and spen.isCosmetic() and spen.style() != QtCore.Qt.NoPen:\n w = max(w, spen.widthF()*0.7072)\n if self.clickable:\n w = max(w, self.opts['mouseWidth']//2 + 1)\n return w\n\n def boundingRect(self):\n if self._boundingRect is None:\n (xmn, xmx) = self.dataBounds(ax=0)\n (ymn, ymx) = self.dataBounds(ax=1)\n if xmn is None or ymn is None:\n return QtCore.QRectF()\n\n px = py = 0.0\n pxPad = self.pixelPadding()\n if pxPad > 0:\n # determine length of pixel in local x, y directions\n px, py = self.pixelVectors()\n try:\n px = 0 if px is None else px.length()\n except OverflowError:\n px = 0\n try:\n py = 0 if py is None else py.length()\n except OverflowError:\n py = 0\n\n # return bounds expanded by pixel size\n px *= pxPad\n py *= pxPad\n #px += self._maxSpotWidth * 0.5\n #py += self._maxSpotWidth * 0.5\n self._boundingRect = QtCore.QRectF(xmn-px, ymn-py, (2*px)+xmx-xmn, (2*py)+ymx-ymn)\n\n return self._boundingRect\n\n def 
viewTransformChanged(self):\n self.invalidateBounds()\n self.prepareGeometryChange()\n\n #def boundingRect(self):\n #if self._boundingRect is None:\n #(x, y) = self.getData()\n #if x is None or y is None or len(x) == 0 or len(y) == 0:\n #return QtCore.QRectF()\n\n\n #if self.opts['shadowPen'] is not None:\n #lineWidth = (max(self.opts['pen'].width(), self.opts['shadowPen'].width()) + 1)\n #else:\n #lineWidth = (self.opts['pen'].width()+1)\n\n\n #pixels = self.pixelVectors()\n #if pixels == (None, None):\n #pixels = [Point(0,0), Point(0,0)]\n\n #xmin = x.min()\n #xmax = x.max()\n #ymin = y.min()\n #ymax = y.max()\n\n #if self.opts['fillLevel'] is not None:\n #ymin = min(ymin, self.opts['fillLevel'])\n #ymax = max(ymax, self.opts['fillLevel'])\n\n #xmin -= pixels[0].x() * lineWidth\n #xmax += pixels[0].x() * lineWidth\n #ymin -= abs(pixels[1].y()) * lineWidth\n #ymax += abs(pixels[1].y()) * lineWidth\n\n #self._boundingRect = QtCore.QRectF(xmin, ymin, xmax-xmin, ymax-ymin)\n #return self._boundingRect\n\n\n def invalidateBounds(self):\n self._boundingRect = None\n self._boundsCache = [None, None]\n\n def setPen(self, *args, **kargs):\n \"\"\"Set the pen used to draw the curve.\"\"\"\n self.opts['pen'] = fn.mkPen(*args, **kargs)\n self.invalidateBounds()\n self.update()\n\n def setShadowPen(self, *args, **kargs):\n \"\"\"Set the shadow pen used to draw behind the primary pen.\n This pen must have a larger width than the primary\n pen to be visible.\n \"\"\"\n self.opts['shadowPen'] = fn.mkPen(*args, **kargs)\n self.invalidateBounds()\n self.update()\n\n def setBrush(self, *args, **kargs):\n \"\"\"Set the brush used when filling the area under the curve\"\"\"\n self.opts['brush'] = fn.mkBrush(*args, **kargs)\n self.invalidateBounds()\n self.update()\n\n def setFillLevel(self, level):\n \"\"\"Set the level filled to when filling under the curve\"\"\"\n self.opts['fillLevel'] = level\n self.fillPath = None\n self.invalidateBounds()\n self.update()\n\n def setData(self, *args, **kargs):\n \"\"\"\n =============== ========================================================\n **Arguments:**\n x, y (numpy arrays) Data to show\n pen Pen to use when drawing. Any single argument accepted by\n :func:`mkPen <pyqtgraph.mkPen>` is allowed.\n shadowPen Pen for drawing behind the primary pen. Usually this\n is used to emphasize the curve by providing a\n high-contrast border. Any single argument accepted by\n :func:`mkPen <pyqtgraph.mkPen>` is allowed.\n fillLevel (float or None) Fill the area 'under' the curve to\n *fillLevel*\n fillOutline (bool) If True, an outline surrounding the *fillLevel*\n area is drawn.\n brush QBrush to use when filling. Any single argument accepted\n by :func:`mkBrush <pyqtgraph.mkBrush>` is allowed.\n antialias (bool) Whether to use antialiasing when drawing. This\n is disabled by default because it decreases performance.\n stepMode If True, two orthogonal lines are drawn for each sample\n as steps. This is commonly used when drawing histograms.\n Note that in this case, len(x) == len(y) + 1\n connect Argument specifying how vertexes should be connected\n by line segments. Default is \"all\", indicating full\n connection. \"pairs\" causes only even-numbered segments\n to be drawn. \"finite\" causes segments to be omitted if\n they are attached to nan or inf values. 
For any other\n connectivity, specify an array of boolean values.\n compositionMode See :func:`setCompositionMode\n <pyqtgraph.PlotCurveItem.setCompositionMode>`.\n =============== ========================================================\n\n If non-keyword arguments are used, they will be interpreted as\n setData(y) for a single argument and setData(x, y) for two\n arguments.\n\n\n \"\"\"\n self.updateData(*args, **kargs)\n\n def updateData(self, *args, **kargs):\n profiler = debug.Profiler()\n\n if 'compositionMode' in kargs:\n self.setCompositionMode(kargs['compositionMode'])\n\n if len(args) == 1:\n kargs['y'] = args[0]\n elif len(args) == 2:\n kargs['x'] = args[0]\n kargs['y'] = args[1]\n\n if 'y' not in kargs or kargs['y'] is None:\n kargs['y'] = np.array([])\n if 'x' not in kargs or kargs['x'] is None:\n kargs['x'] = np.arange(len(kargs['y']))\n\n for k in ['x', 'y']:\n data = kargs[k]\n if isinstance(data, list):\n data = np.array(data)\n kargs[k] = data\n if not isinstance(data, np.ndarray) or data.ndim > 1:\n raise Exception(\"Plot data must be 1D ndarray.\")\n if data.dtype.kind == 'c':\n raise Exception(\"Can not plot complex data types.\")\n\n profiler(\"data checks\")\n\n #self.setCacheMode(QtGui.QGraphicsItem.NoCache) ## Disabling and re-enabling the cache works around a bug in Qt 4.6 causing the cached results to display incorrectly\n ## Test this bug with test_PlotWidget and zoom in on the animated plot\n self.yData = kargs['y'].view(np.ndarray)\n self.xData = kargs['x'].view(np.ndarray)\n \n self.invalidateBounds()\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n\n profiler('copy')\n\n if 'stepMode' in kargs:\n self.opts['stepMode'] = kargs['stepMode']\n\n if self.opts['stepMode'] is True:\n if len(self.xData) != len(self.yData)+1: ## allow difference of 1 for step mode plots\n raise Exception(\"len(X) must be len(Y)+1 since stepMode=True (got %s and %s)\" % (self.xData.shape, self.yData.shape))\n else:\n if self.xData.shape != self.yData.shape: ## allow difference of 1 for step mode plots\n raise Exception(\"X and Y arrays must be the same shape--got %s and %s.\" % (self.xData.shape, self.yData.shape))\n\n self.path = None\n self.fillPath = None\n self._mouseShape = None\n #self.xDisp = self.yDisp = None\n\n if 'name' in kargs:\n self.opts['name'] = kargs['name']\n if 'connect' in kargs:\n self.opts['connect'] = kargs['connect']\n if 'pen' in kargs:\n self.setPen(kargs['pen'])\n if 'shadowPen' in kargs:\n self.setShadowPen(kargs['shadowPen'])\n if 'fillLevel' in kargs:\n self.setFillLevel(kargs['fillLevel'])\n if 'fillOutline' in kargs:\n self.opts['fillOutline'] = kargs['fillOutline']\n if 'brush' in kargs:\n self.setBrush(kargs['brush'])\n if 'antialias' in kargs:\n self.opts['antialias'] = kargs['antialias']\n\n\n profiler('set')\n self.update()\n profiler('update')\n self.sigPlotChanged.emit(self)\n profiler('emit')\n\n def generatePath(self, x, y):\n if self.opts['stepMode']:\n ## each value in the x/y arrays generates 2 points.\n x2 = np.empty((len(x),2), dtype=x.dtype)\n x2[:] = x[:,np.newaxis]\n if self.opts['fillLevel'] is None:\n x = x2.reshape(x2.size)[1:-1]\n y2 = np.empty((len(y),2), dtype=y.dtype)\n y2[:] = y[:,np.newaxis]\n y = y2.reshape(y2.size)\n else:\n ## If we have a fill level, add two extra points at either end\n x = x2.reshape(x2.size)\n y2 = np.empty((len(y)+2,2), dtype=y.dtype)\n y2[1:-1] = y[:,np.newaxis]\n y = y2.reshape(y2.size)[1:-1]\n y[0] = self.opts['fillLevel']\n y[-1] = self.opts['fillLevel']\n\n path = 
fn.arrayToQPath(x, y, connect=self.opts['connect'])\n\n return path\n\n\n def getPath(self):\n if self.path is None:\n x,y = self.getData()\n if x is None or len(x) == 0 or y is None or len(y) == 0:\n self.path = QtGui.QPainterPath()\n else:\n self.path = self.generatePath(*self.getData())\n self.fillPath = None\n self._mouseShape = None\n\n return self.path\n\n @debug.warnOnException ## raising an exception here causes crash\n def paint(self, p, opt, widget):\n profiler = debug.Profiler()\n if self.xData is None or len(self.xData) == 0:\n return\n\n if HAVE_OPENGL and getConfigOption('enableExperimental') and isinstance(widget, QtOpenGL.QGLWidget):\n self.paintGL(p, opt, widget)\n return\n\n x = None\n y = None\n path = self.getPath()\n profiler('generate path')\n\n if self._exportOpts is not False:\n aa = self._exportOpts.get('antialias', True)\n else:\n aa = self.opts['antialias']\n\n p.setRenderHint(p.Antialiasing, aa)\n\n cmode = self.opts['compositionMode']\n if cmode is not None:\n p.setCompositionMode(cmode)\n\n if self.opts['brush'] is not None and self.opts['fillLevel'] is not None:\n if self.fillPath is None:\n if x is None:\n x,y = self.getData()\n p2 = QtGui.QPainterPath(self.path)\n if self.opts['fillLevel'] != 'enclosed':\n p2.lineTo(x[-1], self.opts['fillLevel'])\n p2.lineTo(x[0], self.opts['fillLevel'])\n p2.lineTo(x[0], y[0])\n p2.closeSubpath()\n self.fillPath = p2\n\n profiler('generate fill path')\n p.fillPath(self.fillPath, self.opts['brush'])\n profiler('draw fill path')\n\n sp = self.opts['shadowPen']\n cp = self.opts['pen']\n\n ## Copy pens and apply alpha adjustment\n #sp = QtGui.QPen(self.opts['shadowPen'])\n #cp = QtGui.QPen(self.opts['pen'])\n #for pen in [sp, cp]:\n #if pen is None:\n #continue\n #c = pen.color()\n #c.setAlpha(c.alpha() * self.opts['alphaHint'])\n #pen.setColor(c)\n ##pen.setCosmetic(True)\n\n if sp is not None and sp.style() != QtCore.Qt.NoPen:\n p.setPen(sp)\n p.drawPath(path)\n p.setPen(cp)\n if self.opts['fillOutline'] and self.fillPath is not None:\n p.drawPath(self.fillPath)\n else:\n p.drawPath(path)\n profiler('drawPath')\n\n #print \"Render hints:\", int(p.renderHints())\n #p.setPen(QtGui.QPen(QtGui.QColor(255,0,0)))\n #p.drawRect(self.boundingRect())\n\n def paintGL(self, p, opt, widget):\n p.beginNativePainting()\n import OpenGL.GL as gl\n\n ## set clipping viewport\n view = self.getViewBox()\n if view is not None:\n rect = view.mapRectToItem(self, view.boundingRect())\n #gl.glViewport(int(rect.x()), int(rect.y()), int(rect.width()), int(rect.height()))\n\n #gl.glTranslate(-rect.x(), -rect.y(), 0)\n\n gl.glEnable(gl.GL_STENCIL_TEST)\n gl.glColorMask(gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE) # disable drawing to frame buffer\n gl.glDepthMask(gl.GL_FALSE) # disable drawing to depth buffer\n gl.glStencilFunc(gl.GL_NEVER, 1, 0xFF)\n gl.glStencilOp(gl.GL_REPLACE, gl.GL_KEEP, gl.GL_KEEP)\n\n ## draw stencil pattern\n gl.glStencilMask(0xFF)\n gl.glClear(gl.GL_STENCIL_BUFFER_BIT)\n gl.glBegin(gl.GL_TRIANGLES)\n gl.glVertex2f(rect.x(), rect.y())\n gl.glVertex2f(rect.x()+rect.width(), rect.y())\n gl.glVertex2f(rect.x(), rect.y()+rect.height())\n gl.glVertex2f(rect.x()+rect.width(), rect.y()+rect.height())\n gl.glVertex2f(rect.x()+rect.width(), rect.y())\n gl.glVertex2f(rect.x(), rect.y()+rect.height())\n gl.glEnd()\n\n gl.glColorMask(gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE)\n gl.glDepthMask(gl.GL_TRUE)\n gl.glStencilMask(0x00)\n gl.glStencilFunc(gl.GL_EQUAL, 1, 0xFF)\n\n try:\n x, y = self.getData()\n pos = np.empty((len(x), 
2))\n pos[:,0] = x\n pos[:,1] = y\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n try:\n gl.glVertexPointerf(pos)\n pen = fn.mkPen(self.opts['pen'])\n color = pen.color()\n gl.glColor4f(color.red()/255., color.green()/255., color.blue()/255., color.alpha()/255.)\n width = pen.width()\n if pen.isCosmetic() and width < 1:\n width = 1\n gl.glPointSize(width)\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n gl.glDrawArrays(gl.GL_LINE_STRIP, 0, int(pos.size / pos.shape[-1]))\n finally:\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY)\n finally:\n p.endNativePainting()\n\n def clear(self):\n self.xData = None ## raw values\n self.yData = None\n self.xDisp = None ## display values (after log / fft)\n self.yDisp = None\n self.path = None\n self.fillPath = None\n self._mouseShape = None\n self._mouseBounds = None\n self._boundsCache = [None, None]\n #del self.xData, self.yData, self.xDisp, self.yDisp, self.path\n\n def mouseShape(self):\n \"\"\"\n Return a QPainterPath representing the clickable shape of the curve\n\n \"\"\"\n if self._mouseShape is None:\n view = self.getViewBox()\n if view is None:\n return QtGui.QPainterPath()\n stroker = QtGui.QPainterPathStroker()\n path = self.getPath()\n path = self.mapToItem(view, path)\n stroker.setWidth(self.opts['mouseWidth'])\n mousePath = stroker.createStroke(path)\n self._mouseShape = self.mapFromItem(view, mousePath)\n return self._mouseShape\n\n def mouseClickEvent(self, ev):\n if not self.clickable or ev.button() != QtCore.Qt.LeftButton:\n return\n if self.mouseShape().contains(ev.pos()):\n ev.accept()\n self.sigClicked.emit(self)\n\n\n\nclass ROIPlotItem(PlotCurveItem):\n \"\"\"Plot curve that monitors an ROI and image for changes to automatically replot.\"\"\"\n def __init__(self, roi, data, img, axes=(0,1), xVals=None, color=None):\n self.roi = roi\n self.roiData = data\n self.roiImg = img\n self.axes = axes\n self.xVals = xVals\n PlotCurveItem.__init__(self, self.getRoiData(), x=self.xVals, color=color)\n #roi.connect(roi, QtCore.SIGNAL('regionChanged'), self.roiChangedEvent)\n roi.sigRegionChanged.connect(self.roiChangedEvent)\n #self.roiChangedEvent()\n\n def getRoiData(self):\n d = self.roi.getArrayRegion(self.roiData, self.roiImg, axes=self.axes)\n if d is None:\n return\n while d.ndim > 1:\n d = d.mean(axis=1)\n return d\n\n def roiChangedEvent(self):\n d = self.getRoiData()\n self.updateData(d, self.xVals)\n", "path": "pyqtgraph/graphicsItems/PlotCurveItem.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\ntry:\n from ..Qt import QtOpenGL\n HAVE_OPENGL = True\nexcept:\n HAVE_OPENGL = False\n\nimport numpy as np\nfrom .GraphicsObject import GraphicsObject\nfrom .. import functions as fn\nfrom ..Point import Point\nimport struct, sys\nfrom .. import getConfigOption\nfrom .. import debug\n\n__all__ = ['PlotCurveItem']\nclass PlotCurveItem(GraphicsObject):\n\n\n \"\"\"\n Class representing a single plot curve. Instances of this class are created\n automatically as part of PlotDataItem; these rarely need to be instantiated\n directly.\n\n Features:\n\n - Fast data update\n - Fill under curve\n - Mouse interaction\n\n ==================== ===============================================\n **Signals:**\n sigPlotChanged(self) Emitted when the data being plotted has changed\n sigClicked(self) Emitted when the curve is clicked\n ==================== ===============================================\n \"\"\"\n\n sigPlotChanged = QtCore.Signal(object)\n sigClicked = QtCore.Signal(object)\n\n def __init__(self, *args, **kargs):\n \"\"\"\n Forwards all arguments to :func:`setData <pyqtgraph.PlotCurveItem.setData>`.\n\n Some extra arguments are accepted as well:\n\n ============== =======================================================\n **Arguments:**\n parent The parent GraphicsObject (optional)\n clickable If True, the item will emit sigClicked when it is\n clicked on. Defaults to False.\n ============== =======================================================\n \"\"\"\n GraphicsObject.__init__(self, kargs.get('parent', None))\n self.clear()\n\n ## this is disastrous for performance.\n #self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)\n\n self.metaData = {}\n self.opts = {\n 'shadowPen': None,\n 'fillLevel': None,\n 'fillOutline': False,\n 'brush': None,\n 'stepMode': False,\n 'name': None,\n 'antialias': getConfigOption('antialias'),\n 'connect': 'all',\n 'mouseWidth': 8, # width of shape responding to mouse click\n 'compositionMode': None,\n }\n if 'pen' not in kargs:\n self.opts['pen'] = fn.mkPen('w')\n self.setClickable(kargs.get('clickable', False))\n self.setData(*args, **kargs)\n\n def implements(self, interface=None):\n ints = ['plotData']\n if interface is None:\n return ints\n return interface in ints\n\n def name(self):\n return self.opts.get('name', None)\n\n def setClickable(self, s, width=None):\n \"\"\"Sets whether the item responds to mouse clicks.\n\n The *width* argument specifies the width in pixels orthogonal to the\n curve that will respond to a mouse click.\n \"\"\"\n self.clickable = s\n if width is not None:\n self.opts['mouseWidth'] = width\n self._mouseShape = None\n self._boundingRect = None\n\n def setCompositionMode(self, mode):\n \"\"\"Change the composition mode of the item (see QPainter::CompositionMode\n in the Qt documentation). This is useful when overlaying multiple items.\n\n ============================================ ============================================================\n **Most common arguments:**\n QtGui.QPainter.CompositionMode_SourceOver Default; image replaces the background if it\n is opaque. 
Otherwise, it uses the alpha channel to blend\n the image with the background.\n QtGui.QPainter.CompositionMode_Overlay The image color is mixed with the background color to\n reflect the lightness or darkness of the background.\n QtGui.QPainter.CompositionMode_Plus Both the alpha and color of the image and background pixels\n are added together.\n QtGui.QPainter.CompositionMode_Multiply The output is the image color multiplied by the background.\n ============================================ ============================================================\n \"\"\"\n self.opts['compositionMode'] = mode\n self.update()\n\n def getData(self):\n return self.xData, self.yData\n\n def dataBounds(self, ax, frac=1.0, orthoRange=None):\n ## Need this to run as fast as possible.\n ## check cache first:\n cache = self._boundsCache[ax]\n if cache is not None and cache[0] == (frac, orthoRange):\n return cache[1]\n\n (x, y) = self.getData()\n if x is None or len(x) == 0:\n return (None, None)\n\n if ax == 0:\n d = x\n d2 = y\n elif ax == 1:\n d = y\n d2 = x\n\n ## If an orthogonal range is specified, mask the data now\n if orthoRange is not None:\n mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])\n d = d[mask]\n #d2 = d2[mask]\n\n if len(d) == 0:\n return (None, None)\n\n ## Get min/max (or percentiles) of the requested data range\n if frac >= 1.0:\n # include complete data range\n # first try faster nanmin/max function, then cut out infs if needed.\n b = (np.nanmin(d), np.nanmax(d))\n if any(np.isinf(b)):\n mask = np.isfinite(d)\n d = d[mask]\n if len(d) == 0:\n return (None, None)\n b = (d.min(), d.max())\n\n elif frac <= 0.0:\n raise Exception(\"Value for parameter 'frac' must be > 0. (got %s)\" % str(frac))\n else:\n # include a percentile of data range\n mask = np.isfinite(d)\n d = d[mask]\n b = np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])\n\n ## adjust for fill level\n if ax == 1 and self.opts['fillLevel'] not in [None, 'enclosed']:\n b = (min(b[0], self.opts['fillLevel']), max(b[1], self.opts['fillLevel']))\n\n ## Add pen width only if it is non-cosmetic.\n pen = self.opts['pen']\n spen = self.opts['shadowPen']\n if not pen.isCosmetic():\n b = (b[0] - pen.widthF()*0.7072, b[1] + pen.widthF()*0.7072)\n if spen is not None and not spen.isCosmetic() and spen.style() != QtCore.Qt.NoPen:\n b = (b[0] - spen.widthF()*0.7072, b[1] + spen.widthF()*0.7072)\n\n self._boundsCache[ax] = [(frac, orthoRange), b]\n return b\n\n def pixelPadding(self):\n pen = self.opts['pen']\n spen = self.opts['shadowPen']\n w = 0\n if pen.isCosmetic():\n w += pen.widthF()*0.7072\n if spen is not None and spen.isCosmetic() and spen.style() != QtCore.Qt.NoPen:\n w = max(w, spen.widthF()*0.7072)\n if self.clickable:\n w = max(w, self.opts['mouseWidth']//2 + 1)\n return w\n\n def boundingRect(self):\n if self._boundingRect is None:\n (xmn, xmx) = self.dataBounds(ax=0)\n (ymn, ymx) = self.dataBounds(ax=1)\n if xmn is None or ymn is None:\n return QtCore.QRectF()\n\n px = py = 0.0\n pxPad = self.pixelPadding()\n if pxPad > 0:\n # determine length of pixel in local x, y directions\n px, py = self.pixelVectors()\n try:\n px = 0 if px is None else px.length()\n except OverflowError:\n px = 0\n try:\n py = 0 if py is None else py.length()\n except OverflowError:\n py = 0\n\n # return bounds expanded by pixel size\n px *= pxPad\n py *= pxPad\n #px += self._maxSpotWidth * 0.5\n #py += self._maxSpotWidth * 0.5\n self._boundingRect = QtCore.QRectF(xmn-px, ymn-py, (2*px)+xmx-xmn, (2*py)+ymx-ymn)\n\n return self._boundingRect\n\n def 
viewTransformChanged(self):\n self.invalidateBounds()\n self.prepareGeometryChange()\n\n #def boundingRect(self):\n #if self._boundingRect is None:\n #(x, y) = self.getData()\n #if x is None or y is None or len(x) == 0 or len(y) == 0:\n #return QtCore.QRectF()\n\n\n #if self.opts['shadowPen'] is not None:\n #lineWidth = (max(self.opts['pen'].width(), self.opts['shadowPen'].width()) + 1)\n #else:\n #lineWidth = (self.opts['pen'].width()+1)\n\n\n #pixels = self.pixelVectors()\n #if pixels == (None, None):\n #pixels = [Point(0,0), Point(0,0)]\n\n #xmin = x.min()\n #xmax = x.max()\n #ymin = y.min()\n #ymax = y.max()\n\n #if self.opts['fillLevel'] is not None:\n #ymin = min(ymin, self.opts['fillLevel'])\n #ymax = max(ymax, self.opts['fillLevel'])\n\n #xmin -= pixels[0].x() * lineWidth\n #xmax += pixels[0].x() * lineWidth\n #ymin -= abs(pixels[1].y()) * lineWidth\n #ymax += abs(pixels[1].y()) * lineWidth\n\n #self._boundingRect = QtCore.QRectF(xmin, ymin, xmax-xmin, ymax-ymin)\n #return self._boundingRect\n\n\n def invalidateBounds(self):\n self._boundingRect = None\n self._boundsCache = [None, None]\n\n def setPen(self, *args, **kargs):\n \"\"\"Set the pen used to draw the curve.\"\"\"\n self.opts['pen'] = fn.mkPen(*args, **kargs)\n self.invalidateBounds()\n self.update()\n\n def setShadowPen(self, *args, **kargs):\n \"\"\"Set the shadow pen used to draw behind the primary pen.\n This pen must have a larger width than the primary\n pen to be visible.\n \"\"\"\n self.opts['shadowPen'] = fn.mkPen(*args, **kargs)\n self.invalidateBounds()\n self.update()\n\n def setBrush(self, *args, **kargs):\n \"\"\"Set the brush used when filling the area under the curve\"\"\"\n self.opts['brush'] = fn.mkBrush(*args, **kargs)\n self.invalidateBounds()\n self.update()\n\n def setFillLevel(self, level):\n \"\"\"Set the level filled to when filling under the curve\"\"\"\n self.opts['fillLevel'] = level\n self.fillPath = None\n self.invalidateBounds()\n self.update()\n\n def setData(self, *args, **kargs):\n \"\"\"\n =============== ========================================================\n **Arguments:**\n x, y (numpy arrays) Data to show\n pen Pen to use when drawing. Any single argument accepted by\n :func:`mkPen <pyqtgraph.mkPen>` is allowed.\n shadowPen Pen for drawing behind the primary pen. Usually this\n is used to emphasize the curve by providing a\n high-contrast border. Any single argument accepted by\n :func:`mkPen <pyqtgraph.mkPen>` is allowed.\n fillLevel (float or None) Fill the area 'under' the curve to\n *fillLevel*\n fillOutline (bool) If True, an outline surrounding the *fillLevel*\n area is drawn.\n brush QBrush to use when filling. Any single argument accepted\n by :func:`mkBrush <pyqtgraph.mkBrush>` is allowed.\n antialias (bool) Whether to use antialiasing when drawing. This\n is disabled by default because it decreases performance.\n stepMode If True, two orthogonal lines are drawn for each sample\n as steps. This is commonly used when drawing histograms.\n Note that in this case, len(x) == len(y) + 1\n connect Argument specifying how vertexes should be connected\n by line segments. Default is \"all\", indicating full\n connection. \"pairs\" causes only even-numbered segments\n to be drawn. \"finite\" causes segments to be omitted if\n they are attached to nan or inf values. 
For any other\n connectivity, specify an array of boolean values.\n compositionMode See :func:`setCompositionMode\n <pyqtgraph.PlotCurveItem.setCompositionMode>`.\n =============== ========================================================\n\n If non-keyword arguments are used, they will be interpreted as\n setData(y) for a single argument and setData(x, y) for two\n arguments.\n\n\n \"\"\"\n self.updateData(*args, **kargs)\n\n def updateData(self, *args, **kargs):\n profiler = debug.Profiler()\n\n if 'compositionMode' in kargs:\n self.setCompositionMode(kargs['compositionMode'])\n\n if len(args) == 1:\n kargs['y'] = args[0]\n elif len(args) == 2:\n kargs['x'] = args[0]\n kargs['y'] = args[1]\n\n if 'y' not in kargs or kargs['y'] is None:\n kargs['y'] = np.array([])\n if 'x' not in kargs or kargs['x'] is None:\n kargs['x'] = np.arange(len(kargs['y']))\n\n for k in ['x', 'y']:\n data = kargs[k]\n if isinstance(data, list):\n data = np.array(data)\n kargs[k] = data\n if not isinstance(data, np.ndarray) or data.ndim > 1:\n raise Exception(\"Plot data must be 1D ndarray.\")\n if data.dtype.kind == 'c':\n raise Exception(\"Can not plot complex data types.\")\n\n profiler(\"data checks\")\n\n #self.setCacheMode(QtGui.QGraphicsItem.NoCache) ## Disabling and re-enabling the cache works around a bug in Qt 4.6 causing the cached results to display incorrectly\n ## Test this bug with test_PlotWidget and zoom in on the animated plot\n self.yData = kargs['y'].view(np.ndarray)\n self.xData = kargs['x'].view(np.ndarray)\n \n self.invalidateBounds()\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n\n profiler('copy')\n\n if 'stepMode' in kargs:\n self.opts['stepMode'] = kargs['stepMode']\n\n if self.opts['stepMode'] is True:\n if len(self.xData) != len(self.yData)+1: ## allow difference of 1 for step mode plots\n raise Exception(\"len(X) must be len(Y)+1 since stepMode=True (got %s and %s)\" % (self.xData.shape, self.yData.shape))\n else:\n if self.xData.shape != self.yData.shape: ## allow difference of 1 for step mode plots\n raise Exception(\"X and Y arrays must be the same shape--got %s and %s.\" % (self.xData.shape, self.yData.shape))\n\n self.path = None\n self.fillPath = None\n self._mouseShape = None\n #self.xDisp = self.yDisp = None\n\n if 'name' in kargs:\n self.opts['name'] = kargs['name']\n if 'connect' in kargs:\n self.opts['connect'] = kargs['connect']\n if 'pen' in kargs:\n self.setPen(kargs['pen'])\n if 'shadowPen' in kargs:\n self.setShadowPen(kargs['shadowPen'])\n if 'fillLevel' in kargs:\n self.setFillLevel(kargs['fillLevel'])\n if 'fillOutline' in kargs:\n self.opts['fillOutline'] = kargs['fillOutline']\n if 'brush' in kargs:\n self.setBrush(kargs['brush'])\n if 'antialias' in kargs:\n self.opts['antialias'] = kargs['antialias']\n\n\n profiler('set')\n self.update()\n profiler('update')\n self.sigPlotChanged.emit(self)\n profiler('emit')\n\n def generatePath(self, x, y):\n if self.opts['stepMode']:\n ## each value in the x/y arrays generates 2 points.\n x2 = np.empty((len(x),2), dtype=x.dtype)\n x2[:] = x[:,np.newaxis]\n if self.opts['fillLevel'] is None:\n x = x2.reshape(x2.size)[1:-1]\n y2 = np.empty((len(y),2), dtype=y.dtype)\n y2[:] = y[:,np.newaxis]\n y = y2.reshape(y2.size)\n else:\n ## If we have a fill level, add two extra points at either end\n x = x2.reshape(x2.size)\n y2 = np.empty((len(y)+2,2), dtype=y.dtype)\n y2[1:-1] = y[:,np.newaxis]\n y = y2.reshape(y2.size)[1:-1]\n y[0] = self.opts['fillLevel']\n y[-1] = self.opts['fillLevel']\n\n path = 
fn.arrayToQPath(x, y, connect=self.opts['connect'])\n\n return path\n\n\n def getPath(self):\n if self.path is None:\n x,y = self.getData()\n if x is None or len(x) == 0 or y is None or len(y) == 0:\n self.path = QtGui.QPainterPath()\n else:\n self.path = self.generatePath(*self.getData())\n self.fillPath = None\n self._mouseShape = None\n\n return self.path\n\n @debug.warnOnException ## raising an exception here causes crash\n def paint(self, p, opt, widget):\n profiler = debug.Profiler()\n if self.xData is None or len(self.xData) == 0:\n return\n\n if HAVE_OPENGL and getConfigOption('enableExperimental') and isinstance(widget, QtOpenGL.QGLWidget):\n self.paintGL(p, opt, widget)\n return\n\n x = None\n y = None\n path = self.getPath()\n profiler('generate path')\n\n if self._exportOpts is not False:\n aa = self._exportOpts.get('antialias', True)\n else:\n aa = self.opts['antialias']\n\n p.setRenderHint(p.Antialiasing, aa)\n\n cmode = self.opts['compositionMode']\n if cmode is not None:\n p.setCompositionMode(cmode)\n\n if self.opts['brush'] is not None and self.opts['fillLevel'] is not None:\n if self.fillPath is None:\n if x is None:\n x,y = self.getData()\n p2 = QtGui.QPainterPath(self.path)\n if self.opts['fillLevel'] != 'enclosed':\n p2.lineTo(x[-1], self.opts['fillLevel'])\n p2.lineTo(x[0], self.opts['fillLevel'])\n p2.lineTo(x[0], y[0])\n p2.closeSubpath()\n self.fillPath = p2\n\n profiler('generate fill path')\n p.fillPath(self.fillPath, self.opts['brush'])\n profiler('draw fill path')\n\n sp = self.opts['shadowPen']\n cp = self.opts['pen']\n\n ## Copy pens and apply alpha adjustment\n #sp = QtGui.QPen(self.opts['shadowPen'])\n #cp = QtGui.QPen(self.opts['pen'])\n #for pen in [sp, cp]:\n #if pen is None:\n #continue\n #c = pen.color()\n #c.setAlpha(c.alpha() * self.opts['alphaHint'])\n #pen.setColor(c)\n ##pen.setCosmetic(True)\n\n if sp is not None and sp.style() != QtCore.Qt.NoPen:\n p.setPen(sp)\n p.drawPath(path)\n p.setPen(cp)\n if self.opts['fillOutline'] and self.fillPath is not None:\n p.drawPath(self.fillPath)\n else:\n p.drawPath(path)\n profiler('drawPath')\n\n #print \"Render hints:\", int(p.renderHints())\n #p.setPen(QtGui.QPen(QtGui.QColor(255,0,0)))\n #p.drawRect(self.boundingRect())\n\n def paintGL(self, p, opt, widget):\n p.beginNativePainting()\n import OpenGL.GL as gl\n\n ## set clipping viewport\n view = self.getViewBox()\n if view is not None:\n rect = view.mapRectToItem(self, view.boundingRect())\n #gl.glViewport(int(rect.x()), int(rect.y()), int(rect.width()), int(rect.height()))\n\n #gl.glTranslate(-rect.x(), -rect.y(), 0)\n\n gl.glEnable(gl.GL_STENCIL_TEST)\n gl.glColorMask(gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE) # disable drawing to frame buffer\n gl.glDepthMask(gl.GL_FALSE) # disable drawing to depth buffer\n gl.glStencilFunc(gl.GL_NEVER, 1, 0xFF)\n gl.glStencilOp(gl.GL_REPLACE, gl.GL_KEEP, gl.GL_KEEP)\n\n ## draw stencil pattern\n gl.glStencilMask(0xFF)\n gl.glClear(gl.GL_STENCIL_BUFFER_BIT)\n gl.glBegin(gl.GL_TRIANGLES)\n gl.glVertex2f(rect.x(), rect.y())\n gl.glVertex2f(rect.x()+rect.width(), rect.y())\n gl.glVertex2f(rect.x(), rect.y()+rect.height())\n gl.glVertex2f(rect.x()+rect.width(), rect.y()+rect.height())\n gl.glVertex2f(rect.x()+rect.width(), rect.y())\n gl.glVertex2f(rect.x(), rect.y()+rect.height())\n gl.glEnd()\n\n gl.glColorMask(gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE)\n gl.glDepthMask(gl.GL_TRUE)\n gl.glStencilMask(0x00)\n gl.glStencilFunc(gl.GL_EQUAL, 1, 0xFF)\n\n try:\n x, y = self.getData()\n pos = np.empty((len(x), 
2))\n pos[:,0] = x\n pos[:,1] = y\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n try:\n gl.glVertexPointerf(pos)\n pen = fn.mkPen(self.opts['pen'])\n color = pen.color()\n gl.glColor4f(color.red()/255., color.green()/255., color.blue()/255., color.alpha()/255.)\n width = pen.width()\n if pen.isCosmetic() and width < 1:\n width = 1\n gl.glPointSize(width)\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n gl.glDrawArrays(gl.GL_LINE_STRIP, 0, int(pos.size / pos.shape[-1]))\n finally:\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY)\n finally:\n p.endNativePainting()\n\n def clear(self):\n self.xData = None ## raw values\n self.yData = None\n self.xDisp = None ## display values (after log / fft)\n self.yDisp = None\n self.path = None\n self.fillPath = None\n self._mouseShape = None\n self._mouseBounds = None\n self._boundsCache = [None, None]\n #del self.xData, self.yData, self.xDisp, self.yDisp, self.path\n\n def mouseShape(self):\n \"\"\"\n Return a QPainterPath representing the clickable shape of the curve\n\n \"\"\"\n if self._mouseShape is None:\n view = self.getViewBox()\n if view is None:\n return QtGui.QPainterPath()\n stroker = QtGui.QPainterPathStroker()\n path = self.getPath()\n path = self.mapToItem(view, path)\n stroker.setWidth(self.opts['mouseWidth'])\n mousePath = stroker.createStroke(path)\n self._mouseShape = self.mapFromItem(view, mousePath)\n return self._mouseShape\n\n def mouseClickEvent(self, ev):\n if not self.clickable or ev.button() != QtCore.Qt.LeftButton:\n return\n if self.mouseShape().contains(ev.pos()):\n ev.accept()\n self.sigClicked.emit(self, ev)\n\n\n\nclass ROIPlotItem(PlotCurveItem):\n \"\"\"Plot curve that monitors an ROI and image for changes to automatically replot.\"\"\"\n def __init__(self, roi, data, img, axes=(0,1), xVals=None, color=None):\n self.roi = roi\n self.roiData = data\n self.roiImg = img\n self.axes = axes\n self.xVals = xVals\n PlotCurveItem.__init__(self, self.getRoiData(), x=self.xVals, color=color)\n #roi.connect(roi, QtCore.SIGNAL('regionChanged'), self.roiChangedEvent)\n roi.sigRegionChanged.connect(self.roiChangedEvent)\n #self.roiChangedEvent()\n\n def getRoiData(self):\n d = self.roi.getArrayRegion(self.roiData, self.roiImg, axes=self.axes)\n if d is None:\n return\n while d.ndim > 1:\n d = d.mean(axis=1)\n return d\n\n def roiChangedEvent(self):\n d = self.getRoiData()\n self.updateData(d, self.xVals)\n", "path": "pyqtgraph/graphicsItems/PlotCurveItem.py" } ]
diff --git a/pyqtgraph/graphicsItems/PlotCurveItem.py b/pyqtgraph/graphicsItems/PlotCurveItem.py index c3a58da2ed..b6c6d21653 100644 --- a/pyqtgraph/graphicsItems/PlotCurveItem.py +++ b/pyqtgraph/graphicsItems/PlotCurveItem.py @@ -613,7 +613,7 @@ def mouseClickEvent(self, ev): return if self.mouseShape().contains(ev.pos()): ev.accept() - self.sigClicked.emit(self) + self.sigClicked.emit(self, ev)
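For context, a small, hedged usage sketch of the clickable machinery shown in the files above: `setClickable()` widens the mouse-sensitive stroke via the `mouseWidth` option, and a click inside `mouseShape()` emits `sigClicked`. The data, pen colour, and window handling below are illustrative only; note that the before file emits `sigClicked(self)` while the after file and diff emit `sigClicked(self, ev)`, so a handler accepting `*args` tolerates either signature.

```python
import pyqtgraph as pg

win = pg.plot()                         # convenience: creates a Qt app and a plot window
curve = pg.PlotCurveItem([0, 1, 2, 3], [1, 3, 2, 4], pen='y')
curve.setClickable(True, width=12)      # 12 px orthogonal to the curve responds to clicks
win.addItem(curve)

def on_clicked(*args):
    # args is (curve,) with the old emit and (curve, ev) with the patched emit
    print("curve clicked:", args)

curve.sigClicked.connect(on_clicked)
# Start the Qt event loop with pg.exec() (newer pyqtgraph) or QApplication.exec_() (older).
```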
rasterio__rasterio-1692
more explicit NotImplementedError messages in read mode? In wanting to set a GeoTIFF's CRS, I encountered [this](https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx#L516) NotImplementedError when trying to run the following code: ``` with rasterio.open(filepath) as src: src.crs = "EPSG:3857" ``` Though in retrospect it is obvious that the above will fail without explicitly specifying the proper mode, i.e. `'r+'` in this case, I was momentarily thrown off by the error and assumed something was wrong with my approach. Would a more explicit error message be useful here?
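For reference, a minimal sketch of the workaround the report alludes to: open the dataset in update (`'r+'`) mode before assigning the CRS. The file path is a placeholder.

```python
import rasterio

# Opening with mode="r+" makes dataset attributes such as crs writable;
# the default "r" mode leaves them read-only.
with rasterio.open("example.tif", "r+") as src:
    src.crs = "EPSG:3857"
```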
[ { "content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n\n\nclass OverviewCreationError(RasterioError):\n \"\"\"Raised when creation of an overview fails\"\"\"\n", "path": "rasterio/errors.py" } ]
[ { "content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n\n\nclass OverviewCreationError(RasterioError):\n \"\"\"Raised when creation of an overview fails\"\"\"\n\n\nclass DatasetAttributeError(RasterioError, NotImplementedError):\n \"\"\"Raised when dataset attributes are misused\"\"\"\n", "path": "rasterio/errors.py" } ]
diff --git a/CHANGES.txt b/CHANGES.txt index cf785e5e0..6216af653 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -4,6 +4,9 @@ Changes 1.0.23 (TBD) ------------ +- Attempts to set attributes of datasets opened in "r" mode now raise a custom + DatasetAttributeError. This exception derives from both RasterioError and + NotImplementedError, which maintains backwards compatibility (#1676). - Block sizes are no longer guarded when creating untiled datasets (#1689). - CRS objects are now hashable and equivalent CRS objects have the same hash value (#1684). diff --git a/rasterio/_base.pyx b/rasterio/_base.pyx index 91d778a11..2a828386e 100644 --- a/rasterio/_base.pyx +++ b/rasterio/_base.pyx @@ -26,6 +26,7 @@ from rasterio.enums import ( ColorInterp, Compression, Interleaving, MaskFlags, PhotometricInterp) from rasterio.env import Env, env_ctx_if_needed from rasterio.errors import ( + DatasetAttributeError, RasterioIOError, CRSError, DriverRegistrationError, NotGeoreferencedWarning, RasterBlockError, BandOverviewError) from rasterio.profiles import Profile @@ -450,7 +451,7 @@ cdef class DatasetBase(object): return self.get_nodatavals() def _set_nodatavals(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property nodata: """The dataset's single nodata value @@ -513,7 +514,7 @@ cdef class DatasetBase(object): for x in self._mask_flags()) def _set_crs(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property crs: """The dataset's coordinate reference system @@ -533,16 +534,16 @@ cdef class DatasetBase(object): self._set_crs(value) def _set_all_descriptions(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") def _set_all_scales(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") def _set_all_offsets(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") def _set_all_units(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property descriptions: """Descriptions for each dataset band @@ -563,7 +564,7 @@ cdef class DatasetBase(object): self._set_all_descriptions(value) def write_transform(self, value): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property transform: """The dataset's georeferencing transformation matrix @@ -1184,7 +1185,7 @@ cdef class DatasetBase(object): for i in range(num_gcps)], crs) def _set_gcps(self, values): - raise NotImplementedError + raise DatasetAttributeError("read-only attribute") property gcps: """ground control points and their coordinate reference system. 
diff --git a/rasterio/errors.py b/rasterio/errors.py index 97f63303f..cf91f8456 100644 --- a/rasterio/errors.py +++ b/rasterio/errors.py @@ -102,3 +102,7 @@ class UnsupportedOperation(RasterioError): class OverviewCreationError(RasterioError): """Raised when creation of an overview fails""" + + +class DatasetAttributeError(RasterioError, NotImplementedError): + """Raised when dataset attributes are misused""" diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 2b106caf5..0086b7e99 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -11,7 +11,7 @@ import rasterio from rasterio.enums import Compression -from rasterio.errors import RasterioIOError +from rasterio.errors import RasterioIOError, DatasetAttributeError from rasterio.transform import Affine @@ -65,3 +65,16 @@ def test_tiled_dataset_blocksize_guard(tmp_path): rasterio.open( tmp_file, "w", driver="GTiff", count=1, height=13, width=13, dtype="uint8", crs="epsg:3857", transform=Affine.identity(), tiled=True, blockxsize=256, blockysize=256) + +def test_dataset_readonly_attributes(path_rgb_byte_tif): + """Attempts to set read-only attributes fail with DatasetAttributeError""" + with pytest.raises(DatasetAttributeError): + with rasterio.open(path_rgb_byte_tif) as dataset: + dataset.crs = "foo" + + +def test_dataset_readonly_attributes(path_rgb_byte_tif): + """Attempts to set read-only attributes still fail with NotImplementedError""" + with pytest.raises(NotImplementedError): + with rasterio.open(path_rgb_byte_tif) as dataset: + dataset.crs = "foo"
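A brief, hedged illustration of the backwards-compatibility point in the CHANGES entry above: because `DatasetAttributeError` derives from both `RasterioError` and `NotImplementedError`, existing `except NotImplementedError` handlers keep working. This assumes a rasterio build that includes the change; the file path is a placeholder.

```python
import rasterio
from rasterio.errors import DatasetAttributeError

with rasterio.open("example.tif") as src:   # default "r" mode: attributes are read-only
    try:
        src.crs = "EPSG:3857"
    except NotImplementedError as err:      # still caught, exactly as before the change
        assert isinstance(err, DatasetAttributeError)
        print(err)                          # "read-only attribute"
```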
pyjanitor-devs__pyjanitor-1175
[BUG] pandas 1.5.x `_MergeOperation` doesn't have `copy` keyword anymore Errors were raised in [the latest testing env](https://github.com/pyjanitor-devs/pyjanitor/actions/runs/3255090961/jobs/5344044127#step:5:1909), whose pandas version is 1.5.0. The pandas version of [environment-dev.yml](https://github.com/pyjanitor-devs/pyjanitor/blob/dev/environment-dev.yml#L36) is 1.3.5, so it would not raise any errors. ```python ___________________________ test_extension_array_eq ____________________________ [gw1] linux -- Python 3.10.6 /usr/share/miniconda3/envs/test/bin/python def test_extension_array_eq(): """Extension arrays when matching on equality.""" df1 = pd.DataFrame( {"id": [1, 1, 1, 2, 2, 3], "value_1": [2, 5, 7, 1, 3, 4]} ) df1 = df1.astype({"value_1": "Int64"}) df2 = pd.DataFrame( { "id": [1, 1, 1, 1, 2, 2, 2, 3], "value_2A": [0, 3, 7, 12, 0, 2, 3, 1], "value_2B": [1, 5, 9, 15, 1, 4, 6, 3], } ) df2 = df2.astype({"value_2A": "Int64"}) > expected = df1.conditional_join( df2, ("id", "id", "=="), ("value_1", "value_2A", ">"), use_numba=False, sort_by_appearance=False, ) tests/functions/test_conditional_join.py:2962: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /usr/share/miniconda3/envs/test/lib/python3.10/site-packages/pandas_flavor/register.py:29: in __call__ return method(self._obj, *args, **kwargs) janitor/functions/conditional_join.py:150: in conditional_join return _conditional_join_compute( janitor/functions/conditional_join.py:419: in _conditional_join_compute result = _multiple_conditional_join_eq(df, right, conditions, keep) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ df = id value_1 0 1 2 1 1 5 2 1 7 3 2 1 4 2 3 5 3 4 right = id value_2A value_2B 0 1 0 1 1 1 3 5 2 1 7 9 3 1 12 15 4 2 0 1 5 2 2 4 6 2 3 6 7 3 1 3 conditions = (('id', 'id', '=='), ('value_1', 'value_2A', '>')), keep = 'all' def _multiple_conditional_join_eq( df: pd.DataFrame, right: pd.DataFrame, conditions: list, keep: str ) -> tuple: """ Get indices for multiple conditions, if any of the conditions has an `==` operator. Returns a tuple of (df_index, right_index) """ eqs = [ (left_on, right_on) for left_on, right_on, op in conditions if op == _JoinOperator.STRICTLY_EQUAL.value ] left_on, right_on = zip(*eqs) left_on = [*left_on] right_on = [*right_on] rest = ( (df[left_on], right[right_on], op) for left_on, right_on, op in conditions if op != _JoinOperator.STRICTLY_EQUAL.value ) > left_index, right_index = _MergeOperation( df, right, left_on=left_on, right_on=right_on, sort=False, copy=False, )._get_join_indexers() E TypeError: _MergeOperation.__init__() got an unexpected keyword argument 'copy' janitor/functions/conditional_join.py:899: TypeError ``` closed to #1143
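One possible way to cope with the removed keyword is to pass `copy` only on pandas versions that still accept it. The sketch below is a hypothetical compatibility shim, not necessarily the fix adopted in the pyjanitor PR; the `_join_indexers` helper name is made up for illustration.

```python
import pandas as pd
from pandas.core.reshape.merge import _MergeOperation

def _join_indexers(df, right, left_on, right_on):
    """Return (left_index, right_index), tolerating the pandas 1.5 signature change."""
    kwargs = {"left_on": left_on, "right_on": right_on, "sort": False}
    if tuple(int(p) for p in pd.__version__.split(".")[:2]) < (1, 5):
        kwargs["copy"] = False  # keyword removed from _MergeOperation.__init__ in pandas 1.5
    return _MergeOperation(df, right, **kwargs)._get_join_indexers()
```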
[ { "content": "import operator\nfrom enum import Enum\nfrom typing import Union, Any, Optional, Hashable, Literal\n\nimport numpy as np\nimport pandas as pd\nimport pandas_flavor as pf\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_numeric_dtype,\n is_string_dtype,\n)\n\nfrom pandas.core.reshape.merge import _MergeOperation\n\nfrom janitor.utils import check, check_column\nfrom janitor.functions.utils import _convert_to_numpy_array\n\n\[email protected]_dataframe_method\ndef conditional_join(\n df: pd.DataFrame,\n right: Union[pd.DataFrame, pd.Series],\n *conditions,\n how: Literal[\"inner\", \"left\", \"right\"] = \"inner\",\n sort_by_appearance: bool = False,\n df_columns: Optional[Any] = None,\n right_columns: Optional[Any] = None,\n keep: Literal[\"first\", \"last\", \"all\"] = \"all\",\n use_numba: bool = False,\n) -> pd.DataFrame:\n \"\"\"\n\n The conditional_join function operates similarly to `pd.merge`,\n but allows joins on inequality operators,\n or a combination of equi and non-equi joins.\n\n Joins solely on equality are not supported.\n\n If the join is solely on equality, `pd.merge` function\n covers that; if you are interested in nearest joins, or rolling joins,\n then `pd.merge_asof` covers that.\n There is also pandas' IntervalIndex, which is efficient for range joins,\n especially if the intervals do not overlap.\n\n Column selection in `df_columns` and `right_columns` is possible using the\n [`select_columns`][janitor.functions.select_columns.select_columns] syntax.\n\n For strictly non-equi joins,\n involving either `>`, `<`, `>=`, `<=` operators,\n performance could be improved by setting `use_numba` to `True`.\n This assumes that `numba` is installed.\n\n To preserve row order, set `sort_by_appearance` to `True`.\n\n This function returns rows, if any, where values from `df` meet the\n condition(s) for values from `right`. The conditions are passed in\n as a variable argument of tuples, where the tuple is of\n the form `(left_on, right_on, op)`; `left_on` is the column\n label from `df`, `right_on` is the column label from `right`,\n while `op` is the operator. For multiple conditions, the and(`&`)\n operator is used to combine the results of the individual conditions.\n\n The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`.\n\n The join is done only on the columns.\n MultiIndex columns are not supported.\n\n For non-equi joins, only numeric and date columns are supported.\n\n Only `inner`, `left`, and `right` joins are supported.\n\n If the columns from `df` and `right` have nothing in common,\n a single index column is returned; else, a MultiIndex column\n is returned.\n\n Example:\n\n >>> import pandas as pd\n >>> import janitor\n >>> df1 = pd.DataFrame({\"value_1\": [2, 5, 7, 1, 3, 4]})\n >>> df2 = pd.DataFrame({\"value_2A\": [0, 3, 7, 12, 0, 2, 3, 1],\n ... \"value_2B\": [1, 5, 9, 15, 1, 4, 6, 3],\n ... })\n >>> df1\n value_1\n 0 2\n 1 5\n 2 7\n 3 1\n 4 3\n 5 4\n >>> df2\n value_2A value_2B\n 0 0 1\n 1 3 5\n 2 7 9\n 3 12 15\n 4 0 1\n 5 2 4\n 6 3 6\n 7 1 3\n >>> df1.conditional_join(\n ... df2,\n ... (\"value_1\", \"value_2A\", \">\"),\n ... (\"value_1\", \"value_2B\", \"<\")\n ... 
)\n value_1 value_2A value_2B\n 0 2 1 3\n 1 5 3 6\n 2 3 2 4\n 3 4 3 5\n 4 4 3 6\n\n\n :param df: A pandas DataFrame.\n :param right: Named Series or DataFrame to join to.\n :param conditions: Variable argument of tuple(s) of the form\n `(left_on, right_on, op)`, where `left_on` is the column\n label from `df`, `right_on` is the column label from `right`,\n while `op` is the operator. The operator can be any of\n `==`, `!=`, `<=`, `<`, `>=`, `>`. For multiple conditions,\n the and(`&`) operator is used to combine the results\n of the individual conditions.\n :param how: Indicates the type of join to be performed.\n It can be one of `inner`, `left`, `right`.\n Full outer join is not supported. Defaults to `inner`.\n :param sort_by_appearance: Default is `False`.\n This is useful for scenarios where the user wants\n the original order maintained.\n If `True` and `how = left`, the row order from the left dataframe\n is preserved; if `True` and `how = right`, the row order\n from the right dataframe is preserved.\n :param df_columns: Columns to select from `df`.\n It can be a single column or a list of columns.\n It is also possible to rename the output columns via a dictionary.\n :param right_columns: Columns to select from `right`.\n It can be a single column or a list of columns.\n It is also possible to rename the output columns via a dictionary.\n :param keep: Choose whether to return the first match,\n last match or all matches. Default is `all`.\n :param use_numba: Use numba, if installed, to accelerate the computation.\n Default is `False`.\n :returns: A pandas DataFrame of the two merged Pandas objects.\n \"\"\"\n\n return _conditional_join_compute(\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n )\n\n\nclass _JoinOperator(Enum):\n \"\"\"\n List of operators used in conditional_join.\n \"\"\"\n\n GREATER_THAN = \">\"\n LESS_THAN = \"<\"\n GREATER_THAN_OR_EQUAL = \">=\"\n LESS_THAN_OR_EQUAL = \"<=\"\n STRICTLY_EQUAL = \"==\"\n NOT_EQUAL = \"!=\"\n\n\noperator_map = {\n _JoinOperator.STRICTLY_EQUAL.value: operator.eq,\n _JoinOperator.LESS_THAN.value: operator.lt,\n _JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le,\n _JoinOperator.GREATER_THAN.value: operator.gt,\n _JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge,\n _JoinOperator.NOT_EQUAL.value: operator.ne,\n}\n\n\nless_than_join_types = {\n _JoinOperator.LESS_THAN.value,\n _JoinOperator.LESS_THAN_OR_EQUAL.value,\n}\ngreater_than_join_types = {\n _JoinOperator.GREATER_THAN.value,\n _JoinOperator.GREATER_THAN_OR_EQUAL.value,\n}\n\n\ndef _check_operator(op: str):\n \"\"\"\n Check that operator is one of\n `>`, `>=`, `==`, `!=`, `<`, `<=`.\n\n Used in `conditional_join`.\n \"\"\"\n sequence_of_operators = {op.value for op in _JoinOperator}\n if op not in sequence_of_operators:\n raise ValueError(\n \"The conditional join operator \"\n f\"should be one of {sequence_of_operators}\"\n )\n\n\ndef _conditional_join_preliminary_checks(\n df: pd.DataFrame,\n right: Union[pd.DataFrame, pd.Series],\n conditions: tuple,\n how: str,\n sort_by_appearance: bool,\n df_columns: Any,\n right_columns: Any,\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Preliminary checks for conditional_join are conducted here.\n\n Checks include differences in number of column levels,\n length of conditions, existence of columns in dataframe, etc.\n \"\"\"\n\n check(\"right\", right, [pd.DataFrame, pd.Series])\n\n df = df[:]\n right = right[:]\n\n if isinstance(right, pd.Series):\n if not 
right.name:\n raise ValueError(\n \"Unnamed Series are not supported for conditional_join.\"\n )\n right = right.to_frame()\n\n if df.columns.nlevels != right.columns.nlevels:\n raise ValueError(\n \"The number of column levels \"\n \"from the left and right frames must match. \"\n \"The number of column levels from the left dataframe \"\n f\"is {df.columns.nlevels}, while the number of column levels \"\n f\"from the right dataframe is {right.columns.nlevels}.\"\n )\n\n if not conditions:\n raise ValueError(\"Kindly provide at least one join condition.\")\n\n for condition in conditions:\n check(\"condition\", condition, [tuple])\n len_condition = len(condition)\n if len_condition != 3:\n raise ValueError(\n \"condition should have only three elements; \"\n f\"{condition} however is of length {len_condition}.\"\n )\n\n for left_on, right_on, op in conditions:\n check(\"left_on\", left_on, [Hashable])\n check(\"right_on\", right_on, [Hashable])\n check(\"operator\", op, [str])\n check_column(df, [left_on])\n check_column(right, [right_on])\n _check_operator(op)\n\n if all(\n (op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions)\n ):\n raise ValueError(\"Equality only joins are not supported.\")\n\n check(\"how\", how, [str])\n\n if how not in {\"inner\", \"left\", \"right\"}:\n raise ValueError(\"'how' should be one of 'inner', 'left' or 'right'.\")\n\n check(\"sort_by_appearance\", sort_by_appearance, [bool])\n\n if (df.columns.nlevels > 1) and (\n isinstance(df_columns, dict) or isinstance(right_columns, dict)\n ):\n raise ValueError(\n \"Column renaming with a dictionary is not supported \"\n \"for MultiIndex columns.\"\n )\n\n check(\"keep\", keep, [str])\n\n if keep not in {\"all\", \"first\", \"last\"}:\n raise ValueError(\"'keep' should be one of 'all', 'first', 'last'.\")\n\n check(\"use_numba\", use_numba, [bool])\n\n return (\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n )\n\n\ndef _conditional_join_type_check(\n left_column: pd.Series, right_column: pd.Series, op: str\n) -> None:\n \"\"\"\n Raise error if column type is not any of numeric or datetime or string.\n \"\"\"\n\n permitted_types = {\n is_datetime64_dtype,\n is_numeric_dtype,\n is_string_dtype,\n is_categorical_dtype,\n }\n for func in permitted_types:\n if func(left_column):\n break\n else:\n raise ValueError(\n \"conditional_join only supports \"\n \"string, category, numeric, or date dtypes (without timezone) - \"\n f\"'{left_column.name} is of type {left_column.dtype}.\"\n )\n\n lk_is_cat = is_categorical_dtype(left_column)\n rk_is_cat = is_categorical_dtype(right_column)\n\n if lk_is_cat & rk_is_cat:\n if not left_column.array._categories_match_up_to_permutation(\n right_column.array\n ):\n raise ValueError(\n f\"'{left_column.name}' and '{right_column.name}' \"\n \"should have the same categories, and the same order.\"\n )\n elif not is_dtype_equal(left_column, right_column):\n raise ValueError(\n f\"Both columns should have the same type - \"\n f\"'{left_column.name}' has {left_column.dtype} type;\"\n f\"'{right_column.name}' has {right_column.dtype} type.\"\n )\n\n if (op in less_than_join_types.union(greater_than_join_types)) & (\n (is_string_dtype(left_column) | is_categorical_dtype(left_column))\n ):\n raise ValueError(\n \"non-equi joins are supported \"\n \"only for datetime and numeric dtypes. 
\"\n f\"{left_column.name} in condition \"\n f\"({left_column.name}, {right_column.name}, {op}) \"\n f\"has a dtype {left_column.dtype}.\"\n )\n\n return None\n\n\ndef _conditional_join_compute(\n df: pd.DataFrame,\n right: pd.DataFrame,\n conditions: list,\n how: str,\n sort_by_appearance: bool,\n df_columns: Any,\n right_columns: Any,\n keep: str,\n use_numba: bool,\n) -> pd.DataFrame:\n \"\"\"\n This is where the actual computation\n for the conditional join takes place.\n A pandas DataFrame is returned.\n \"\"\"\n\n (\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n ) = _conditional_join_preliminary_checks(\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n )\n\n eq_check = False\n le_lt_check = False\n for condition in conditions:\n left_on, right_on, op = condition\n _conditional_join_type_check(df[left_on], right[right_on], op)\n if op == _JoinOperator.STRICTLY_EQUAL.value:\n eq_check = True\n elif op in less_than_join_types.union(greater_than_join_types):\n le_lt_check = True\n\n df.index = range(len(df))\n right.index = range(len(right))\n\n if len(conditions) > 1:\n if eq_check:\n result = _multiple_conditional_join_eq(df, right, conditions, keep)\n elif le_lt_check:\n result = _multiple_conditional_join_le_lt(\n df, right, conditions, keep, use_numba\n )\n else:\n result = _multiple_conditional_join_ne(\n df, right, conditions, keep, use_numba\n )\n else:\n left_on, right_on, op = conditions[0]\n result = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n False,\n keep,\n use_numba,\n )\n\n if result is None:\n result = np.array([], dtype=np.intp), np.array([], dtype=np.intp)\n\n return _create_frame(\n df,\n right,\n *result,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n )\n\n\ndef _keep_output(keep: str, left: np.ndarray, right: np.ndarray):\n \"\"\"return indices for left and right index based on the value of `keep`.\"\"\"\n if keep == \"all\":\n return left, right\n grouped = pd.Series(right).groupby(left)\n if keep == \"first\":\n grouped = grouped.min()\n return grouped.index, grouped.array\n grouped = grouped.max()\n return grouped.index, grouped.array\n\n\ndef _less_than_indices(\n left: pd.Series,\n right: pd.Series,\n strict: bool,\n keep: str,\n) -> tuple:\n \"\"\"\n Use binary search to get indices where left\n is less than or equal to right.\n\n If strict is True, then only indices\n where `left` is less than\n (but not equal to) `right` are returned.\n\n A tuple of integer indexes\n for left and right is returned.\n \"\"\"\n\n # no point going through all the hassle\n if left.min() > right.max():\n return None\n\n any_nulls = pd.isna(left)\n if any_nulls.all():\n return None\n if any_nulls.any():\n left = left[~any_nulls]\n any_nulls = pd.isna(right)\n if any_nulls.all():\n return None\n if any_nulls.any():\n right = right[~any_nulls]\n any_nulls = any_nulls.any()\n right_is_sorted = right.is_monotonic_increasing\n if not right_is_sorted:\n right = right.sort_values(kind=\"stable\")\n\n left_index = left.index._values\n left = left._values\n right_index = right.index._values\n right = right._values\n\n search_indices = right.searchsorted(left, side=\"left\")\n\n # if any of the positions in `search_indices`\n # is equal to the length of `right_keys`\n # that means the respective position in `left`\n # has no values from `right` that are less than\n # or equal, and should therefore be discarded\n len_right = 
right.size\n rows_equal = search_indices == len_right\n\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n # the idea here is that if there are any equal values\n # shift to the right to the immediate next position\n # that is not equal\n if strict:\n rows_equal = right[search_indices]\n rows_equal = left == rows_equal\n # replace positions where rows are equal\n # with positions from searchsorted('right')\n # positions from searchsorted('right') will never\n # be equal and will be the furthermost in terms of position\n # example : right -> [2, 2, 2, 3], and we need\n # positions where values are not equal for 2;\n # the furthermost will be 3, and searchsorted('right')\n # will return position 3.\n if rows_equal.any():\n replacements = right.searchsorted(left, side=\"right\")\n # now we can safely replace values\n # with strictly less than positions\n search_indices = np.where(rows_equal, replacements, search_indices)\n # check again if any of the values\n # have become equal to length of right\n # and get rid of them\n rows_equal = search_indices == len_right\n\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n if not search_indices.size:\n return None\n if right_is_sorted and (keep == \"first\"):\n if any_nulls:\n return left_index, right_index[search_indices]\n return left_index, search_indices\n right = [right_index[ind:len_right] for ind in search_indices]\n if keep == \"first\":\n right = [arr.min() for arr in right]\n return left_index, right\n if keep == \"last\":\n right = [arr.max() for arr in right]\n return left_index, right\n right = np.concatenate(right)\n left = np.repeat(left_index, len_right - search_indices)\n return left, right\n\n\ndef _greater_than_indices(\n left: pd.Series,\n right: pd.Series,\n strict: bool,\n multiple_conditions: bool,\n keep: str,\n) -> tuple:\n \"\"\"\n Use binary search to get indices where left\n is greater than or equal to right.\n\n If strict is True, then only indices\n where `left` is greater than\n (but not equal to) `right` are returned.\n\n if multiple_conditions is False, a tuple of integer indexes\n for left and right is returned;\n else a tuple of the index for left, right, as well\n as the positions of left in right is returned.\n \"\"\"\n\n # quick break, avoiding the hassle\n if left.max() < right.min():\n return None\n\n any_nulls = pd.isna(left)\n if any_nulls.all():\n return None\n if any_nulls.any():\n left = left[~any_nulls]\n any_nulls = pd.isna(right)\n if any_nulls.all():\n return None\n if any_nulls.any():\n right = right[~any_nulls]\n any_nulls = any_nulls.any()\n right_is_sorted = right.is_monotonic_increasing\n if not right_is_sorted:\n right = right.sort_values(kind=\"stable\")\n\n left_index = left.index._values\n left = left._values\n right_index = right.index._values\n right = right._values\n\n search_indices = right.searchsorted(left, side=\"right\")\n # if any of the positions in `search_indices`\n # is equal to 0 (less than 1), it implies that\n # left[position] is not greater than any value\n # in right\n rows_equal = search_indices < 1\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n # the idea here is that if there are any equal values\n # shift downwards to the immediate next position\n # that is not equal\n if strict:\n rows_equal = 
right[search_indices - 1]\n rows_equal = left == rows_equal\n # replace positions where rows are equal with\n # searchsorted('left');\n # however there can be scenarios where positions\n # from searchsorted('left') would still be equal;\n # in that case, we shift down by 1\n if rows_equal.any():\n replacements = right.searchsorted(left, side=\"left\")\n # return replacements\n # `left` might result in values equal to len right\n replacements = np.where(\n replacements == right.size, replacements - 1, replacements\n )\n # now we can safely replace values\n # with strictly greater than positions\n search_indices = np.where(rows_equal, replacements, search_indices)\n # any value less than 1 should be discarded\n # since the lowest value for binary search\n # with side='right' should be 1\n rows_equal = search_indices < 1\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n if not search_indices.size:\n return None\n\n if multiple_conditions:\n return left_index, right_index, search_indices\n if right_is_sorted and (keep == \"last\"):\n if any_nulls:\n return left_index, right_index[search_indices - 1]\n return left_index, search_indices - 1\n right = [right_index[:ind] for ind in search_indices]\n if keep == \"first\":\n right = [arr.min() for arr in right]\n return left_index, right\n if keep == \"last\":\n right = [arr.max() for arr in right]\n return left_index, right\n right = np.concatenate(right)\n left = np.repeat(left_index, search_indices)\n return left, right\n\n\ndef _not_equal_indices(left: pd.Series, right: pd.Series, keep: str) -> tuple:\n \"\"\"\n Use binary search to get indices where\n `left` is exactly not equal to `right`.\n\n It is a combination of strictly less than\n and strictly greater than indices.\n\n A tuple of integer indexes for left and right\n is returned.\n \"\"\"\n\n dummy = np.array([], dtype=int)\n\n # deal with nulls\n l1_nulls = dummy\n r1_nulls = dummy\n l2_nulls = dummy\n r2_nulls = dummy\n any_left_nulls = left.isna()\n any_right_nulls = right.isna()\n if any_left_nulls.any():\n l1_nulls = left.index[any_left_nulls.array]\n l1_nulls = l1_nulls.to_numpy(copy=False)\n r1_nulls = right.index\n # avoid NAN duplicates\n if any_right_nulls.any():\n r1_nulls = r1_nulls[~any_right_nulls.array]\n r1_nulls = r1_nulls.to_numpy(copy=False)\n nulls_count = l1_nulls.size\n # blow up nulls to match length of right\n l1_nulls = np.tile(l1_nulls, r1_nulls.size)\n # ensure length of right matches left\n if nulls_count > 1:\n r1_nulls = np.repeat(r1_nulls, nulls_count)\n if any_right_nulls.any():\n r2_nulls = right.index[any_right_nulls.array]\n r2_nulls = r2_nulls.to_numpy(copy=False)\n l2_nulls = left.index\n nulls_count = r2_nulls.size\n # blow up nulls to match length of left\n r2_nulls = np.tile(r2_nulls, l2_nulls.size)\n # ensure length of left matches right\n if nulls_count > 1:\n l2_nulls = np.repeat(l2_nulls, nulls_count)\n\n l1_nulls = np.concatenate([l1_nulls, l2_nulls])\n r1_nulls = np.concatenate([r1_nulls, r2_nulls])\n\n outcome = _less_than_indices(left, right, strict=True, keep=keep)\n\n if outcome is None:\n lt_left = dummy\n lt_right = dummy\n else:\n lt_left, lt_right = outcome\n\n outcome = _greater_than_indices(\n left, right, strict=True, multiple_conditions=False, keep=keep\n )\n\n if outcome is None:\n gt_left = dummy\n gt_right = dummy\n else:\n gt_left, gt_right = outcome\n\n left = np.concatenate([lt_left, gt_left, l1_nulls])\n right = 
np.concatenate([lt_right, gt_right, r1_nulls])\n\n if (not left.size) & (not right.size):\n return None\n return _keep_output(keep, left, right)\n\n\ndef _generic_func_cond_join(\n left: pd.Series,\n right: pd.Series,\n op: str,\n multiple_conditions: bool,\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Generic function to call any of the individual functions\n (_less_than_indices, _greater_than_indices,\n or _not_equal_indices).\n \"\"\"\n strict = False\n\n if op in {\n _JoinOperator.GREATER_THAN.value,\n _JoinOperator.LESS_THAN.value,\n _JoinOperator.NOT_EQUAL.value,\n }:\n strict = True\n\n if use_numba:\n if op in less_than_join_types:\n op_code = 1\n elif op in greater_than_join_types:\n op_code = 0\n else:\n op_code = -1\n from janitor.functions._numba import _numba_single_join\n\n return _numba_single_join(left, right, strict, keep, op_code)\n\n if op in less_than_join_types:\n return _less_than_indices(left, right, strict, keep)\n if op in greater_than_join_types:\n return _greater_than_indices(\n left, right, strict, multiple_conditions, keep\n )\n if op == _JoinOperator.NOT_EQUAL.value:\n return _not_equal_indices(left, right, keep)\n\n\ndef _generate_indices(\n left_index: np.ndarray,\n right_index: np.ndarray,\n conditions: list[tuple[pd.Series, pd.Series, str]],\n) -> tuple:\n \"\"\"\n Run a for loop to get the final indices.\n This iteratively goes through each condition,\n builds a boolean array,\n and gets indices for rows that meet the condition requirements.\n `conditions` is a list of tuples, where a tuple is of the form:\n `(Series from df, Series from right, operator)`.\n \"\"\"\n\n for condition in conditions:\n left, right, op = condition\n left = left._values[left_index]\n right = right._values[right_index]\n op = operator_map[op]\n mask = op(left, right)\n if not mask.any():\n return None\n if is_extension_array_dtype(mask):\n mask = mask.to_numpy(dtype=bool, na_value=False)\n if not mask.all():\n left_index = left_index[mask]\n right_index = right_index[mask]\n\n return left_index, right_index\n\n\ndef _multiple_conditional_join_ne(\n df: pd.DataFrame,\n right: pd.DataFrame,\n conditions: list[tuple[pd.Series, pd.Series, str]],\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Get indices for multiple conditions,\n where all the operators are `!=`.\n\n Returns a tuple of (left_index, right_index)\n \"\"\"\n\n # currently, there is no optimization option here\n # not equal typically combines less than\n # and greater than, so a lot more rows are returned\n # than just less than or greater than\n\n # here we get indices for the first condition in conditions\n # then use those indices to get the final indices,\n # using _generate_indices\n first, *rest = conditions\n left_on, right_on, op = first\n\n # get indices from the first condition\n indices = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n multiple_conditions=False,\n keep=\"all\",\n use_numba=use_numba,\n )\n if indices is None:\n return None\n\n rest = (\n (df[left_on], right[right_on], op) for left_on, right_on, op in rest\n )\n\n indices = _generate_indices(*indices, rest)\n\n if not indices:\n return None\n\n return _keep_output(keep, *indices)\n\n\ndef _multiple_conditional_join_eq(\n df: pd.DataFrame, right: pd.DataFrame, conditions: list, keep: str\n) -> tuple:\n \"\"\"\n Get indices for multiple conditions,\n if any of the conditions has an `==` operator.\n\n Returns a tuple of (df_index, right_index)\n \"\"\"\n eqs = [\n (left_on, right_on)\n for left_on, right_on, 
op in conditions\n if op == _JoinOperator.STRICTLY_EQUAL.value\n ]\n\n left_on, right_on = zip(*eqs)\n left_on = [*left_on]\n right_on = [*right_on]\n\n rest = (\n (df[left_on], right[right_on], op)\n for left_on, right_on, op in conditions\n if op != _JoinOperator.STRICTLY_EQUAL.value\n )\n\n left_index, right_index = _MergeOperation(\n df,\n right,\n left_on=left_on,\n right_on=right_on,\n sort=False,\n copy=False,\n )._get_join_indexers()\n\n if not left_index.size:\n return None\n\n indices = _generate_indices(left_index, right_index, rest)\n\n if not indices:\n return None\n\n return _keep_output(keep, *indices)\n\n\ndef _multiple_conditional_join_le_lt(\n df: pd.DataFrame,\n right: pd.DataFrame,\n conditions: list,\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Get indices for multiple conditions,\n where `>/>=` or `</<=` is present,\n and there is no `==` operator.\n\n Returns a tuple of (df_index, right_index)\n \"\"\"\n if use_numba:\n from janitor.functions._numba import _numba_pair_le_lt\n\n pairs = [\n condition\n for condition in conditions\n if condition[-1] != _JoinOperator.NOT_EQUAL.value\n ]\n conditions = [\n condition\n for condition in conditions\n if condition[-1] == _JoinOperator.NOT_EQUAL.value\n ]\n if len(pairs) > 2:\n patch = pairs[2:]\n conditions.extend(patch)\n pairs = pairs[:2]\n if len(pairs) < 2:\n # combine with != condition\n # say we have ('start', 'ID', '<='), ('end', 'ID', '!=')\n # we convert conditions to :\n # ('start', 'ID', '<='), ('end', 'ID', '>'), ('end', 'ID', '<')\n # subsequently we run the numba pair fn on the pairs:\n # ('start', 'ID', '<=') & ('end', 'ID', '>')\n # ('start', 'ID', '<=') & ('end', 'ID', '<')\n # finally unionize the outcome of the pairs\n # this only works if there is no null in the != condition\n # thanks to Hypothesis tests for pointing this out\n left_on, right_on, op = conditions[0]\n # check for nulls in the patch\n # and follow this path, only if there are no nulls\n if df[left_on].notna().all() & right[right_on].notna().all():\n patch = (\n left_on,\n right_on,\n _JoinOperator.GREATER_THAN.value,\n ), (\n left_on,\n right_on,\n _JoinOperator.LESS_THAN.value,\n )\n pairs.extend(patch)\n first, middle, last = pairs\n pairs = [(first, middle), (first, last)]\n indices = [\n _numba_pair_le_lt(df, right, pair) for pair in pairs\n ]\n indices = [arr for arr in indices if arr is not None]\n if not indices:\n indices = None\n elif len(indices) == 1:\n indices = indices[0]\n else:\n indices = zip(*indices)\n indices = map(np.concatenate, indices)\n conditions = conditions[1:]\n else:\n left_on, right_on, op = pairs[0]\n indices = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n multiple_conditions=False,\n keep=\"all\",\n use_numba=True,\n )\n else:\n indices = _numba_pair_le_lt(df, right, pairs)\n else:\n # there is an opportunity for optimization for range joins\n # which is usually `lower_value < value < upper_value`\n # or `lower_value < a` and `b < upper_value`\n # intervalindex is not used here, as there are scenarios\n # where there will be overlapping intervals;\n # intervalindex does not offer an efficient way to get\n # the indices for overlaps\n # also, intervalindex covers only the first option\n # i.e => `lower_value < value < upper_value`\n # it does not extend to range joins for different columns\n # i.e => `lower_value < a` and `b < upper_value`\n # the option used for range joins is a simple form\n # dependent on sorting and extensible to overlaps\n # as well as the second option:\n 
# i.e =>`lower_value < a` and `b < upper_value`\n # range joins are also the more common types of non-equi joins\n # the other joins do not have an optimisation opportunity\n # within this space, as far as I know,\n # so a blowup of all the rows is unavoidable.\n\n # The numba version offers optimisations\n # for all types of non-equi joins\n # and is generally much faster\n\n # first step is to get two conditions, if possible\n # where one has a less than operator\n # and the other has a greater than operator\n # get the indices from that\n # and then build the remaining indices,\n # using _generate_indices function\n # the aim of this for loop is to see if there is\n # the possiblity of a range join, and if there is,\n # then use the optimised path\n le_lt = None\n ge_gt = None\n # keep the first match for le_lt or ge_gt\n for condition in conditions:\n *_, op = condition\n if op in less_than_join_types:\n if le_lt:\n continue\n le_lt = condition\n elif op in greater_than_join_types:\n if ge_gt:\n continue\n ge_gt = condition\n if le_lt and ge_gt:\n break\n\n # optimised path\n if le_lt and ge_gt:\n conditions = [\n condition\n for condition in conditions\n if condition not in (ge_gt, le_lt)\n ]\n\n indices = _range_indices(df, right, ge_gt, le_lt)\n\n # no optimised path\n # blow up the rows and prune\n else:\n if le_lt:\n conditions = [\n condition for condition in conditions if condition != le_lt\n ]\n left_on, right_on, op = le_lt\n else:\n conditions = [\n condition for condition in conditions if condition != ge_gt\n ]\n left_on, right_on, op = ge_gt\n\n indices = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n multiple_conditions=False,\n keep=\"all\",\n use_numba=False,\n )\n\n if not indices:\n return None\n\n if conditions:\n conditions = (\n (df[left_on], right[right_on], op)\n for left_on, right_on, op in conditions\n )\n\n indices = _generate_indices(*indices, conditions)\n if not indices:\n return None\n\n return _keep_output(keep, *indices)\n\n\ndef _range_indices(\n df: pd.DataFrame,\n right: pd.DataFrame,\n first: tuple,\n second: tuple,\n):\n \"\"\"\n Retrieve index positions for range/interval joins.\n\n Idea inspired by article:\n https://www.vertica.com/blog/what-is-a-range-join-and-why-is-it-so-fastba-p223413/\n\n Returns a tuple of (left_index, right_index)\n \"\"\"\n # summary of code for range join:\n # get the positions where start_left is >/>= start_right\n # then within the positions,\n # get the positions where end_left is </<= end_right\n # this should reduce the search space\n\n left_on, right_on, op = first\n left_c = df[left_on]\n right_c = right[right_on]\n left_on, right_on, _ = second\n # get rid of any nulls\n # this is helpful as we can convert extension arrays to numpy arrays safely\n # and simplify the search logic below\n any_nulls = pd.isna(df[left_on])\n if any_nulls.any():\n left_c = left_c[~any_nulls]\n any_nulls = pd.isna(right[right_on])\n if any_nulls.any():\n right_c = right_c[~any_nulls]\n\n strict = False\n if op == _JoinOperator.GREATER_THAN.value:\n strict = True\n\n outcome = _greater_than_indices(\n left_c,\n right_c,\n strict,\n multiple_conditions=True,\n keep=\"all\",\n )\n\n if outcome is None:\n return None\n\n left_index, right_index, search_indices = outcome\n left_on, right_on, op = second\n right_c = right.loc[right_index, right_on]\n left_c = df.loc[left_index, left_on]\n\n left_c = left_c._values\n right_c = right_c._values\n left_c, right_c = _convert_to_numpy_array(left_c, right_c)\n op = 
operator_map[op]\n pos = np.empty(left_c.size, dtype=np.intp)\n\n # better served in a compiled environment\n # where we can break early\n # parallelise the operation, as well as\n # avoid the restrictive fixed size approach of numpy\n # which isnt particularly helpful in a for loop\n for ind in range(left_c.size):\n out = op(left_c[ind], right_c)\n pos[ind] = np.argmax(out)\n\n # no point searching within (a, b)\n # if a == b\n # since range(a, b) yields none\n keep_rows = pos < search_indices\n\n if not keep_rows.any():\n return None\n\n if not keep_rows.all():\n left_index = left_index[keep_rows]\n pos = pos[keep_rows]\n search_indices = search_indices[keep_rows]\n\n repeater = search_indices - pos\n right_index = [\n right_index[start:end] for start, end in zip(pos, search_indices)\n ]\n\n right_index = np.concatenate(right_index)\n left_index = np.repeat(left_index, repeater)\n # here we search for actual positions\n # where left_c is </<= right_c\n # safe to index the arrays, since we are picking the positions\n # which are all in the original `df` and `right`\n # doing this allows some speed gains\n # while still ensuring correctness\n left_c = df[left_on]._values[left_index]\n right_c = right[right_on]._values[right_index]\n ext_arr = is_extension_array_dtype(left_c)\n\n mask = op(left_c, right_c)\n\n if ext_arr:\n mask = mask.to_numpy(dtype=bool, na_value=False)\n\n if not mask.all():\n left_index = left_index[mask]\n right_index = right_index[mask]\n\n return left_index, right_index\n\n\ndef _cond_join_select_columns(columns: Any, df: pd.DataFrame):\n \"\"\"\n Select columns in a DataFrame.\n Optionally rename the columns while selecting.\n Returns a Pandas DataFrame.\n \"\"\"\n\n df = df.select_columns(columns)\n\n if isinstance(columns, dict):\n df.columns = [columns.get(name, name) for name in df]\n\n return df\n\n\ndef _create_multiindex_column(df: pd.DataFrame, right: pd.DataFrame):\n \"\"\"\n Create a MultiIndex column for conditional_join.\n \"\"\"\n header = [np.array([\"left\"]).repeat(df.columns.size)]\n columns = [\n df.columns.get_level_values(n) for n in range(df.columns.nlevels)\n ]\n header.extend(columns)\n df.columns = pd.MultiIndex.from_arrays(header)\n header = [np.array([\"right\"]).repeat(right.columns.size)]\n columns = [\n right.columns.get_level_values(n) for n in range(right.columns.nlevels)\n ]\n header.extend(columns)\n right.columns = pd.MultiIndex.from_arrays(header)\n return df, right\n\n\ndef _create_frame(\n df: pd.DataFrame,\n right: pd.DataFrame,\n left_index: np.ndarray,\n right_index: np.ndarray,\n how: str,\n sort_by_appearance: bool,\n df_columns: Any,\n right_columns: Any,\n):\n \"\"\"\n Create final dataframe\n \"\"\"\n if df_columns:\n df = _cond_join_select_columns(df_columns, df)\n\n if right_columns:\n right = _cond_join_select_columns(right_columns, right)\n\n if set(df.columns).intersection(right.columns):\n df, right = _create_multiindex_column(df, right)\n\n if sort_by_appearance or (left_index.size == 0):\n if how in {\"inner\", \"left\"}:\n right = right.take(right_index)\n right.index = left_index\n else:\n df = df.take(left_index)\n df.index = right_index\n df = pd.merge(\n df,\n right,\n left_index=True,\n right_index=True,\n sort=False,\n copy=False,\n how=how,\n )\n df.index = range(len(df))\n return df\n\n def _inner(\n df: pd.DataFrame,\n right: pd.DataFrame,\n left_index: pd.DataFrame,\n right_index: pd.DataFrame,\n ) -> pd.DataFrame:\n \"\"\"Create DataFrame for inner join\"\"\"\n df = {key: value._values[left_index] for 
key, value in df.items()}\n right = {\n key: value._values[right_index] for key, value in right.items()\n }\n df.update(right)\n return pd.DataFrame(df, copy=False)\n\n if how == \"inner\":\n return _inner(df, right, left_index, right_index)\n\n if how == \"left\":\n df_ = np.bincount(left_index, minlength=df.index.size) == 0\n df_ = df_.nonzero()[0]\n if not df_.size:\n return _inner(df, right, left_index, right_index)\n df_ = df.take(df_)\n df = _inner(df, right, left_index, right_index)\n return pd.concat([df, df_], ignore_index=True)\n if how == \"right\":\n right_ = np.bincount(right_index, minlength=right.index.size) == 0\n right_ = right_.nonzero()[0]\n if not right_.size:\n return _inner(df, right, left_index, right_index)\n right_ = right.take(right_)\n right = _inner(df, right, left_index, right_index)\n return pd.concat([right, right_], ignore_index=True)\n", "path": "janitor/functions/conditional_join.py" } ]
[ { "content": "import operator\nfrom enum import Enum\nfrom typing import Union, Any, Optional, Hashable, Literal\n\nimport numpy as np\nimport pandas as pd\nimport pandas_flavor as pf\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_numeric_dtype,\n is_string_dtype,\n)\n\nfrom pandas.core.reshape.merge import _MergeOperation\n\nfrom janitor.utils import check, check_column\nfrom janitor.functions.utils import _convert_to_numpy_array\n\n\[email protected]_dataframe_method\ndef conditional_join(\n df: pd.DataFrame,\n right: Union[pd.DataFrame, pd.Series],\n *conditions,\n how: Literal[\"inner\", \"left\", \"right\"] = \"inner\",\n sort_by_appearance: bool = False,\n df_columns: Optional[Any] = None,\n right_columns: Optional[Any] = None,\n keep: Literal[\"first\", \"last\", \"all\"] = \"all\",\n use_numba: bool = False,\n) -> pd.DataFrame:\n \"\"\"\n\n The conditional_join function operates similarly to `pd.merge`,\n but allows joins on inequality operators,\n or a combination of equi and non-equi joins.\n\n Joins solely on equality are not supported.\n\n If the join is solely on equality, `pd.merge` function\n covers that; if you are interested in nearest joins, or rolling joins,\n then `pd.merge_asof` covers that.\n There is also pandas' IntervalIndex, which is efficient for range joins,\n especially if the intervals do not overlap.\n\n Column selection in `df_columns` and `right_columns` is possible using the\n [`select_columns`][janitor.functions.select_columns.select_columns] syntax.\n\n For strictly non-equi joins,\n involving either `>`, `<`, `>=`, `<=` operators,\n performance could be improved by setting `use_numba` to `True`.\n This assumes that `numba` is installed.\n\n To preserve row order, set `sort_by_appearance` to `True`.\n\n This function returns rows, if any, where values from `df` meet the\n condition(s) for values from `right`. The conditions are passed in\n as a variable argument of tuples, where the tuple is of\n the form `(left_on, right_on, op)`; `left_on` is the column\n label from `df`, `right_on` is the column label from `right`,\n while `op` is the operator. For multiple conditions, the and(`&`)\n operator is used to combine the results of the individual conditions.\n\n The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`.\n\n The join is done only on the columns.\n MultiIndex columns are not supported.\n\n For non-equi joins, only numeric and date columns are supported.\n\n Only `inner`, `left`, and `right` joins are supported.\n\n If the columns from `df` and `right` have nothing in common,\n a single index column is returned; else, a MultiIndex column\n is returned.\n\n Example:\n\n >>> import pandas as pd\n >>> import janitor\n >>> df1 = pd.DataFrame({\"value_1\": [2, 5, 7, 1, 3, 4]})\n >>> df2 = pd.DataFrame({\"value_2A\": [0, 3, 7, 12, 0, 2, 3, 1],\n ... \"value_2B\": [1, 5, 9, 15, 1, 4, 6, 3],\n ... })\n >>> df1\n value_1\n 0 2\n 1 5\n 2 7\n 3 1\n 4 3\n 5 4\n >>> df2\n value_2A value_2B\n 0 0 1\n 1 3 5\n 2 7 9\n 3 12 15\n 4 0 1\n 5 2 4\n 6 3 6\n 7 1 3\n >>> df1.conditional_join(\n ... df2,\n ... (\"value_1\", \"value_2A\", \">\"),\n ... (\"value_1\", \"value_2B\", \"<\")\n ... 
)\n value_1 value_2A value_2B\n 0 2 1 3\n 1 5 3 6\n 2 3 2 4\n 3 4 3 5\n 4 4 3 6\n\n\n :param df: A pandas DataFrame.\n :param right: Named Series or DataFrame to join to.\n :param conditions: Variable argument of tuple(s) of the form\n `(left_on, right_on, op)`, where `left_on` is the column\n label from `df`, `right_on` is the column label from `right`,\n while `op` is the operator. The operator can be any of\n `==`, `!=`, `<=`, `<`, `>=`, `>`. For multiple conditions,\n the and(`&`) operator is used to combine the results\n of the individual conditions.\n :param how: Indicates the type of join to be performed.\n It can be one of `inner`, `left`, `right`.\n Full outer join is not supported. Defaults to `inner`.\n :param sort_by_appearance: Default is `False`.\n This is useful for scenarios where the user wants\n the original order maintained.\n If `True` and `how = left`, the row order from the left dataframe\n is preserved; if `True` and `how = right`, the row order\n from the right dataframe is preserved.\n :param df_columns: Columns to select from `df`.\n It can be a single column or a list of columns.\n It is also possible to rename the output columns via a dictionary.\n :param right_columns: Columns to select from `right`.\n It can be a single column or a list of columns.\n It is also possible to rename the output columns via a dictionary.\n :param keep: Choose whether to return the first match,\n last match or all matches. Default is `all`.\n :param use_numba: Use numba, if installed, to accelerate the computation.\n Default is `False`.\n :returns: A pandas DataFrame of the two merged Pandas objects.\n \"\"\"\n\n return _conditional_join_compute(\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n )\n\n\nclass _JoinOperator(Enum):\n \"\"\"\n List of operators used in conditional_join.\n \"\"\"\n\n GREATER_THAN = \">\"\n LESS_THAN = \"<\"\n GREATER_THAN_OR_EQUAL = \">=\"\n LESS_THAN_OR_EQUAL = \"<=\"\n STRICTLY_EQUAL = \"==\"\n NOT_EQUAL = \"!=\"\n\n\noperator_map = {\n _JoinOperator.STRICTLY_EQUAL.value: operator.eq,\n _JoinOperator.LESS_THAN.value: operator.lt,\n _JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le,\n _JoinOperator.GREATER_THAN.value: operator.gt,\n _JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge,\n _JoinOperator.NOT_EQUAL.value: operator.ne,\n}\n\n\nless_than_join_types = {\n _JoinOperator.LESS_THAN.value,\n _JoinOperator.LESS_THAN_OR_EQUAL.value,\n}\ngreater_than_join_types = {\n _JoinOperator.GREATER_THAN.value,\n _JoinOperator.GREATER_THAN_OR_EQUAL.value,\n}\n\n\ndef _check_operator(op: str):\n \"\"\"\n Check that operator is one of\n `>`, `>=`, `==`, `!=`, `<`, `<=`.\n\n Used in `conditional_join`.\n \"\"\"\n sequence_of_operators = {op.value for op in _JoinOperator}\n if op not in sequence_of_operators:\n raise ValueError(\n \"The conditional join operator \"\n f\"should be one of {sequence_of_operators}\"\n )\n\n\ndef _conditional_join_preliminary_checks(\n df: pd.DataFrame,\n right: Union[pd.DataFrame, pd.Series],\n conditions: tuple,\n how: str,\n sort_by_appearance: bool,\n df_columns: Any,\n right_columns: Any,\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Preliminary checks for conditional_join are conducted here.\n\n Checks include differences in number of column levels,\n length of conditions, existence of columns in dataframe, etc.\n \"\"\"\n\n check(\"right\", right, [pd.DataFrame, pd.Series])\n\n df = df[:]\n right = right[:]\n\n if isinstance(right, pd.Series):\n if not 
right.name:\n raise ValueError(\n \"Unnamed Series are not supported for conditional_join.\"\n )\n right = right.to_frame()\n\n if df.columns.nlevels != right.columns.nlevels:\n raise ValueError(\n \"The number of column levels \"\n \"from the left and right frames must match. \"\n \"The number of column levels from the left dataframe \"\n f\"is {df.columns.nlevels}, while the number of column levels \"\n f\"from the right dataframe is {right.columns.nlevels}.\"\n )\n\n if not conditions:\n raise ValueError(\"Kindly provide at least one join condition.\")\n\n for condition in conditions:\n check(\"condition\", condition, [tuple])\n len_condition = len(condition)\n if len_condition != 3:\n raise ValueError(\n \"condition should have only three elements; \"\n f\"{condition} however is of length {len_condition}.\"\n )\n\n for left_on, right_on, op in conditions:\n check(\"left_on\", left_on, [Hashable])\n check(\"right_on\", right_on, [Hashable])\n check(\"operator\", op, [str])\n check_column(df, [left_on])\n check_column(right, [right_on])\n _check_operator(op)\n\n if all(\n (op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions)\n ):\n raise ValueError(\"Equality only joins are not supported.\")\n\n check(\"how\", how, [str])\n\n if how not in {\"inner\", \"left\", \"right\"}:\n raise ValueError(\"'how' should be one of 'inner', 'left' or 'right'.\")\n\n check(\"sort_by_appearance\", sort_by_appearance, [bool])\n\n if (df.columns.nlevels > 1) and (\n isinstance(df_columns, dict) or isinstance(right_columns, dict)\n ):\n raise ValueError(\n \"Column renaming with a dictionary is not supported \"\n \"for MultiIndex columns.\"\n )\n\n check(\"keep\", keep, [str])\n\n if keep not in {\"all\", \"first\", \"last\"}:\n raise ValueError(\"'keep' should be one of 'all', 'first', 'last'.\")\n\n check(\"use_numba\", use_numba, [bool])\n\n return (\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n )\n\n\ndef _conditional_join_type_check(\n left_column: pd.Series, right_column: pd.Series, op: str\n) -> None:\n \"\"\"\n Raise error if column type is not any of numeric or datetime or string.\n \"\"\"\n\n permitted_types = {\n is_datetime64_dtype,\n is_numeric_dtype,\n is_string_dtype,\n is_categorical_dtype,\n }\n for func in permitted_types:\n if func(left_column):\n break\n else:\n raise ValueError(\n \"conditional_join only supports \"\n \"string, category, numeric, or date dtypes (without timezone) - \"\n f\"'{left_column.name} is of type {left_column.dtype}.\"\n )\n\n lk_is_cat = is_categorical_dtype(left_column)\n rk_is_cat = is_categorical_dtype(right_column)\n\n if lk_is_cat & rk_is_cat:\n if not left_column.array._categories_match_up_to_permutation(\n right_column.array\n ):\n raise ValueError(\n f\"'{left_column.name}' and '{right_column.name}' \"\n \"should have the same categories, and the same order.\"\n )\n elif not is_dtype_equal(left_column, right_column):\n raise ValueError(\n f\"Both columns should have the same type - \"\n f\"'{left_column.name}' has {left_column.dtype} type;\"\n f\"'{right_column.name}' has {right_column.dtype} type.\"\n )\n\n if (op in less_than_join_types.union(greater_than_join_types)) & (\n (is_string_dtype(left_column) | is_categorical_dtype(left_column))\n ):\n raise ValueError(\n \"non-equi joins are supported \"\n \"only for datetime and numeric dtypes. 
\"\n f\"{left_column.name} in condition \"\n f\"({left_column.name}, {right_column.name}, {op}) \"\n f\"has a dtype {left_column.dtype}.\"\n )\n\n return None\n\n\ndef _conditional_join_compute(\n df: pd.DataFrame,\n right: pd.DataFrame,\n conditions: list,\n how: str,\n sort_by_appearance: bool,\n df_columns: Any,\n right_columns: Any,\n keep: str,\n use_numba: bool,\n) -> pd.DataFrame:\n \"\"\"\n This is where the actual computation\n for the conditional join takes place.\n A pandas DataFrame is returned.\n \"\"\"\n\n (\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n ) = _conditional_join_preliminary_checks(\n df,\n right,\n conditions,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n keep,\n use_numba,\n )\n\n eq_check = False\n le_lt_check = False\n for condition in conditions:\n left_on, right_on, op = condition\n _conditional_join_type_check(df[left_on], right[right_on], op)\n if op == _JoinOperator.STRICTLY_EQUAL.value:\n eq_check = True\n elif op in less_than_join_types.union(greater_than_join_types):\n le_lt_check = True\n\n df.index = range(len(df))\n right.index = range(len(right))\n\n if len(conditions) > 1:\n if eq_check:\n result = _multiple_conditional_join_eq(df, right, conditions, keep)\n elif le_lt_check:\n result = _multiple_conditional_join_le_lt(\n df, right, conditions, keep, use_numba\n )\n else:\n result = _multiple_conditional_join_ne(\n df, right, conditions, keep, use_numba\n )\n else:\n left_on, right_on, op = conditions[0]\n result = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n False,\n keep,\n use_numba,\n )\n\n if result is None:\n result = np.array([], dtype=np.intp), np.array([], dtype=np.intp)\n\n return _create_frame(\n df,\n right,\n *result,\n how,\n sort_by_appearance,\n df_columns,\n right_columns,\n )\n\n\ndef _keep_output(keep: str, left: np.ndarray, right: np.ndarray):\n \"\"\"return indices for left and right index based on the value of `keep`.\"\"\"\n if keep == \"all\":\n return left, right\n grouped = pd.Series(right).groupby(left)\n if keep == \"first\":\n grouped = grouped.min()\n return grouped.index, grouped.array\n grouped = grouped.max()\n return grouped.index, grouped.array\n\n\ndef _less_than_indices(\n left: pd.Series,\n right: pd.Series,\n strict: bool,\n keep: str,\n) -> tuple:\n \"\"\"\n Use binary search to get indices where left\n is less than or equal to right.\n\n If strict is True, then only indices\n where `left` is less than\n (but not equal to) `right` are returned.\n\n A tuple of integer indexes\n for left and right is returned.\n \"\"\"\n\n # no point going through all the hassle\n if left.min() > right.max():\n return None\n\n any_nulls = pd.isna(left)\n if any_nulls.all():\n return None\n if any_nulls.any():\n left = left[~any_nulls]\n any_nulls = pd.isna(right)\n if any_nulls.all():\n return None\n if any_nulls.any():\n right = right[~any_nulls]\n any_nulls = any_nulls.any()\n right_is_sorted = right.is_monotonic_increasing\n if not right_is_sorted:\n right = right.sort_values(kind=\"stable\")\n\n left_index = left.index._values\n left = left._values\n right_index = right.index._values\n right = right._values\n\n search_indices = right.searchsorted(left, side=\"left\")\n\n # if any of the positions in `search_indices`\n # is equal to the length of `right_keys`\n # that means the respective position in `left`\n # has no values from `right` that are less than\n # or equal, and should therefore be discarded\n len_right = 
right.size\n rows_equal = search_indices == len_right\n\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n # the idea here is that if there are any equal values\n # shift to the right to the immediate next position\n # that is not equal\n if strict:\n rows_equal = right[search_indices]\n rows_equal = left == rows_equal\n # replace positions where rows are equal\n # with positions from searchsorted('right')\n # positions from searchsorted('right') will never\n # be equal and will be the furthermost in terms of position\n # example : right -> [2, 2, 2, 3], and we need\n # positions where values are not equal for 2;\n # the furthermost will be 3, and searchsorted('right')\n # will return position 3.\n if rows_equal.any():\n replacements = right.searchsorted(left, side=\"right\")\n # now we can safely replace values\n # with strictly less than positions\n search_indices = np.where(rows_equal, replacements, search_indices)\n # check again if any of the values\n # have become equal to length of right\n # and get rid of them\n rows_equal = search_indices == len_right\n\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n if not search_indices.size:\n return None\n if right_is_sorted and (keep == \"first\"):\n if any_nulls:\n return left_index, right_index[search_indices]\n return left_index, search_indices\n right = [right_index[ind:len_right] for ind in search_indices]\n if keep == \"first\":\n right = [arr.min() for arr in right]\n return left_index, right\n if keep == \"last\":\n right = [arr.max() for arr in right]\n return left_index, right\n right = np.concatenate(right)\n left = np.repeat(left_index, len_right - search_indices)\n return left, right\n\n\ndef _greater_than_indices(\n left: pd.Series,\n right: pd.Series,\n strict: bool,\n multiple_conditions: bool,\n keep: str,\n) -> tuple:\n \"\"\"\n Use binary search to get indices where left\n is greater than or equal to right.\n\n If strict is True, then only indices\n where `left` is greater than\n (but not equal to) `right` are returned.\n\n if multiple_conditions is False, a tuple of integer indexes\n for left and right is returned;\n else a tuple of the index for left, right, as well\n as the positions of left in right is returned.\n \"\"\"\n\n # quick break, avoiding the hassle\n if left.max() < right.min():\n return None\n\n any_nulls = pd.isna(left)\n if any_nulls.all():\n return None\n if any_nulls.any():\n left = left[~any_nulls]\n any_nulls = pd.isna(right)\n if any_nulls.all():\n return None\n if any_nulls.any():\n right = right[~any_nulls]\n any_nulls = any_nulls.any()\n right_is_sorted = right.is_monotonic_increasing\n if not right_is_sorted:\n right = right.sort_values(kind=\"stable\")\n\n left_index = left.index._values\n left = left._values\n right_index = right.index._values\n right = right._values\n\n search_indices = right.searchsorted(left, side=\"right\")\n # if any of the positions in `search_indices`\n # is equal to 0 (less than 1), it implies that\n # left[position] is not greater than any value\n # in right\n rows_equal = search_indices < 1\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n # the idea here is that if there are any equal values\n # shift downwards to the immediate next position\n # that is not equal\n if strict:\n rows_equal = 
right[search_indices - 1]\n rows_equal = left == rows_equal\n # replace positions where rows are equal with\n # searchsorted('left');\n # however there can be scenarios where positions\n # from searchsorted('left') would still be equal;\n # in that case, we shift down by 1\n if rows_equal.any():\n replacements = right.searchsorted(left, side=\"left\")\n # return replacements\n # `left` might result in values equal to len right\n replacements = np.where(\n replacements == right.size, replacements - 1, replacements\n )\n # now we can safely replace values\n # with strictly greater than positions\n search_indices = np.where(rows_equal, replacements, search_indices)\n # any value less than 1 should be discarded\n # since the lowest value for binary search\n # with side='right' should be 1\n rows_equal = search_indices < 1\n if rows_equal.any():\n left = left[~rows_equal]\n left_index = left_index[~rows_equal]\n search_indices = search_indices[~rows_equal]\n\n if not search_indices.size:\n return None\n\n if multiple_conditions:\n return left_index, right_index, search_indices\n if right_is_sorted and (keep == \"last\"):\n if any_nulls:\n return left_index, right_index[search_indices - 1]\n return left_index, search_indices - 1\n right = [right_index[:ind] for ind in search_indices]\n if keep == \"first\":\n right = [arr.min() for arr in right]\n return left_index, right\n if keep == \"last\":\n right = [arr.max() for arr in right]\n return left_index, right\n right = np.concatenate(right)\n left = np.repeat(left_index, search_indices)\n return left, right\n\n\ndef _not_equal_indices(left: pd.Series, right: pd.Series, keep: str) -> tuple:\n \"\"\"\n Use binary search to get indices where\n `left` is exactly not equal to `right`.\n\n It is a combination of strictly less than\n and strictly greater than indices.\n\n A tuple of integer indexes for left and right\n is returned.\n \"\"\"\n\n dummy = np.array([], dtype=int)\n\n # deal with nulls\n l1_nulls = dummy\n r1_nulls = dummy\n l2_nulls = dummy\n r2_nulls = dummy\n any_left_nulls = left.isna()\n any_right_nulls = right.isna()\n if any_left_nulls.any():\n l1_nulls = left.index[any_left_nulls.array]\n l1_nulls = l1_nulls.to_numpy(copy=False)\n r1_nulls = right.index\n # avoid NAN duplicates\n if any_right_nulls.any():\n r1_nulls = r1_nulls[~any_right_nulls.array]\n r1_nulls = r1_nulls.to_numpy(copy=False)\n nulls_count = l1_nulls.size\n # blow up nulls to match length of right\n l1_nulls = np.tile(l1_nulls, r1_nulls.size)\n # ensure length of right matches left\n if nulls_count > 1:\n r1_nulls = np.repeat(r1_nulls, nulls_count)\n if any_right_nulls.any():\n r2_nulls = right.index[any_right_nulls.array]\n r2_nulls = r2_nulls.to_numpy(copy=False)\n l2_nulls = left.index\n nulls_count = r2_nulls.size\n # blow up nulls to match length of left\n r2_nulls = np.tile(r2_nulls, l2_nulls.size)\n # ensure length of left matches right\n if nulls_count > 1:\n l2_nulls = np.repeat(l2_nulls, nulls_count)\n\n l1_nulls = np.concatenate([l1_nulls, l2_nulls])\n r1_nulls = np.concatenate([r1_nulls, r2_nulls])\n\n outcome = _less_than_indices(left, right, strict=True, keep=keep)\n\n if outcome is None:\n lt_left = dummy\n lt_right = dummy\n else:\n lt_left, lt_right = outcome\n\n outcome = _greater_than_indices(\n left, right, strict=True, multiple_conditions=False, keep=keep\n )\n\n if outcome is None:\n gt_left = dummy\n gt_right = dummy\n else:\n gt_left, gt_right = outcome\n\n left = np.concatenate([lt_left, gt_left, l1_nulls])\n right = 
np.concatenate([lt_right, gt_right, r1_nulls])\n\n if (not left.size) & (not right.size):\n return None\n return _keep_output(keep, left, right)\n\n\ndef _generic_func_cond_join(\n left: pd.Series,\n right: pd.Series,\n op: str,\n multiple_conditions: bool,\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Generic function to call any of the individual functions\n (_less_than_indices, _greater_than_indices,\n or _not_equal_indices).\n \"\"\"\n strict = False\n\n if op in {\n _JoinOperator.GREATER_THAN.value,\n _JoinOperator.LESS_THAN.value,\n _JoinOperator.NOT_EQUAL.value,\n }:\n strict = True\n\n if use_numba:\n if op in less_than_join_types:\n op_code = 1\n elif op in greater_than_join_types:\n op_code = 0\n else:\n op_code = -1\n from janitor.functions._numba import _numba_single_join\n\n return _numba_single_join(left, right, strict, keep, op_code)\n\n if op in less_than_join_types:\n return _less_than_indices(left, right, strict, keep)\n if op in greater_than_join_types:\n return _greater_than_indices(\n left, right, strict, multiple_conditions, keep\n )\n if op == _JoinOperator.NOT_EQUAL.value:\n return _not_equal_indices(left, right, keep)\n\n\ndef _generate_indices(\n left_index: np.ndarray,\n right_index: np.ndarray,\n conditions: list[tuple[pd.Series, pd.Series, str]],\n) -> tuple:\n \"\"\"\n Run a for loop to get the final indices.\n This iteratively goes through each condition,\n builds a boolean array,\n and gets indices for rows that meet the condition requirements.\n `conditions` is a list of tuples, where a tuple is of the form:\n `(Series from df, Series from right, operator)`.\n \"\"\"\n\n for condition in conditions:\n left, right, op = condition\n left = left._values[left_index]\n right = right._values[right_index]\n op = operator_map[op]\n mask = op(left, right)\n if not mask.any():\n return None\n if is_extension_array_dtype(mask):\n mask = mask.to_numpy(dtype=bool, na_value=False)\n if not mask.all():\n left_index = left_index[mask]\n right_index = right_index[mask]\n\n return left_index, right_index\n\n\ndef _multiple_conditional_join_ne(\n df: pd.DataFrame,\n right: pd.DataFrame,\n conditions: list[tuple[pd.Series, pd.Series, str]],\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Get indices for multiple conditions,\n where all the operators are `!=`.\n\n Returns a tuple of (left_index, right_index)\n \"\"\"\n\n # currently, there is no optimization option here\n # not equal typically combines less than\n # and greater than, so a lot more rows are returned\n # than just less than or greater than\n\n # here we get indices for the first condition in conditions\n # then use those indices to get the final indices,\n # using _generate_indices\n first, *rest = conditions\n left_on, right_on, op = first\n\n # get indices from the first condition\n indices = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n multiple_conditions=False,\n keep=\"all\",\n use_numba=use_numba,\n )\n if indices is None:\n return None\n\n rest = (\n (df[left_on], right[right_on], op) for left_on, right_on, op in rest\n )\n\n indices = _generate_indices(*indices, rest)\n\n if not indices:\n return None\n\n return _keep_output(keep, *indices)\n\n\ndef _multiple_conditional_join_eq(\n df: pd.DataFrame, right: pd.DataFrame, conditions: list, keep: str\n) -> tuple:\n \"\"\"\n Get indices for multiple conditions,\n if any of the conditions has an `==` operator.\n\n Returns a tuple of (df_index, right_index)\n \"\"\"\n eqs = [\n (left_on, right_on)\n for left_on, right_on, 
op in conditions\n if op == _JoinOperator.STRICTLY_EQUAL.value\n ]\n\n left_on, right_on = zip(*eqs)\n left_on = [*left_on]\n right_on = [*right_on]\n\n rest = (\n (df[left_on], right[right_on], op)\n for left_on, right_on, op in conditions\n if op != _JoinOperator.STRICTLY_EQUAL.value\n )\n\n left_index, right_index = _MergeOperation(\n df,\n right,\n left_on=left_on,\n right_on=right_on,\n sort=False,\n )._get_join_indexers()\n\n if not left_index.size:\n return None\n\n indices = _generate_indices(left_index, right_index, rest)\n\n if not indices:\n return None\n\n return _keep_output(keep, *indices)\n\n\ndef _multiple_conditional_join_le_lt(\n df: pd.DataFrame,\n right: pd.DataFrame,\n conditions: list,\n keep: str,\n use_numba: bool,\n) -> tuple:\n \"\"\"\n Get indices for multiple conditions,\n where `>/>=` or `</<=` is present,\n and there is no `==` operator.\n\n Returns a tuple of (df_index, right_index)\n \"\"\"\n if use_numba:\n from janitor.functions._numba import _numba_pair_le_lt\n\n pairs = [\n condition\n for condition in conditions\n if condition[-1] != _JoinOperator.NOT_EQUAL.value\n ]\n conditions = [\n condition\n for condition in conditions\n if condition[-1] == _JoinOperator.NOT_EQUAL.value\n ]\n if len(pairs) > 2:\n patch = pairs[2:]\n conditions.extend(patch)\n pairs = pairs[:2]\n if len(pairs) < 2:\n # combine with != condition\n # say we have ('start', 'ID', '<='), ('end', 'ID', '!=')\n # we convert conditions to :\n # ('start', 'ID', '<='), ('end', 'ID', '>'), ('end', 'ID', '<')\n # subsequently we run the numba pair fn on the pairs:\n # ('start', 'ID', '<=') & ('end', 'ID', '>')\n # ('start', 'ID', '<=') & ('end', 'ID', '<')\n # finally unionize the outcome of the pairs\n # this only works if there is no null in the != condition\n # thanks to Hypothesis tests for pointing this out\n left_on, right_on, op = conditions[0]\n # check for nulls in the patch\n # and follow this path, only if there are no nulls\n if df[left_on].notna().all() & right[right_on].notna().all():\n patch = (\n left_on,\n right_on,\n _JoinOperator.GREATER_THAN.value,\n ), (\n left_on,\n right_on,\n _JoinOperator.LESS_THAN.value,\n )\n pairs.extend(patch)\n first, middle, last = pairs\n pairs = [(first, middle), (first, last)]\n indices = [\n _numba_pair_le_lt(df, right, pair) for pair in pairs\n ]\n indices = [arr for arr in indices if arr is not None]\n if not indices:\n indices = None\n elif len(indices) == 1:\n indices = indices[0]\n else:\n indices = zip(*indices)\n indices = map(np.concatenate, indices)\n conditions = conditions[1:]\n else:\n left_on, right_on, op = pairs[0]\n indices = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n multiple_conditions=False,\n keep=\"all\",\n use_numba=True,\n )\n else:\n indices = _numba_pair_le_lt(df, right, pairs)\n else:\n # there is an opportunity for optimization for range joins\n # which is usually `lower_value < value < upper_value`\n # or `lower_value < a` and `b < upper_value`\n # intervalindex is not used here, as there are scenarios\n # where there will be overlapping intervals;\n # intervalindex does not offer an efficient way to get\n # the indices for overlaps\n # also, intervalindex covers only the first option\n # i.e => `lower_value < value < upper_value`\n # it does not extend to range joins for different columns\n # i.e => `lower_value < a` and `b < upper_value`\n # the option used for range joins is a simple form\n # dependent on sorting and extensible to overlaps\n # as well as the second option:\n # i.e 
=>`lower_value < a` and `b < upper_value`\n # range joins are also the more common types of non-equi joins\n # the other joins do not have an optimisation opportunity\n # within this space, as far as I know,\n # so a blowup of all the rows is unavoidable.\n\n # The numba version offers optimisations\n # for all types of non-equi joins\n # and is generally much faster\n\n # first step is to get two conditions, if possible\n # where one has a less than operator\n # and the other has a greater than operator\n # get the indices from that\n # and then build the remaining indices,\n # using _generate_indices function\n # the aim of this for loop is to see if there is\n # the possiblity of a range join, and if there is,\n # then use the optimised path\n le_lt = None\n ge_gt = None\n # keep the first match for le_lt or ge_gt\n for condition in conditions:\n *_, op = condition\n if op in less_than_join_types:\n if le_lt:\n continue\n le_lt = condition\n elif op in greater_than_join_types:\n if ge_gt:\n continue\n ge_gt = condition\n if le_lt and ge_gt:\n break\n\n # optimised path\n if le_lt and ge_gt:\n conditions = [\n condition\n for condition in conditions\n if condition not in (ge_gt, le_lt)\n ]\n\n indices = _range_indices(df, right, ge_gt, le_lt)\n\n # no optimised path\n # blow up the rows and prune\n else:\n if le_lt:\n conditions = [\n condition for condition in conditions if condition != le_lt\n ]\n left_on, right_on, op = le_lt\n else:\n conditions = [\n condition for condition in conditions if condition != ge_gt\n ]\n left_on, right_on, op = ge_gt\n\n indices = _generic_func_cond_join(\n df[left_on],\n right[right_on],\n op,\n multiple_conditions=False,\n keep=\"all\",\n use_numba=False,\n )\n\n if not indices:\n return None\n\n if conditions:\n conditions = (\n (df[left_on], right[right_on], op)\n for left_on, right_on, op in conditions\n )\n\n indices = _generate_indices(*indices, conditions)\n if not indices:\n return None\n\n return _keep_output(keep, *indices)\n\n\ndef _range_indices(\n df: pd.DataFrame,\n right: pd.DataFrame,\n first: tuple,\n second: tuple,\n):\n \"\"\"\n Retrieve index positions for range/interval joins.\n\n Idea inspired by article:\n https://www.vertica.com/blog/what-is-a-range-join-and-why-is-it-so-fastba-p223413/\n\n Returns a tuple of (left_index, right_index)\n \"\"\"\n # summary of code for range join:\n # get the positions where start_left is >/>= start_right\n # then within the positions,\n # get the positions where end_left is </<= end_right\n # this should reduce the search space\n\n left_on, right_on, op = first\n left_c = df[left_on]\n right_c = right[right_on]\n left_on, right_on, _ = second\n # get rid of any nulls\n # this is helpful as we can convert extension arrays to numpy arrays safely\n # and simplify the search logic below\n any_nulls = pd.isna(df[left_on])\n if any_nulls.any():\n left_c = left_c[~any_nulls]\n any_nulls = pd.isna(right[right_on])\n if any_nulls.any():\n right_c = right_c[~any_nulls]\n\n strict = False\n if op == _JoinOperator.GREATER_THAN.value:\n strict = True\n\n outcome = _greater_than_indices(\n left_c,\n right_c,\n strict,\n multiple_conditions=True,\n keep=\"all\",\n )\n\n if outcome is None:\n return None\n\n left_index, right_index, search_indices = outcome\n left_on, right_on, op = second\n right_c = right.loc[right_index, right_on]\n left_c = df.loc[left_index, left_on]\n\n left_c = left_c._values\n right_c = right_c._values\n left_c, right_c = _convert_to_numpy_array(left_c, right_c)\n op = operator_map[op]\n 
pos = np.empty(left_c.size, dtype=np.intp)\n\n # better served in a compiled environment\n # where we can break early\n # parallelise the operation, as well as\n # avoid the restrictive fixed size approach of numpy\n # which isnt particularly helpful in a for loop\n for ind in range(left_c.size):\n out = op(left_c[ind], right_c)\n pos[ind] = np.argmax(out)\n\n # no point searching within (a, b)\n # if a == b\n # since range(a, b) yields none\n keep_rows = pos < search_indices\n\n if not keep_rows.any():\n return None\n\n if not keep_rows.all():\n left_index = left_index[keep_rows]\n pos = pos[keep_rows]\n search_indices = search_indices[keep_rows]\n\n repeater = search_indices - pos\n right_index = [\n right_index[start:end] for start, end in zip(pos, search_indices)\n ]\n\n right_index = np.concatenate(right_index)\n left_index = np.repeat(left_index, repeater)\n # here we search for actual positions\n # where left_c is </<= right_c\n # safe to index the arrays, since we are picking the positions\n # which are all in the original `df` and `right`\n # doing this allows some speed gains\n # while still ensuring correctness\n left_c = df[left_on]._values[left_index]\n right_c = right[right_on]._values[right_index]\n ext_arr = is_extension_array_dtype(left_c)\n\n mask = op(left_c, right_c)\n\n if ext_arr:\n mask = mask.to_numpy(dtype=bool, na_value=False)\n\n if not mask.all():\n left_index = left_index[mask]\n right_index = right_index[mask]\n\n return left_index, right_index\n\n\ndef _cond_join_select_columns(columns: Any, df: pd.DataFrame):\n \"\"\"\n Select columns in a DataFrame.\n Optionally rename the columns while selecting.\n Returns a Pandas DataFrame.\n \"\"\"\n\n df = df.select_columns(columns)\n\n if isinstance(columns, dict):\n df.columns = [columns.get(name, name) for name in df]\n\n return df\n\n\ndef _create_multiindex_column(df: pd.DataFrame, right: pd.DataFrame):\n \"\"\"\n Create a MultiIndex column for conditional_join.\n \"\"\"\n header = [np.array([\"left\"]).repeat(df.columns.size)]\n columns = [\n df.columns.get_level_values(n) for n in range(df.columns.nlevels)\n ]\n header.extend(columns)\n df.columns = pd.MultiIndex.from_arrays(header)\n header = [np.array([\"right\"]).repeat(right.columns.size)]\n columns = [\n right.columns.get_level_values(n) for n in range(right.columns.nlevels)\n ]\n header.extend(columns)\n right.columns = pd.MultiIndex.from_arrays(header)\n return df, right\n\n\ndef _create_frame(\n df: pd.DataFrame,\n right: pd.DataFrame,\n left_index: np.ndarray,\n right_index: np.ndarray,\n how: str,\n sort_by_appearance: bool,\n df_columns: Any,\n right_columns: Any,\n):\n \"\"\"\n Create final dataframe\n \"\"\"\n if df_columns:\n df = _cond_join_select_columns(df_columns, df)\n\n if right_columns:\n right = _cond_join_select_columns(right_columns, right)\n\n if set(df.columns).intersection(right.columns):\n df, right = _create_multiindex_column(df, right)\n\n if sort_by_appearance or (left_index.size == 0):\n if how in {\"inner\", \"left\"}:\n right = right.take(right_index)\n right.index = left_index\n else:\n df = df.take(left_index)\n df.index = right_index\n df = pd.merge(\n df,\n right,\n left_index=True,\n right_index=True,\n sort=False,\n copy=False,\n how=how,\n )\n df.index = range(len(df))\n return df\n\n def _inner(\n df: pd.DataFrame,\n right: pd.DataFrame,\n left_index: pd.DataFrame,\n right_index: pd.DataFrame,\n ) -> pd.DataFrame:\n \"\"\"Create DataFrame for inner join\"\"\"\n df = {key: value._values[left_index] for key, value in 
df.items()}\n right = {\n key: value._values[right_index] for key, value in right.items()\n }\n df.update(right)\n return pd.DataFrame(df, copy=False)\n\n if how == \"inner\":\n return _inner(df, right, left_index, right_index)\n\n if how == \"left\":\n df_ = np.bincount(left_index, minlength=df.index.size) == 0\n df_ = df_.nonzero()[0]\n if not df_.size:\n return _inner(df, right, left_index, right_index)\n df_ = df.take(df_)\n df = _inner(df, right, left_index, right_index)\n return pd.concat([df, df_], ignore_index=True)\n if how == \"right\":\n right_ = np.bincount(right_index, minlength=right.index.size) == 0\n right_ = right_.nonzero()[0]\n if not right_.size:\n return _inner(df, right, left_index, right_index)\n right_ = right.take(right_)\n right = _inner(df, right, left_index, right_index)\n return pd.concat([right, right_], ignore_index=True)\n", "path": "janitor/functions/conditional_join.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index c92fee966..16744781d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,7 +27,7 @@ - [ENH] The parameter `column_name` of `change_type` totally supports inputing multi-column now. #1163 @Zeroto521 - [ENH] Fix error when `sort_by_appearance=True` is combined with `dropna=True`. Issue #1168 @samukweku - [ENH] Add explicit default parameter to `case_when` function. Issue #1159 @samukweku - +- [BUG] pandas 1.5.x `_MergeOperation` doesn't have `copy` keyword anymore. Issue #1174 @Zeroto521 ## [v0.23.1] - 2022-05-03 diff --git a/janitor/functions/conditional_join.py b/janitor/functions/conditional_join.py index 417d7c902..2f8438166 100644 --- a/janitor/functions/conditional_join.py +++ b/janitor/functions/conditional_join.py @@ -902,7 +902,6 @@ def _multiple_conditional_join_eq( left_on=left_on, right_on=right_on, sort=False, - copy=False, )._get_join_indexers() if not left_index.size: diff --git a/tests/functions/test_case_when.py b/tests/functions/test_case_when.py index 846072be9..6c239a09f 100644 --- a/tests/functions/test_case_when.py +++ b/tests/functions/test_case_when.py @@ -175,7 +175,7 @@ def test_case_when_default_array(df): ) expected = np.where(df.numbers > 1, df.numbers + 10, default) expected = df.assign(bleh=expected) - assert_frame_equal(result, expected) + assert_frame_equal(result, expected, check_dtype=False) @given(df=categoricaldf_strategy())
kivy__kivy-4149
ModalView background size is not updated
Since https://github.com/kivy/kivy/pull/4136, the ModalView background is not resized when the window size changes. To reproduce, run `kivy/uix/modalview.py`, then resize the window.
![capture](https://cloud.githubusercontent.com/assets/7513068/14303295/159f0434-fbb2-11e5-93b3-8de02ffd4e49.PNG)
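For context, a minimal sketch of the property declaration involved: in the updated `kivy/uix/modalview.py` shown further below, the `_window` property gains `rebind=True`. The class name `SketchModalView` and the kv rule quoted in the comment are illustrative assumptions (the kv style file sizing the background from `self._window.size` is not part of this record); the sketch only shows why `rebind` matters here.

```python
# Illustrative sketch only. SketchModalView is a hypothetical stand-in, not the real class.
from kivy.uix.anchorlayout import AnchorLayout
from kivy.properties import ObjectProperty


class SketchModalView(AnchorLayout):
    # rebind=True: kv rules such as "size: self._window.size if self._window else (0, 0)"
    # re-bind to the new window object whenever _window is assigned, so the rule keeps
    # tracking the attached window's size after the view is opened.
    _window = ObjectProperty(None, allownone=True, rebind=True)
```

Without `rebind`, such a rule re-evaluates when `_window` is assigned but is not re-bound to the new window's `size`, so a later window resize leaves the background at its old size, which would match the behaviour described above.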
[ { "content": "'''\nModalView\n=========\n\n.. versionadded:: 1.4.0\n\nThe :class:`ModalView` widget is used to create modal views. By default, the\nview will cover the whole \"parent\" window.\n\nRemember that the default size of a Widget is size_hint=(1, 1). If you don't\nwant your view to be fullscreen, either use size hints with values lower than\n1 (for instance size_hint=(.8, .8)) or deactivate the size_hint and use fixed\nsize attributes.\n\nExamples\n--------\n\nExample of a simple 400x400 Hello world view::\n\n view = ModalView(size_hint=(None, None), size=(400, 400))\n view.add_widget(Label(text='Hello world'))\n\nBy default, any click outside the view will dismiss it. If you don't\nwant that, you can set :attr:`ModalView.auto_dismiss` to False::\n\n view = ModalView(auto_dismiss=False)\n view.add_widget(Label(text='Hello world'))\n view.open()\n\nTo manually dismiss/close the view, use the :meth:`ModalView.dismiss` method of\nthe ModalView instance::\n\n view.dismiss()\n\nBoth :meth:`ModalView.open` and :meth:`ModalView.dismiss` are bindable. That\nmeans you can directly bind the function to an action, e.g. to a button's\non_press ::\n\n # create content and add it to the view\n content = Button(text='Close me!')\n view = ModalView(auto_dismiss=False)\n view.add_widget(content)\n\n # bind the on_press event of the button to the dismiss function\n content.bind(on_press=view.dismiss)\n\n # open the view\n view.open()\n\n\nModalView Events\n----------------\n\nThere are two events available: `on_open` which is raised when the view is\nopening, and `on_dismiss` which is raised when the view is closed.\nFor `on_dismiss`, you can prevent the view from closing by explictly returning\nTrue from your callback. ::\n\n def my_callback(instance):\n print('ModalView', instance, 'is being dismissed, but is prevented!')\n return True\n view = ModalView()\n view.add_widget(Label(text='Hello world'))\n view.bind(on_dismiss=my_callback)\n view.open()\n\n\n.. versionchanged:: 1.5.0\n The ModalView can be closed by hitting the escape key on the\n keyboard if the :attr:`ModalView.auto_dismiss` property is True (the\n default).\n\n'''\n\n__all__ = ('ModalView', )\n\nfrom kivy.logger import Logger\nfrom kivy.animation import Animation\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \\\n NumericProperty, ListProperty\n\n\nclass ModalView(AnchorLayout):\n '''ModalView class. See module documentation for more information.\n\n :Events:\n `on_open`:\n Fired when the ModalView is opened.\n `on_dismiss`:\n Fired when the ModalView is closed. If the callback returns True,\n the dismiss will be canceled.\n '''\n\n auto_dismiss = BooleanProperty(True)\n '''This property determines if the view is automatically\n dismissed when the user clicks outside it.\n\n :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n attach_to = ObjectProperty(None)\n '''If a widget is set on attach_to, the view will attach to the nearest\n parent window of the widget. 
If none is found, it will attach to the\n main/global Window.\n\n :attr:`attach_to` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n background_color = ListProperty([0, 0, 0, .7])\n '''Background color in the format (r, g, b, a).\n\n :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and\n defaults to [0, 0, 0, .7].\n '''\n\n background = StringProperty(\n 'atlas://data/images/defaulttheme/modalview-background')\n '''Background image of the view used for the view background.\n\n :attr:`background` is a :class:`~kivy.properties.StringProperty` and\n defaults to 'atlas://data/images/defaulttheme/modalview-background'.\n '''\n\n border = ListProperty([16, 16, 16, 16])\n '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`\n graphics instruction. Used for the :attr:`background_normal` and the\n :attr:`background_down` properties. Can be used when using custom\n backgrounds.\n\n It must be a list of four values: (top, right, bottom, left). Read the\n BorderImage instructions for more information about how to use it.\n\n :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to\n (16, 16, 16, 16).\n '''\n\n # Internals properties used for graphical representation.\n\n _anim_alpha = NumericProperty(0)\n\n _anim_duration = NumericProperty(.1)\n\n _window = ObjectProperty(None, allownone=True)\n\n __events__ = ('on_open', 'on_dismiss')\n\n def __init__(self, **kwargs):\n self._parent = None\n super(ModalView, self).__init__(**kwargs)\n\n def _search_window(self):\n # get window to attach to\n window = None\n if self.attach_to is not None:\n window = self.attach_to.get_parent_window()\n if not window:\n window = self.attach_to.get_root_window()\n if not window:\n from kivy.core.window import Window\n window = Window\n return window\n\n def open(self, *largs):\n '''Show the view window from the :attr:`attach_to` widget. If set, it\n will attach to the nearest window. If the widget is not attached to any\n window, the view will attach to the global\n :class:`~kivy.core.window.Window`.\n '''\n if self._window is not None:\n Logger.warning('ModalView: you can only open once.')\n return self\n # search window\n self._window = self._search_window()\n if not self._window:\n Logger.warning('ModalView: cannot open view, no window found.')\n return self\n self._window.add_widget(self)\n self._window.bind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self.center = self._window.center\n self.fbind('center', self._align_center)\n a = Animation(_anim_alpha=1., d=self._anim_duration)\n a.bind(on_complete=lambda *x: self.dispatch('on_open'))\n a.start(self)\n return self\n\n def dismiss(self, *largs, **kwargs):\n '''Close the view if it is open. If you really want to close the\n view, whatever the on_dismiss event returns, you can use the *force*\n argument:\n ::\n\n view = ModalView(...)\n view.dismiss(force=True)\n\n When the view is dismissed, it will be faded out before being\n removed from the parent. 
If you don't want animation, use::\n\n view.dismiss(animation=False)\n\n '''\n if self._window is None:\n return self\n if self.dispatch('on_dismiss') is True:\n if kwargs.get('force', False) is not True:\n return self\n if kwargs.get('animation', True):\n Animation(_anim_alpha=0., d=self._anim_duration).start(self)\n else:\n self._anim_alpha = 0\n self._real_remove_widget()\n return self\n\n def _align_center(self, *l):\n if self._window:\n self.center = self._window.center\n\n def on_touch_down(self, touch):\n if not self.collide_point(*touch.pos):\n if self.auto_dismiss:\n self.dismiss()\n return True\n super(ModalView, self).on_touch_down(touch)\n return True\n\n def on_touch_move(self, touch):\n super(ModalView, self).on_touch_move(touch)\n return True\n\n def on_touch_up(self, touch):\n super(ModalView, self).on_touch_up(touch)\n return True\n\n def on__anim_alpha(self, instance, value):\n if value == 0 and self._window is not None:\n self._real_remove_widget()\n\n def _real_remove_widget(self):\n if self._window is None:\n return\n self._window.remove_widget(self)\n self._window.unbind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self._window = None\n\n def on_open(self):\n pass\n\n def on_dismiss(self):\n pass\n\n def _handle_keyboard(self, window, key, *largs):\n if key == 27 and self.auto_dismiss:\n self.dismiss()\n return True\n\n\nif __name__ == '__main__':\n from kivy.base import runTouchApp\n from kivy.uix.button import Button\n from kivy.uix.label import Label\n from kivy.uix.gridlayout import GridLayout\n from kivy.core.window import Window\n\n # add view\n content = GridLayout(cols=1)\n content.add_widget(Label(text='This is a hello world'))\n view = ModalView(size_hint=(None, None), size=(256, 256),\n auto_dismiss=True)\n view.add_widget(content)\n\n def open_view(btn):\n view.open()\n\n layout = GridLayout(cols=3)\n for x in range(9):\n btn = Button(text='click me %s' % x)\n btn.bind(on_release=view.open)\n layout.add_widget(btn)\n Window.add_widget(layout)\n\n view.open()\n\n runTouchApp()\n", "path": "kivy/uix/modalview.py" } ]
[ { "content": "'''\nModalView\n=========\n\n.. versionadded:: 1.4.0\n\nThe :class:`ModalView` widget is used to create modal views. By default, the\nview will cover the whole \"parent\" window.\n\nRemember that the default size of a Widget is size_hint=(1, 1). If you don't\nwant your view to be fullscreen, either use size hints with values lower than\n1 (for instance size_hint=(.8, .8)) or deactivate the size_hint and use fixed\nsize attributes.\n\nExamples\n--------\n\nExample of a simple 400x400 Hello world view::\n\n view = ModalView(size_hint=(None, None), size=(400, 400))\n view.add_widget(Label(text='Hello world'))\n\nBy default, any click outside the view will dismiss it. If you don't\nwant that, you can set :attr:`ModalView.auto_dismiss` to False::\n\n view = ModalView(auto_dismiss=False)\n view.add_widget(Label(text='Hello world'))\n view.open()\n\nTo manually dismiss/close the view, use the :meth:`ModalView.dismiss` method of\nthe ModalView instance::\n\n view.dismiss()\n\nBoth :meth:`ModalView.open` and :meth:`ModalView.dismiss` are bindable. That\nmeans you can directly bind the function to an action, e.g. to a button's\non_press ::\n\n # create content and add it to the view\n content = Button(text='Close me!')\n view = ModalView(auto_dismiss=False)\n view.add_widget(content)\n\n # bind the on_press event of the button to the dismiss function\n content.bind(on_press=view.dismiss)\n\n # open the view\n view.open()\n\n\nModalView Events\n----------------\n\nThere are two events available: `on_open` which is raised when the view is\nopening, and `on_dismiss` which is raised when the view is closed.\nFor `on_dismiss`, you can prevent the view from closing by explictly returning\nTrue from your callback. ::\n\n def my_callback(instance):\n print('ModalView', instance, 'is being dismissed, but is prevented!')\n return True\n view = ModalView()\n view.add_widget(Label(text='Hello world'))\n view.bind(on_dismiss=my_callback)\n view.open()\n\n\n.. versionchanged:: 1.5.0\n The ModalView can be closed by hitting the escape key on the\n keyboard if the :attr:`ModalView.auto_dismiss` property is True (the\n default).\n\n'''\n\n__all__ = ('ModalView', )\n\nfrom kivy.logger import Logger\nfrom kivy.animation import Animation\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \\\n NumericProperty, ListProperty\n\n\nclass ModalView(AnchorLayout):\n '''ModalView class. See module documentation for more information.\n\n :Events:\n `on_open`:\n Fired when the ModalView is opened.\n `on_dismiss`:\n Fired when the ModalView is closed. If the callback returns True,\n the dismiss will be canceled.\n '''\n\n auto_dismiss = BooleanProperty(True)\n '''This property determines if the view is automatically\n dismissed when the user clicks outside it.\n\n :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n attach_to = ObjectProperty(None)\n '''If a widget is set on attach_to, the view will attach to the nearest\n parent window of the widget. 
If none is found, it will attach to the\n main/global Window.\n\n :attr:`attach_to` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n background_color = ListProperty([0, 0, 0, .7])\n '''Background color in the format (r, g, b, a).\n\n :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and\n defaults to [0, 0, 0, .7].\n '''\n\n background = StringProperty(\n 'atlas://data/images/defaulttheme/modalview-background')\n '''Background image of the view used for the view background.\n\n :attr:`background` is a :class:`~kivy.properties.StringProperty` and\n defaults to 'atlas://data/images/defaulttheme/modalview-background'.\n '''\n\n border = ListProperty([16, 16, 16, 16])\n '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`\n graphics instruction. Used for the :attr:`background_normal` and the\n :attr:`background_down` properties. Can be used when using custom\n backgrounds.\n\n It must be a list of four values: (top, right, bottom, left). Read the\n BorderImage instructions for more information about how to use it.\n\n :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to\n (16, 16, 16, 16).\n '''\n\n # Internals properties used for graphical representation.\n\n _anim_alpha = NumericProperty(0)\n\n _anim_duration = NumericProperty(.1)\n\n _window = ObjectProperty(None, allownone=True, rebind=True)\n\n __events__ = ('on_open', 'on_dismiss')\n\n def __init__(self, **kwargs):\n self._parent = None\n super(ModalView, self).__init__(**kwargs)\n\n def _search_window(self):\n # get window to attach to\n window = None\n if self.attach_to is not None:\n window = self.attach_to.get_parent_window()\n if not window:\n window = self.attach_to.get_root_window()\n if not window:\n from kivy.core.window import Window\n window = Window\n return window\n\n def open(self, *largs):\n '''Show the view window from the :attr:`attach_to` widget. If set, it\n will attach to the nearest window. If the widget is not attached to any\n window, the view will attach to the global\n :class:`~kivy.core.window.Window`.\n '''\n if self._window is not None:\n Logger.warning('ModalView: you can only open once.')\n return self\n # search window\n self._window = self._search_window()\n if not self._window:\n Logger.warning('ModalView: cannot open view, no window found.')\n return self\n self._window.add_widget(self)\n self._window.bind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self.center = self._window.center\n self.fbind('center', self._align_center)\n a = Animation(_anim_alpha=1., d=self._anim_duration)\n a.bind(on_complete=lambda *x: self.dispatch('on_open'))\n a.start(self)\n return self\n\n def dismiss(self, *largs, **kwargs):\n '''Close the view if it is open. If you really want to close the\n view, whatever the on_dismiss event returns, you can use the *force*\n argument:\n ::\n\n view = ModalView(...)\n view.dismiss(force=True)\n\n When the view is dismissed, it will be faded out before being\n removed from the parent. 
If you don't want animation, use::\n\n view.dismiss(animation=False)\n\n '''\n if self._window is None:\n return self\n if self.dispatch('on_dismiss') is True:\n if kwargs.get('force', False) is not True:\n return self\n if kwargs.get('animation', True):\n Animation(_anim_alpha=0., d=self._anim_duration).start(self)\n else:\n self._anim_alpha = 0\n self._real_remove_widget()\n return self\n\n def _align_center(self, *l):\n if self._window:\n self.center = self._window.center\n\n def on_touch_down(self, touch):\n if not self.collide_point(*touch.pos):\n if self.auto_dismiss:\n self.dismiss()\n return True\n super(ModalView, self).on_touch_down(touch)\n return True\n\n def on_touch_move(self, touch):\n super(ModalView, self).on_touch_move(touch)\n return True\n\n def on_touch_up(self, touch):\n super(ModalView, self).on_touch_up(touch)\n return True\n\n def on__anim_alpha(self, instance, value):\n if value == 0 and self._window is not None:\n self._real_remove_widget()\n\n def _real_remove_widget(self):\n if self._window is None:\n return\n self._window.remove_widget(self)\n self._window.unbind(\n on_resize=self._align_center,\n on_keyboard=self._handle_keyboard)\n self._window = None\n\n def on_open(self):\n pass\n\n def on_dismiss(self):\n pass\n\n def _handle_keyboard(self, window, key, *largs):\n if key == 27 and self.auto_dismiss:\n self.dismiss()\n return True\n\n\nif __name__ == '__main__':\n from kivy.base import runTouchApp\n from kivy.uix.button import Button\n from kivy.uix.label import Label\n from kivy.uix.gridlayout import GridLayout\n from kivy.core.window import Window\n\n # add view\n content = GridLayout(cols=1)\n content.add_widget(Label(text='This is a hello world'))\n view = ModalView(size_hint=(None, None), size=(256, 256),\n auto_dismiss=True)\n view.add_widget(content)\n\n def open_view(btn):\n view.open()\n\n layout = GridLayout(cols=3)\n for x in range(9):\n btn = Button(text='click me %s' % x)\n btn.bind(on_release=view.open)\n layout.add_widget(btn)\n Window.add_widget(layout)\n\n view.open()\n\n runTouchApp()\n", "path": "kivy/uix/modalview.py" } ]
diff --git a/kivy/uix/modalview.py b/kivy/uix/modalview.py index 504f8deb42..8c7e20ce99 100644 --- a/kivy/uix/modalview.py +++ b/kivy/uix/modalview.py @@ -143,7 +143,7 @@ class ModalView(AnchorLayout): _anim_duration = NumericProperty(.1) - _window = ObjectProperty(None, allownone=True) + _window = ObjectProperty(None, allownone=True, rebind=True) __events__ = ('on_open', 'on_dismiss')
deis__deis-4163
bug(client): deis apps only shows 100 apps
The client does not use the controller API's pagination, so `deis apps` returns at most the first 100 apps instead of all of them.
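For context, here is a minimal sketch of the pagination behaviour the client is missing, assuming a DRF-style endpoint that returns `count`/`next`/`previous`/`results` and accepts a `page_size` query parameter (as enabled by the `PAGINATE_BY_PARAM` settings change in this record). The controller URL, token value, and helper name are illustrative only; the actual fix lives in the Go client shown in the diff below.

``` python
# Illustrative only: walk a DRF-paginated endpoint page by page instead of
# stopping after the first page of 100 results.
# Assumes the response shape shown in this record (count/next/previous/results)
# and the `page_size` parameter enabled by PAGINATE_BY_PARAM.
import requests


def list_all_apps(controller="http://deis.example.com", token="<token>"):
    url = controller + "/v1/apps/?page_size=100"
    headers = {"Authorization": "token " + token}
    apps = []
    while url:
        page = requests.get(url, headers=headers).json()
        apps.extend(page["results"])  # collect this page's apps
        url = page.get("next")        # follow the next link until exhausted
    return apps
```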
[ { "content": "\"\"\"\nDjango settings for the Deis project.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport os.path\nimport random\nimport string\nimport sys\nimport tempfile\nimport ldap\n\nfrom django_auth_ldap.config import LDAPSearch, GroupOfNamesType\n\n\nPROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n\nCONN_MAX_AGE = 60 * 3\n\n# SECURITY: change this to allowed fqdn's to prevent host poisioning attacks\n# https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['*']\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'UTC'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\nMEDIA_URL = ''\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', 'static'))\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = None # @UnusedVariable\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.request\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"deis.context_processors.site\",\n)\n\nMIDDLEWARE_CLASSES = (\n 
'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'api.middleware.APIVersionMiddleware',\n 'deis.middleware.PlatformVersionMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'deis.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'deis.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\"\n # or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n PROJECT_ROOT + '/web/templates',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n # Third-party apps\n 'django_auth_ldap',\n 'guardian',\n 'json_field',\n 'gunicorn',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'south',\n 'corsheaders',\n # Deis apps\n 'api',\n 'web',\n)\n\nAUTHENTICATION_BACKENDS = (\n \"django_auth_ldap.backend.LDAPBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n)\n\nANONYMOUS_USER_ID = -1\nLOGIN_URL = '/v1/auth/login/'\nLOGIN_REDIRECT_URL = '/'\n\nSOUTH_TESTS_MIGRATE = False\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nCORS_ALLOW_HEADERS = (\n 'content-type',\n 'accept',\n 'origin',\n 'Authorization',\n 'Host',\n)\n\nCORS_EXPOSE_HEADERS = (\n 'X_DEIS_API_VERSION', # DEPRECATED\n 'X_DEIS_PLATFORM_VERSION', # DEPRECATED\n 'X-Deis-Release', # DEPRECATED\n 'DEIS_API_VERSION',\n 'DEIS_PLATFORM_VERSION',\n 'Deis-Release',\n)\n\nREST_FRAMEWORK = {\n 'DEFAULT_MODEL_SERIALIZER_CLASS':\n 'rest_framework.serializers.ModelSerializer',\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'PAGINATE_BY': 100,\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n}\n\n# URLs that end with slashes are ugly\nAPPEND_SLASH = False\n\n# Determine where to send syslog messages\nif os.path.exists('/dev/log'): # Linux rsyslog\n SYSLOG_ADDRESS = '/dev/log'\nelif os.path.exists('/var/log/syslog'): # Mac OS X syslog\n SYSLOG_ADDRESS = '/var/log/syslog'\nelse: # default SysLogHandler address\n SYSLOG_ADDRESS = ('localhost', 514)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'rsyslog': {\n 'class': 'logging.handlers.SysLogHandler',\n 'address': SYSLOG_ADDRESS,\n 'facility': 'local0',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['null'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['console', 'mail_admins'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n 'api': {\n 'handlers': ['console', 'mail_admins', 'rsyslog'],\n 'level': 'INFO',\n 'propagate': True,\n },\n }\n}\nTEST_RUNNER = 'api.tests.SilentDjangoTestSuiteRunner'\n\n# etcd settings\nETCD_HOST, ETCD_PORT = os.environ.get('ETCD', '127.0.0.1:4001').split(',')[0].split(':')\n\n# default deis settings\nDEIS_LOG_DIR = os.path.abspath(os.path.join(__file__, '..', '..', 'logs'))\nLOG_LINES = 1000\nTEMPDIR = tempfile.mkdtemp(prefix='deis')\nDEIS_DOMAIN = 'deisapp.local'\n\n# standard datetime format used for logging, model timestamps, etc.\nDEIS_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S%Z'\n\n# names which apps cannot reserve for routing\nDEIS_RESERVED_NAMES = ['deis']\n\n# default scheduler settings\nSCHEDULER_MODULE = 'scheduler.mock'\nSCHEDULER_TARGET = '' # path to scheduler endpoint (e.g. /var/run/fleet.sock)\nSCHEDULER_AUTH = ''\nSCHEDULER_OPTIONS = {}\n\n# security keys and auth tokens\nSSH_PRIVATE_KEY = '' # used for SSH connections to facilitate \"deis run\"\nSECRET_KEY = os.environ.get('DEIS_SECRET_KEY', 'CHANGEME_sapm$s%upvsw5l_zuy_&29rkywd^78ff(qi')\nBUILDER_KEY = os.environ.get('DEIS_BUILDER_KEY', 'CHANGEME_sapm$s%upvsw5l_zuy_&29rkywd^78ff(qi')\n\n# registry settings\nREGISTRY_MODULE = 'registry.mock'\nREGISTRY_URL = 'http://localhost:5000'\nREGISTRY_HOST = 'localhost'\nREGISTRY_PORT = 5000\n\n# check if we can register users with `deis register`\nREGISTRATION_ENABLED = True\n\n# check if we should enable the web UI module\nWEB_ENABLED = False\n\n# default to sqlite3, but allow postgresql config through envvars\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.' 
+ os.environ.get('DATABASE_ENGINE', 'postgresql_psycopg2'),\n 'NAME': os.environ.get('DATABASE_NAME', 'deis'),\n # randomize test database name so we can run multiple unit tests simultaneously\n 'TEST_NAME': \"unittest-{}\".format(''.join(\n random.choice(string.ascii_letters + string.digits) for _ in range(8)))\n }\n}\n\nAPP_URL_REGEX = '[a-z0-9-]+'\n\n# Honor HTTPS from a trusted proxy\n# see https://docs.djangoproject.com/en/1.6/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Unit Hostname handling.\n# Supports:\n# default - Docker generated hostname\n# application - Hostname based on application unit name (i.e. my-application.v2.web.1)\n# server - Hostname based on CoreOS server hostname\nUNIT_HOSTNAME = 'default'\n\n# LDAP DEFAULT SETTINGS (Overrided by confd later)\nLDAP_ENDPOINT = \"\"\nBIND_DN = \"\"\nBIND_PASSWORD = \"\"\nUSER_BASEDN = \"\"\nUSER_FILTER = \"\"\nGROUP_BASEDN = \"\"\nGROUP_FILTER = \"\"\nGROUP_TYPE = \"\"\n\n# Create a file named \"local_settings.py\" to contain sensitive settings data\n# such as database configuration, admin email, or passwords and keys. It\n# should also be used for any settings which differ between development\n# and production.\n# The local_settings.py file should *not* be checked in to version control.\ntry:\n from .local_settings import * # noqa\nexcept ImportError:\n pass\n\n# have confd_settings within container execution override all others\n# including local_settings (which may end up in the container)\nif os.path.exists('/templates/confd_settings.py'):\n sys.path.append('/templates')\n from confd_settings import * # noqa\n\n# LDAP Backend Configuration\n# Should be always after the confd_settings import.\nLDAP_USER_SEARCH = LDAPSearch(\n base_dn=USER_BASEDN,\n scope=ldap.SCOPE_SUBTREE,\n filterstr=\"(%s=%%(user)s)\" % USER_FILTER\n)\nLDAP_GROUP_SEARCH = LDAPSearch(\n base_dn=GROUP_BASEDN,\n scope=ldap.SCOPE_SUBTREE,\n filterstr=\"(%s=%s)\" % (GROUP_FILTER, GROUP_TYPE)\n)\nAUTH_LDAP_SERVER_URI = LDAP_ENDPOINT\nAUTH_LDAP_BIND_DN = BIND_DN\nAUTH_LDAP_BIND_PASSWORD = BIND_PASSWORD\nAUTH_LDAP_USER_SEARCH = LDAP_USER_SEARCH\nAUTH_LDAP_GROUP_SEARCH = LDAP_GROUP_SEARCH\nAUTH_LDAP_GROUP_TYPE = GroupOfNamesType()\nAUTH_LDAP_USER_ATTR_MAP = {\n \"first_name\": \"givenName\",\n \"last_name\": \"sn\",\n \"email\": \"mail\",\n \"username\": USER_FILTER,\n}\nAUTH_LDAP_GLOBAL_OPTIONS = {\n ldap.OPT_X_TLS_REQUIRE_CERT: False,\n ldap.OPT_REFERRALS: False\n}\nAUTH_LDAP_ALWAYS_UPDATE_USER = True\nAUTH_LDAP_MIRROR_GROUPS = True\nAUTH_LDAP_FIND_GROUP_PERMS = True\nAUTH_LDAP_CACHE_GROUPS = False\n", "path": "controller/deis/settings.py" } ]
[ { "content": "\"\"\"\nDjango settings for the Deis project.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport os.path\nimport random\nimport string\nimport sys\nimport tempfile\nimport ldap\n\nfrom django_auth_ldap.config import LDAPSearch, GroupOfNamesType\n\n\nPROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n\nCONN_MAX_AGE = 60 * 3\n\n# SECURITY: change this to allowed fqdn's to prevent host poisioning attacks\n# https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['*']\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'UTC'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\nMEDIA_URL = ''\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', 'static'))\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = None # @UnusedVariable\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.request\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"deis.context_processors.site\",\n)\n\nMIDDLEWARE_CLASSES = (\n 
'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'api.middleware.APIVersionMiddleware',\n 'deis.middleware.PlatformVersionMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'deis.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'deis.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\"\n # or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n PROJECT_ROOT + '/web/templates',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n # Third-party apps\n 'django_auth_ldap',\n 'guardian',\n 'json_field',\n 'gunicorn',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'south',\n 'corsheaders',\n # Deis apps\n 'api',\n 'web',\n)\n\nAUTHENTICATION_BACKENDS = (\n \"django_auth_ldap.backend.LDAPBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n)\n\nANONYMOUS_USER_ID = -1\nLOGIN_URL = '/v1/auth/login/'\nLOGIN_REDIRECT_URL = '/'\n\nSOUTH_TESTS_MIGRATE = False\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nCORS_ALLOW_HEADERS = (\n 'content-type',\n 'accept',\n 'origin',\n 'Authorization',\n 'Host',\n)\n\nCORS_EXPOSE_HEADERS = (\n 'X_DEIS_API_VERSION', # DEPRECATED\n 'X_DEIS_PLATFORM_VERSION', # DEPRECATED\n 'X-Deis-Release', # DEPRECATED\n 'DEIS_API_VERSION',\n 'DEIS_PLATFORM_VERSION',\n 'Deis-Release',\n)\n\nREST_FRAMEWORK = {\n 'DEFAULT_MODEL_SERIALIZER_CLASS':\n 'rest_framework.serializers.ModelSerializer',\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'PAGINATE_BY': 100,\n 'PAGINATE_BY_PARAM': 'page_size',\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n}\n\n# URLs that end with slashes are ugly\nAPPEND_SLASH = False\n\n# Determine where to send syslog messages\nif os.path.exists('/dev/log'): # Linux rsyslog\n SYSLOG_ADDRESS = '/dev/log'\nelif os.path.exists('/var/log/syslog'): # Mac OS X syslog\n SYSLOG_ADDRESS = '/var/log/syslog'\nelse: # default SysLogHandler address\n SYSLOG_ADDRESS = ('localhost', 514)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'rsyslog': {\n 'class': 'logging.handlers.SysLogHandler',\n 'address': SYSLOG_ADDRESS,\n 'facility': 'local0',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['null'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['console', 'mail_admins'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n 'api': {\n 'handlers': ['console', 'mail_admins', 'rsyslog'],\n 'level': 'INFO',\n 'propagate': True,\n },\n }\n}\nTEST_RUNNER = 'api.tests.SilentDjangoTestSuiteRunner'\n\n# etcd settings\nETCD_HOST, ETCD_PORT = os.environ.get('ETCD', '127.0.0.1:4001').split(',')[0].split(':')\n\n# default deis settings\nDEIS_LOG_DIR = os.path.abspath(os.path.join(__file__, '..', '..', 'logs'))\nLOG_LINES = 1000\nTEMPDIR = tempfile.mkdtemp(prefix='deis')\nDEIS_DOMAIN = 'deisapp.local'\n\n# standard datetime format used for logging, model timestamps, etc.\nDEIS_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S%Z'\n\n# names which apps cannot reserve for routing\nDEIS_RESERVED_NAMES = ['deis']\n\n# default scheduler settings\nSCHEDULER_MODULE = 'scheduler.mock'\nSCHEDULER_TARGET = '' # path to scheduler endpoint (e.g. /var/run/fleet.sock)\nSCHEDULER_AUTH = ''\nSCHEDULER_OPTIONS = {}\n\n# security keys and auth tokens\nSSH_PRIVATE_KEY = '' # used for SSH connections to facilitate \"deis run\"\nSECRET_KEY = os.environ.get('DEIS_SECRET_KEY', 'CHANGEME_sapm$s%upvsw5l_zuy_&29rkywd^78ff(qi')\nBUILDER_KEY = os.environ.get('DEIS_BUILDER_KEY', 'CHANGEME_sapm$s%upvsw5l_zuy_&29rkywd^78ff(qi')\n\n# registry settings\nREGISTRY_MODULE = 'registry.mock'\nREGISTRY_URL = 'http://localhost:5000'\nREGISTRY_HOST = 'localhost'\nREGISTRY_PORT = 5000\n\n# check if we can register users with `deis register`\nREGISTRATION_ENABLED = True\n\n# check if we should enable the web UI module\nWEB_ENABLED = False\n\n# default to sqlite3, but allow postgresql config through envvars\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.' 
+ os.environ.get('DATABASE_ENGINE', 'postgresql_psycopg2'),\n 'NAME': os.environ.get('DATABASE_NAME', 'deis'),\n # randomize test database name so we can run multiple unit tests simultaneously\n 'TEST_NAME': \"unittest-{}\".format(''.join(\n random.choice(string.ascii_letters + string.digits) for _ in range(8)))\n }\n}\n\nAPP_URL_REGEX = '[a-z0-9-]+'\n\n# Honor HTTPS from a trusted proxy\n# see https://docs.djangoproject.com/en/1.6/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Unit Hostname handling.\n# Supports:\n# default - Docker generated hostname\n# application - Hostname based on application unit name (i.e. my-application.v2.web.1)\n# server - Hostname based on CoreOS server hostname\nUNIT_HOSTNAME = 'default'\n\n# LDAP DEFAULT SETTINGS (Overrided by confd later)\nLDAP_ENDPOINT = \"\"\nBIND_DN = \"\"\nBIND_PASSWORD = \"\"\nUSER_BASEDN = \"\"\nUSER_FILTER = \"\"\nGROUP_BASEDN = \"\"\nGROUP_FILTER = \"\"\nGROUP_TYPE = \"\"\n\n# Create a file named \"local_settings.py\" to contain sensitive settings data\n# such as database configuration, admin email, or passwords and keys. It\n# should also be used for any settings which differ between development\n# and production.\n# The local_settings.py file should *not* be checked in to version control.\ntry:\n from .local_settings import * # noqa\nexcept ImportError:\n pass\n\n# have confd_settings within container execution override all others\n# including local_settings (which may end up in the container)\nif os.path.exists('/templates/confd_settings.py'):\n sys.path.append('/templates')\n from confd_settings import * # noqa\n\n# LDAP Backend Configuration\n# Should be always after the confd_settings import.\nLDAP_USER_SEARCH = LDAPSearch(\n base_dn=USER_BASEDN,\n scope=ldap.SCOPE_SUBTREE,\n filterstr=\"(%s=%%(user)s)\" % USER_FILTER\n)\nLDAP_GROUP_SEARCH = LDAPSearch(\n base_dn=GROUP_BASEDN,\n scope=ldap.SCOPE_SUBTREE,\n filterstr=\"(%s=%s)\" % (GROUP_FILTER, GROUP_TYPE)\n)\nAUTH_LDAP_SERVER_URI = LDAP_ENDPOINT\nAUTH_LDAP_BIND_DN = BIND_DN\nAUTH_LDAP_BIND_PASSWORD = BIND_PASSWORD\nAUTH_LDAP_USER_SEARCH = LDAP_USER_SEARCH\nAUTH_LDAP_GROUP_SEARCH = LDAP_GROUP_SEARCH\nAUTH_LDAP_GROUP_TYPE = GroupOfNamesType()\nAUTH_LDAP_USER_ATTR_MAP = {\n \"first_name\": \"givenName\",\n \"last_name\": \"sn\",\n \"email\": \"mail\",\n \"username\": USER_FILTER,\n}\nAUTH_LDAP_GLOBAL_OPTIONS = {\n ldap.OPT_X_TLS_REQUIRE_CERT: False,\n ldap.OPT_REFERRALS: False\n}\nAUTH_LDAP_ALWAYS_UPDATE_USER = True\nAUTH_LDAP_MIRROR_GROUPS = True\nAUTH_LDAP_FIND_GROUP_PERMS = True\nAUTH_LDAP_CACHE_GROUPS = False\n", "path": "controller/deis/settings.py" } ]
diff --git a/client-go/cmd/apps.go b/client-go/cmd/apps.go index c05aca0829..f6ecb687f5 100644 --- a/client-go/cmd/apps.go +++ b/client-go/cmd/apps.go @@ -55,20 +55,24 @@ func AppCreate(id string, buildpack string, remote string, noRemote bool) error } // AppsList lists apps on the Deis controller. -func AppsList() error { +func AppsList(results int) error { c, err := client.New() if err != nil { return err } - apps, err := apps.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + apps, count, err := apps.List(c, results) if err != nil { return err } - fmt.Println("=== Apps") + fmt.Printf("=== Apps%s", limitCount(len(apps), count)) for _, app := range apps { fmt.Println(app.ID) diff --git a/client-go/cmd/builds.go b/client-go/cmd/builds.go index d1f1006fcf..2f6a93881d 100644 --- a/client-go/cmd/builds.go +++ b/client-go/cmd/builds.go @@ -9,20 +9,24 @@ import ( ) // BuildsList lists an app's builds. -func BuildsList(appID string) error { +func BuildsList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - builds, err := builds.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + builds, count, err := builds.List(c, appID, results) if err != nil { return err } - fmt.Printf("=== %s Builds\n", appID) + fmt.Printf("=== %s Builds%s", appID, limitCount(len(builds), count)) for _, build := range builds { fmt.Println(build.UUID, build.Created) diff --git a/client-go/cmd/certs.go b/client-go/cmd/certs.go index 5e787c1564..9f63705f2b 100644 --- a/client-go/cmd/certs.go +++ b/client-go/cmd/certs.go @@ -12,14 +12,18 @@ import ( ) // CertsList lists certs registered with the controller. -func CertsList() error { +func CertsList(results int) error { c, err := client.New() if err != nil { return err } - certList, err := certs.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + certList, _, err := certs.List(c, results) if err != nil { return err diff --git a/client-go/cmd/domains.go b/client-go/cmd/domains.go index 13f3b0ee95..13e95a5965 100644 --- a/client-go/cmd/domains.go +++ b/client-go/cmd/domains.go @@ -7,20 +7,24 @@ import ( ) // DomainsList lists domains registered with an app. -func DomainsList(appID string) error { +func DomainsList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - domains, err := domains.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + domains, count, err := domains.List(c, appID, results) if err != nil { return err } - fmt.Printf("=== %s Domains\n", appID) + fmt.Printf("=== %s Domains%s", appID, limitCount(len(domains), count)) for _, domain := range domains { fmt.Println(domain.Domain) diff --git a/client-go/cmd/keys.go b/client-go/cmd/keys.go index 3b5d67b44c..b1855a719d 100644 --- a/client-go/cmd/keys.go +++ b/client-go/cmd/keys.go @@ -14,20 +14,24 @@ import ( ) // KeysList lists a user's keys. 
-func KeysList() error { +func KeysList(results int) error { c, err := client.New() if err != nil { return err } - keys, err := keys.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + keys, count, err := keys.List(c, results) if err != nil { return err } - fmt.Printf("=== %s Keys\n", c.Username) + fmt.Printf("=== %s Keys%s", c.Username, limitCount(len(keys), count)) for _, key := range keys { fmt.Printf("%s %s...%s\n", key.ID, key.Public[:16], key.Public[len(key.Public)-10:]) diff --git a/client-go/cmd/perms.go b/client-go/cmd/perms.go index 7930bf0c32..a8fbdd9e45 100644 --- a/client-go/cmd/perms.go +++ b/client-go/cmd/perms.go @@ -9,7 +9,7 @@ import ( ) // PermsList prints which users have permissions. -func PermsList(appID string, admin bool) error { +func PermsList(appID string, admin bool, results int) error { c, appID, err := permsLoad(appID, admin) if err != nil { @@ -17,9 +17,13 @@ func PermsList(appID string, admin bool) error { } var users []string + var count int if admin { - users, err = perms.ListAdmins(c) + if results == defaultLimit { + results = c.ResponseLimit + } + users, count, err = perms.ListAdmins(c, results) } else { users, err = perms.List(c, appID) } @@ -29,7 +33,7 @@ func PermsList(appID string, admin bool) error { } if admin { - fmt.Println("=== Administrators") + fmt.Printf("=== Administrators%s", limitCount(len(users), count)) } else { fmt.Printf("=== %s's Users\n", appID) } diff --git a/client-go/cmd/ps.go b/client-go/cmd/ps.go index f950f0f4b8..4bd6ab77ca 100644 --- a/client-go/cmd/ps.go +++ b/client-go/cmd/ps.go @@ -12,20 +12,24 @@ import ( ) // PsList lists an app's processes. -func PsList(appID string) error { +func PsList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - processes, err := ps.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + processes, count, err := ps.List(c, appID, results) if err != nil { return err } - printProcesses(appID, processes) + printProcesses(appID, processes, count) return nil } @@ -69,13 +73,13 @@ func PsScale(appID string, targets []string) error { fmt.Printf("done in %ds\n", int(time.Since(startTime).Seconds())) - processes, err := ps.List(c, appID) + processes, count, err := ps.List(c, appID, c.ResponseLimit) if err != nil { return err } - printProcesses(appID, processes) + printProcesses(appID, processes, count) return nil } @@ -119,20 +123,20 @@ func PsRestart(appID, target string) error { fmt.Printf("done in %ds\n", int(time.Since(startTime).Seconds())) - processes, err := ps.List(c, appID) + processes, count, err := ps.List(c, appID, c.ResponseLimit) if err != nil { return err } - printProcesses(appID, processes) + printProcesses(appID, processes, count) return nil } -func printProcesses(appID string, processes []api.Process) { +func printProcesses(appID string, processes []api.Process, count int) { psMap := ps.ByType(processes) - fmt.Printf("=== %s Processes\n", appID) + fmt.Printf("=== %s Processes%s", appID, limitCount(len(processes), count)) for psType, procs := range psMap { fmt.Printf("--- %s:\n", psType) diff --git a/client-go/cmd/releases.go b/client-go/cmd/releases.go index 4350799e45..366164c413 100644 --- a/client-go/cmd/releases.go +++ b/client-go/cmd/releases.go @@ -9,16 +9,20 @@ import ( ) // ReleasesList lists an app's releases. 
-func ReleasesList(appID string) error { +func ReleasesList(appID string, results int) error { c, appID, err := load(appID) if err != nil { return err } - releases, err := releases.List(c, appID) + if results == defaultLimit { + results = c.ResponseLimit + } + + releases, count, err := releases.List(c, appID, results) - fmt.Printf("=== %s Releases\n", appID) + fmt.Printf("=== %s Releases%s", appID, limitCount(len(releases), count)) w := new(tabwriter.Writer) diff --git a/client-go/cmd/users.go b/client-go/cmd/users.go index 87e97e43ea..8b79ac4657 100644 --- a/client-go/cmd/users.go +++ b/client-go/cmd/users.go @@ -8,20 +8,24 @@ import ( ) // UsersList lists users registered with the controller. -func UsersList() error { +func UsersList(results int) error { c, err := client.New() if err != nil { return err } - users, err := users.List(c) + if results == defaultLimit { + results = c.ResponseLimit + } + + users, count, err := users.List(c, results) if err != nil { return err } - fmt.Println("=== Users") + fmt.Printf("=== Users%s", limitCount(len(users), count)) for _, user := range users { fmt.Println(user.Username) diff --git a/client-go/cmd/utils.go b/client-go/cmd/utils.go index 4e712c1214..8e3fbf4f5b 100644 --- a/client-go/cmd/utils.go +++ b/client-go/cmd/utils.go @@ -10,6 +10,8 @@ import ( "github.com/deis/deis/client-go/pkg/git" ) +var defaultLimit = -1 + func progress() chan bool { frames := []string{"...", "o..", ".o.", "..o"} backspaces := strings.Repeat("\b", 3) @@ -78,3 +80,11 @@ func drinkOfChoice() string { return drink } + +func limitCount(objs, total int) string { + if objs == total { + return "\n" + } + + return fmt.Sprintf(" (%d of %d)\n", objs, total) +} diff --git a/client-go/controller/api/apps.go b/client-go/controller/api/apps.go index d76a7b9e65..600a4696f9 100644 --- a/client-go/controller/api/apps.go +++ b/client-go/controller/api/apps.go @@ -10,14 +10,6 @@ type App struct { UUID string `json:"uuid"` } -// Apps is the definition of GET /v1/apps/. -type Apps struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Apps []App `json:"results"` -} - // AppCreateRequest is the definition of POST /v1/apps/. type AppCreateRequest struct { ID string `json:"id,omitempty"` diff --git a/client-go/controller/api/builds.go b/client-go/controller/api/builds.go index 19fc94c333..1e239a115b 100644 --- a/client-go/controller/api/builds.go +++ b/client-go/controller/api/builds.go @@ -13,14 +13,6 @@ type Build struct { UUID string `json:"uuid"` } -// Builds is the structure of GET /v1/apps/<app id>/builds/. -type Builds struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Builds []Build `json:"results"` -} - // CreateBuildRequest is the structure of POST /v1/apps/<app id>/builds/. type CreateBuildRequest struct { Image string `json:"image"` diff --git a/client-go/controller/api/certs.go b/client-go/controller/api/certs.go index 5cea946e86..53dc84711d 100644 --- a/client-go/controller/api/certs.go +++ b/client-go/controller/api/certs.go @@ -12,14 +12,6 @@ type Cert struct { ID int `json:"id,omitempty"` } -// Certs is the definition of GET /v1/certs/. -type Certs struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Certs []Cert `json:"results"` -} - // CertCreateRequest is the definition of POST /v1/certs/. 
type CertCreateRequest struct { Certificate string `json:"certificate"` diff --git a/client-go/controller/api/domains.go b/client-go/controller/api/domains.go index bb13fad87a..542e1699f6 100644 --- a/client-go/controller/api/domains.go +++ b/client-go/controller/api/domains.go @@ -9,14 +9,6 @@ type Domain struct { Updated string `json:"updated"` } -// Domains is the structure of GET /v1/app/<app id>/domains/. -type Domains struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Domains []Domain `json:"results"` -} - // DomainCreateRequest is the structure of POST /v1/app/<app id>/domains/. type DomainCreateRequest struct { Domain string `json:"domain"` diff --git a/client-go/controller/api/keys.go b/client-go/controller/api/keys.go index ff89e03a2e..eb5b11c7d7 100644 --- a/client-go/controller/api/keys.go +++ b/client-go/controller/api/keys.go @@ -10,14 +10,6 @@ type Key struct { UUID string `json:"uuid"` } -// Keys is the definition of GET /v1/keys/. -type Keys struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Keys []Key `json:"results"` -} - // KeyCreateRequest is the definition of POST /v1/keys/. type KeyCreateRequest struct { ID string `json:"id"` diff --git a/client-go/controller/api/perms.go b/client-go/controller/api/perms.go index 707bf9ff06..04b71753a9 100644 --- a/client-go/controller/api/perms.go +++ b/client-go/controller/api/perms.go @@ -5,16 +5,6 @@ type PermsAppResponse struct { Users []string `json:"users"` } -// PermsAdminResponse is the definition of GET /v1/admin/perms/. -type PermsAdminResponse struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Users []struct { - Username string `json:"username"` - } `json:"results"` -} - // PermsRequest is the definition of a requst on /perms/. type PermsRequest struct { Username string `json:"username"` diff --git a/client-go/controller/api/ps.go b/client-go/controller/api/ps.go index 1127424a0d..56c7841aad 100644 --- a/client-go/controller/api/ps.go +++ b/client-go/controller/api/ps.go @@ -12,11 +12,3 @@ type Process struct { Num int `json:"num"` State string `json:"state"` } - -// Processes defines the structure of processes. -type Processes struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Processes []Process `json:"results"` -} diff --git a/client-go/controller/api/releases.go b/client-go/controller/api/releases.go index 0274982467..3917a6c3e3 100644 --- a/client-go/controller/api/releases.go +++ b/client-go/controller/api/releases.go @@ -13,14 +13,6 @@ type Release struct { Version int `json:"version"` } -// Releases is the definition of GET /v1/apps/<app id>/releases/. -type Releases struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Releases []Release `json:"results"` -} - // ReleaseRollback is the defenition of POST /v1/apps/<app id>/releases/. type ReleaseRollback struct { Version int `json:"version"` diff --git a/client-go/controller/api/users.go b/client-go/controller/api/users.go index 307127c855..e83a63787d 100644 --- a/client-go/controller/api/users.go +++ b/client-go/controller/api/users.go @@ -13,11 +13,3 @@ type User struct { IsActive bool `json:"is_active"` DateJoined string `json:"date_joined"` } - -// Users is the definition of GET /v1/users. 
-type Users struct { - Count int `json:"count"` - Next int `json:"next"` - Previous int `json:"previous"` - Users []User `json:"results"` -} diff --git a/client-go/controller/client/client.go b/client-go/controller/client/client.go index ebb2683360..d13d63bfe3 100644 --- a/client-go/controller/client/client.go +++ b/client-go/controller/client/client.go @@ -26,13 +26,21 @@ type Client struct { // Username is the name of the user performing requests against the API. Username string + + // ResponseLimit is the number of results to return on requests that can be limited. + ResponseLimit int } +// DefaultResponseLimit is the default number of responses to return on requests that can +// be limited. +var DefaultResponseLimit = 100 + type settingsFile struct { Username string `json:"username"` SslVerify bool `json:"ssl_verify"` Controller string `json:"controller"` Token string `json:"token"` + Limit int `json:"response_limit"` } // New creates a new client from a settings file. @@ -62,15 +70,23 @@ func New() (*Client, error) { return nil, err } + if settings.Limit <= 0 { + settings.Limit = DefaultResponseLimit + } + return &Client{HTTPClient: CreateHTTPClient(settings.SslVerify), SSLVerify: settings.SslVerify, - ControllerURL: *u, Token: settings.Token, Username: settings.Username}, nil + ControllerURL: *u, Token: settings.Token, Username: settings.Username, + ResponseLimit: settings.Limit}, nil } // Save settings to a file func (c Client) Save() error { - settings := settingsFile{Username: c.Username, - SslVerify: c.SSLVerify, - Controller: c.ControllerURL.String(), Token: c.Token} + settings := settingsFile{Username: c.Username, SslVerify: c.SSLVerify, + Controller: c.ControllerURL.String(), Token: c.Token, Limit: c.ResponseLimit} + + if settings.Limit <= 0 { + settings.Limit = DefaultResponseLimit + } settingsContents, err := json.Marshal(settings) diff --git a/client-go/controller/client/client_test.go b/client-go/controller/client/client_test.go index 952082b06e..73259520ed 100644 --- a/client-go/controller/client/client_test.go +++ b/client-go/controller/client/client_test.go @@ -8,7 +8,7 @@ import ( "testing" ) -const sFile string = `{"username":"t","ssl_verify":false,"controller":"http://d.t","token":"a"}` +const sFile string = `{"username":"t","ssl_verify":false,"controller":"http://d.t","token":"a","response_limit": 50}` func createTempProfile(contents string) error { name, err := ioutil.TempDir("", "client") @@ -62,9 +62,15 @@ func TestLoadSave(t *testing.T) { t.Errorf("Expected %s, Got %s", expected, client.ControllerURL.String()) } + expectedI := 50 + if client.ResponseLimit != expectedI { + t.Errorf("Expected %d, Got %d", expectedI, client.ResponseLimit) + } + client.SSLVerify = true client.Token = "b" client.Username = "c" + client.ResponseLimit = 0 u, err := url.Parse("http://deis.test") @@ -99,6 +105,11 @@ func TestLoadSave(t *testing.T) { if client.ControllerURL.String() != expected { t.Errorf("Expected %s, Got %s", expected, client.ControllerURL.String()) } + + expectedI = 100 + if client.ResponseLimit != expectedI { + t.Errorf("Expected %d, Got %d", expectedI, client.ResponseLimit) + } } func TestDeleteSettings(t *testing.T) { diff --git a/client-go/controller/client/http.go b/client-go/controller/client/http.go index 03f0e43ab5..67433666d0 100644 --- a/client-go/controller/client/http.go +++ b/client-go/controller/client/http.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "reflect" + "strconv" "strings" "github.com/deis/deis/version" @@ -60,6 +61,28 @@ func (c Client) 
Request(method string, path string, body []byte) (*http.Response return res, nil } +// LimitedRequest allows limiting the number of responses in a request. +func (c Client) LimitedRequest(path string, results int) (string, int, error) { + body, err := c.BasicRequest("GET", path+"?page_size="+strconv.Itoa(results), nil) + + if err != nil { + return "", -1, err + } + + res := make(map[string]interface{}) + if err = json.Unmarshal([]byte(body), &res); err != nil { + return "", -1, err + } + + out, err := json.Marshal(res["results"].([]interface{})) + + if err != nil { + return "", -1, err + } + + return string(out), int(res["count"].(float64)), nil +} + // BasicRequest makes a simple http request on the controller. func (c Client) BasicRequest(method string, path string, body []byte) (string, error) { res, err := c.Request(method, path, body) diff --git a/client-go/controller/client/http_test.go b/client-go/controller/client/http_test.go index 4144e1ac4e..67381cdd5c 100644 --- a/client-go/controller/client/http_test.go +++ b/client-go/controller/client/http_test.go @@ -13,6 +13,22 @@ import ( type fakeHTTPServer struct{} +const limitedFixture string = ` +{ + "count": 4, + "next": "http://replaced.com/limited2/", + "previous": null, + "results": [ + { + "test": "foo" + }, + { + "test": "bar" + } + ] +} +` + func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { res.Header().Add("DEIS_API_VERSION", version.APIVersion) @@ -31,6 +47,11 @@ func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { return } + if req.URL.Path == "/limited/" && req.Method == "GET" && req.URL.RawQuery == "page_size=2" { + res.Write([]byte(limitedFixture)) + return + } + if req.URL.Path == "/basic/" && req.Method == "POST" { eT := "token abc" if req.Header.Get("Authorization") != eT { @@ -180,3 +201,38 @@ func TestCheckErrorsReturnsNil(t *testing.T) { } } } + +func TestLimitedRequest(t *testing.T) { + t.Parallel() + + handler := fakeHTTPServer{} + server := httptest.NewServer(handler) + defer server.Close() + + u, err := url.Parse(server.URL) + + if err != nil { + t.Fatal(err) + } + + httpClient := CreateHTTPClient(false) + + client := Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} + + expected := `[{"test":"foo"},{"test":"bar"}]` + expectedC := 4 + + actual, count, err := client.LimitedRequest("/limited/", 2) + + if err != nil { + t.Fatal(err) + } + + if count != expectedC { + t.Errorf("Expected %d, Got %d", expectedC, count) + } + + if actual != expected { + t.Errorf("Expected %s, Got %s", expected, actual) + } +} diff --git a/client-go/controller/models/apps/apps.go b/client-go/controller/models/apps/apps.go index 57bd2f915a..bc705a8f84 100644 --- a/client-go/controller/models/apps/apps.go +++ b/client-go/controller/models/apps/apps.go @@ -11,19 +11,19 @@ import ( ) // List lists apps on a Deis controller. -func List(c *client.Client) ([]api.App, error) { - body, err := c.BasicRequest("GET", "/v1/apps/", nil) +func List(c *client.Client, results int) ([]api.App, int, error) { + body, count, err := c.LimitedRequest("/v1/apps/", results) if err != nil { - return []api.App{}, err + return []api.App{}, -1, err } - apps := api.Apps{} + var apps []api.App if err = json.Unmarshal([]byte(body), &apps); err != nil { - return []api.App{}, err + return []api.App{}, -1, err } - return apps.Apps, nil + return apps, count, nil } // New creates a new app. 
diff --git a/client-go/controller/models/apps/apps_test.go b/client-go/controller/models/apps/apps_test.go index 886ce44ddb..d4d3112877 100644 --- a/client-go/controller/models/apps/apps_test.go +++ b/client-go/controller/models/apps/apps_test.go @@ -291,7 +291,7 @@ func TestAppsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/builds/builds.go b/client-go/controller/models/builds/builds.go index 885251f941..b2f60e27fd 100644 --- a/client-go/controller/models/builds/builds.go +++ b/client-go/controller/models/builds/builds.go @@ -9,20 +9,20 @@ import ( ) // List lists an app's builds. -func List(c *client.Client, appID string) ([]api.Build, error) { +func List(c *client.Client, appID string, results int) ([]api.Build, int, error) { u := fmt.Sprintf("/v1/apps/%s/builds/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Build{}, err + return []api.Build{}, -1, err } - builds := api.Builds{} + var builds []api.Build if err = json.Unmarshal([]byte(body), &builds); err != nil { - return []api.Build{}, err + return []api.Build{}, -1, err } - return builds.Builds, nil + return builds, count, nil } // New creates a build for an app. diff --git a/client-go/controller/models/builds/builds_test.go b/client-go/controller/models/builds/builds_test.go index 49a38eb04a..a50f4ec33d 100644 --- a/client-go/controller/models/builds/builds_test.go +++ b/client-go/controller/models/builds/builds_test.go @@ -122,7 +122,7 @@ func TestBuildsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/certs/certs.go b/client-go/controller/models/certs/certs.go index dac5f6f1d2..e6ea69e434 100644 --- a/client-go/controller/models/certs/certs.go +++ b/client-go/controller/models/certs/certs.go @@ -9,19 +9,19 @@ import ( ) // List certs registered with the controller. -func List(c *client.Client) ([]api.Cert, error) { - body, err := c.BasicRequest("GET", "/v1/certs/", nil) +func List(c *client.Client, results int) ([]api.Cert, int, error) { + body, count, err := c.LimitedRequest("/v1/certs/", results) if err != nil { - return []api.Cert{}, err + return []api.Cert{}, -1, err } - res := api.Certs{} + var res []api.Cert if err = json.Unmarshal([]byte(body), &res); err != nil { - return []api.Cert{}, err + return []api.Cert{}, -1, err } - return res.Certs, nil + return res, count, nil } // New creates a new cert. 
diff --git a/client-go/controller/models/certs/certs_test.go b/client-go/controller/models/certs/certs_test.go index 909dde06f7..78e10ad6e8 100644 --- a/client-go/controller/models/certs/certs_test.go +++ b/client-go/controller/models/certs/certs_test.go @@ -104,7 +104,7 @@ func TestCertsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/domains/domains.go b/client-go/controller/models/domains/domains.go index aa98adce3b..793f8fe673 100644 --- a/client-go/controller/models/domains/domains.go +++ b/client-go/controller/models/domains/domains.go @@ -9,20 +9,20 @@ import ( ) // List domains registered with an app. -func List(c *client.Client, appID string) ([]api.Domain, error) { +func List(c *client.Client, appID string, results int) ([]api.Domain, int, error) { u := fmt.Sprintf("/v1/apps/%s/domains/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Domain{}, err + return []api.Domain{}, -1, err } - domains := api.Domains{} + var domains []api.Domain if err = json.Unmarshal([]byte(body), &domains); err != nil { - return []api.Domain{}, err + return []api.Domain{}, -1, err } - return domains.Domains, nil + return domains, count, nil } // New adds a domain to an app. diff --git a/client-go/controller/models/domains/domains_test.go b/client-go/controller/models/domains/domains_test.go index 83724c9d40..7c2a9a1676 100644 --- a/client-go/controller/models/domains/domains_test.go +++ b/client-go/controller/models/domains/domains_test.go @@ -110,7 +110,7 @@ func TestDomainsList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/keys/keys.go b/client-go/controller/models/keys/keys.go index e164cf55d6..c82b6f39ed 100644 --- a/client-go/controller/models/keys/keys.go +++ b/client-go/controller/models/keys/keys.go @@ -9,19 +9,19 @@ import ( ) // List keys on a controller. -func List(c *client.Client) ([]api.Key, error) { - body, err := c.BasicRequest("GET", "/v1/keys/", nil) +func List(c *client.Client, results int) ([]api.Key, int, error) { + body, count, err := c.LimitedRequest("/v1/keys/", results) if err != nil { - return []api.Key{}, err + return []api.Key{}, -1, err } - keys := api.Keys{} + var keys []api.Key if err = json.Unmarshal([]byte(body), &keys); err != nil { - return []api.Key{}, err + return []api.Key{}, -1, err } - return keys.Keys, nil + return keys, count, nil } // New creates a new key. 
diff --git a/client-go/controller/models/keys/keys_test.go b/client-go/controller/models/keys/keys_test.go index 698f024118..e5f93777a3 100644 --- a/client-go/controller/models/keys/keys_test.go +++ b/client-go/controller/models/keys/keys_test.go @@ -113,7 +113,7 @@ func TestKeysList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/perms/perms.go b/client-go/controller/models/perms/perms.go index fe01ce974d..a5d497f2d5 100644 --- a/client-go/controller/models/perms/perms.go +++ b/client-go/controller/models/perms/perms.go @@ -10,13 +10,13 @@ import ( // List users that can access an app. func List(c *client.Client, appID string) ([]string, error) { - body, err := doList(c, fmt.Sprintf("/v1/apps/%s/perms/", appID)) + body, err := c.BasicRequest("GET", fmt.Sprintf("/v1/apps/%s/perms/", appID), nil) if err != nil { return []string{}, err } - users := api.PermsAppResponse{} + var users api.PermsAppResponse if err = json.Unmarshal([]byte(body), &users); err != nil { return []string{}, err } @@ -25,35 +25,25 @@ func List(c *client.Client, appID string) ([]string, error) { } // ListAdmins lists administrators. -func ListAdmins(c *client.Client) ([]string, error) { - body, err := doList(c, "/v1/admin/perms/") +func ListAdmins(c *client.Client, results int) ([]string, int, error) { + body, count, err := c.LimitedRequest("/v1/admin/perms/", results) if err != nil { - return []string{}, err + return []string{}, -1, err } - users := api.PermsAdminResponse{} + var users []api.PermsRequest if err = json.Unmarshal([]byte(body), &users); err != nil { - return []string{}, err + return []string{}, -1, err } usersList := []string{} - for _, user := range users.Users { + for _, user := range users { usersList = append(usersList, user.Username) } - return usersList, nil -} - -func doList(c *client.Client, u string) (string, error) { - body, err := c.BasicRequest("GET", u, nil) - - if err != nil { - return "", err - } - - return body, nil + return usersList, count, nil } // New adds a user to an app. diff --git a/client-go/controller/models/perms/perms_test.go b/client-go/controller/models/perms/perms_test.go index f02d3f4c75..a96cb8bc37 100644 --- a/client-go/controller/models/perms/perms_test.go +++ b/client-go/controller/models/perms/perms_test.go @@ -170,7 +170,7 @@ func TestListAdmins(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := ListAdmins(&client) + actual, _, err := ListAdmins(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/ps/ps.go b/client-go/controller/models/ps/ps.go index d5e732df18..4ba8a9f8d3 100644 --- a/client-go/controller/models/ps/ps.go +++ b/client-go/controller/models/ps/ps.go @@ -10,20 +10,20 @@ import ( ) // List an app's processes. 
-func List(c *client.Client, appID string) ([]api.Process, error) { +func List(c *client.Client, appID string, results int) ([]api.Process, int, error) { u := fmt.Sprintf("/v1/apps/%s/containers/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Process{}, err + return []api.Process{}, -1, err } - procs := api.Processes{} + var procs []api.Process if err = json.Unmarshal([]byte(body), &procs); err != nil { - return []api.Process{}, err + return []api.Process{}, -1, err } - return procs.Processes, nil + return procs, count, nil } // Scale an app's processes. diff --git a/client-go/controller/models/ps/ps_test.go b/client-go/controller/models/ps/ps_test.go index 3b376f326d..9a91f928ad 100644 --- a/client-go/controller/models/ps/ps_test.go +++ b/client-go/controller/models/ps/ps_test.go @@ -163,7 +163,7 @@ func TestProcessesList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/releases/releases.go b/client-go/controller/models/releases/releases.go index 968128ecaa..2d7ca6f105 100644 --- a/client-go/controller/models/releases/releases.go +++ b/client-go/controller/models/releases/releases.go @@ -9,21 +9,21 @@ import ( ) // List lists an app's releases. -func List(c *client.Client, appID string) ([]api.Release, error) { +func List(c *client.Client, appID string, results int) ([]api.Release, int, error) { u := fmt.Sprintf("/v1/apps/%s/releases/", appID) - body, err := c.BasicRequest("GET", u, nil) + body, count, err := c.LimitedRequest(u, results) if err != nil { - return []api.Release{}, err + return []api.Release{}, -1, err } - releases := api.Releases{} + var releases []api.Release if err = json.Unmarshal([]byte(body), &releases); err != nil { - return []api.Release{}, err + return []api.Release{}, -1, err } - return releases.Releases, nil + return releases, count, nil } // Get a release of an app. diff --git a/client-go/controller/models/releases/releases_test.go b/client-go/controller/models/releases/releases_test.go index 3c8eb5fbfb..1f6030ebf8 100644 --- a/client-go/controller/models/releases/releases_test.go +++ b/client-go/controller/models/releases/releases_test.go @@ -151,7 +151,7 @@ func TestReleasesList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client, "example-go") + actual, _, err := List(&client, "example-go", 100) if err != nil { t.Fatal(err) diff --git a/client-go/controller/models/users/users.go b/client-go/controller/models/users/users.go index 68edd9d4dd..8ca60e5508 100644 --- a/client-go/controller/models/users/users.go +++ b/client-go/controller/models/users/users.go @@ -8,17 +8,17 @@ import ( ) // List users registered with the controller. 
-func List(c *client.Client) ([]api.User, error) { - body, err := c.BasicRequest("GET", "/v1/users/", nil) +func List(c *client.Client, results int) ([]api.User, int, error) { + body, count, err := c.LimitedRequest("/v1/users/", results) if err != nil { - return []api.User{}, err + return []api.User{}, -1, err } - users := api.Users{} + var users []api.User if err = json.Unmarshal([]byte(body), &users); err != nil { - return []api.User{}, err + return []api.User{}, -1, err } - return users.Users, nil + return users, count, nil } diff --git a/client-go/controller/models/users/users_test.go b/client-go/controller/models/users/users_test.go index 7d34d5962f..ecca96465a 100644 --- a/client-go/controller/models/users/users_test.go +++ b/client-go/controller/models/users/users_test.go @@ -83,7 +83,7 @@ func TestUsersList(t *testing.T) { client := client.Client{HTTPClient: httpClient, ControllerURL: *u, Token: "abc"} - actual, err := List(&client) + actual, _, err := List(&client, 100) if err != nil { t.Fatal(err) diff --git a/client-go/parser/apps.go b/client-go/parser/apps.go index aed575cd99..f678e170cd 100644 --- a/client-go/parser/apps.go +++ b/client-go/parser/apps.go @@ -91,13 +91,25 @@ func appsList(argv []string) error { usage := ` Lists applications visible to the current user. -Usage: deis apps:list +Usage: deis apps:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.AppsList() + return cmd.AppsList(results) } func appInfo(argv []string) error { diff --git a/client-go/parser/builds.go b/client-go/parser/builds.go index d1737069f3..3516a7286a 100644 --- a/client-go/parser/builds.go +++ b/client-go/parser/builds.go @@ -44,6 +44,8 @@ Usage: deis builds:list [options] Options: -a --app=<app> the uniquely identifiable name for the application. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -52,7 +54,13 @@ Options: return err } - return cmd.BuildsList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.BuildsList(safeGetValue(args, "--app"), results) } func buildsCreate(argv []string) error { diff --git a/client-go/parser/certs.go b/client-go/parser/certs.go index 245ecdbdb5..f33f5d3dee 100644 --- a/client-go/parser/certs.go +++ b/client-go/parser/certs.go @@ -42,14 +42,26 @@ func certsList(argv []string) error { usage := ` Show certificate information for an SSL application. 
-Usage: deis certs:list +Usage: deis certs:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.CertsList() + return cmd.CertsList(results) } func certAdd(argv []string) error { diff --git a/client-go/parser/domains.go b/client-go/parser/domains.go index 3522989d9f..ace96edcf5 100644 --- a/client-go/parser/domains.go +++ b/client-go/parser/domains.go @@ -69,8 +69,10 @@ Lists domains bound to an application. Usage: deis domains:list [options] Options: - -a --app=<app> - the uniquely identifiable name for the application. + -a --app=<app> + the uniquely identifiable name for the application. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -79,7 +81,13 @@ Options: return err } - return cmd.DomainsList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.DomainsList(safeGetValue(args, "--app"), results) } func domainsRemove(argv []string) error { diff --git a/client-go/parser/keys.go b/client-go/parser/keys.go index 0b694947a8..0a73adad94 100644 --- a/client-go/parser/keys.go +++ b/client-go/parser/keys.go @@ -42,14 +42,26 @@ func keysList(argv []string) error { usage := ` Lists SSH keys for the logged in user. -Usage: deis keys:list +Usage: deis keys:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.KeysList() + return cmd.KeysList(results) } func keyAdd(argv []string) error { diff --git a/client-go/parser/perms.go b/client-go/parser/perms.go index b665648411..dc41fa3d66 100644 --- a/client-go/parser/perms.go +++ b/client-go/parser/perms.go @@ -43,7 +43,7 @@ func permsList(argv []string) error { Lists all users with permission to use an app, or lists all users with system administrator privileges. -Usage: deis perms:list [-a --app=<app>|--admin] +Usage: deis perms:list [-a --app=<app>|--admin|--admin --limit=<num>] Options: -a --app=<app> @@ -51,6 +51,8 @@ Options: for the application. --admin lists all users with system administrator privileges. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -61,7 +63,13 @@ Options: admin := args["--admin"].(bool) - return cmd.PermsList(safeGetValue(args, "--app"), admin) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.PermsList(safeGetValue(args, "--app"), admin, results) } func permCreate(argv []string) error { diff --git a/client-go/parser/ps.go b/client-go/parser/ps.go index 34d894955c..3cc5c8fca1 100644 --- a/client-go/parser/ps.go +++ b/client-go/parser/ps.go @@ -47,6 +47,8 @@ Usage: deis ps:list [options] Options: -a --app=<app> the uniquely identifiable name for the application. 
+ -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -55,7 +57,13 @@ Options: return err } - return cmd.PsList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.PsList(safeGetValue(args, "--app"), results) } func psRestart(argv []string) error { diff --git a/client-go/parser/releases.go b/client-go/parser/releases.go index 6d3a96cb17..dbe319e3b6 100644 --- a/client-go/parser/releases.go +++ b/client-go/parser/releases.go @@ -48,6 +48,8 @@ Usage: deis releases:list [options] Options: -a --app=<app> the uniquely identifiable name for the application. + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` args, err := docopt.Parse(usage, argv, true, "", false, true) @@ -56,7 +58,13 @@ Options: return err } - return cmd.ReleasesList(safeGetValue(args, "--app")) + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { + return err + } + + return cmd.ReleasesList(safeGetValue(args, "--app"), results) } func releasesInfo(argv []string) error { diff --git a/client-go/parser/users.go b/client-go/parser/users.go index a6bc489a67..30c98d3253 100644 --- a/client-go/parser/users.go +++ b/client-go/parser/users.go @@ -37,12 +37,24 @@ func usersList(argv []string) error { Lists all registered users. Requires admin privilages. -Usage: deis users:list +Usage: deis users:list [options] + +Options: + -l --limit=<num> + the maximum number of results to display, defaults to config setting ` - if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + args, err := docopt.Parse(usage, argv, true, "", false, true) + + if err != nil { + return err + } + + results, err := responseLimit(safeGetValue(args, "--limit")) + + if err != nil { return err } - return cmd.UsersList() + return cmd.UsersList(results) } diff --git a/client-go/parser/utils.go b/client-go/parser/utils.go index 3ed100067b..7bcb2707b8 100644 --- a/client-go/parser/utils.go +++ b/client-go/parser/utils.go @@ -2,6 +2,7 @@ package parser import ( "fmt" + "strconv" ) // docopt expects commands to be in the proper format, but we split them apart for @@ -21,6 +22,14 @@ func safeGetValue(args map[string]interface{}, key string) string { return args[key].(string) } +func responseLimit(limit string) (int, error) { + if limit == "" { + return -1, nil + } + + return strconv.Atoi(limit) +} + // PrintUsage runs if no matching command is found. func PrintUsage() { fmt.Println("Found no matching command, try 'deis help'") diff --git a/controller/deis/settings.py b/controller/deis/settings.py index 3bfbf45d74..abbec4c92e 100644 --- a/controller/deis/settings.py +++ b/controller/deis/settings.py @@ -199,6 +199,7 @@ 'rest_framework.renderers.JSONRenderer', ), 'PAGINATE_BY': 100, + 'PAGINATE_BY_PARAM': 'page_size', 'TEST_REQUEST_DEFAULT_FORMAT': 'json', } diff --git a/docs/reference/api-v1.6.rst b/docs/reference/api-v1.6.rst index 2037f897eb..00dd0920a5 100644 --- a/docs/reference/api-v1.6.rst +++ b/docs/reference/api-v1.6.rst @@ -16,6 +16,8 @@ What's New **New!** administrators no longer have to supply a password when deleting another user. +**New!** ``?page_size`` query parameter for paginated requests to set the number of results per page. + Authentication --------------
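Taken together, the hunks above thread a result limit from the CLI (the new `--limit` flag) through each model's `List` function down to the controller, which now honours a `page_size` query parameter (`PAGINATE_BY_PARAM` in `settings.py`). The sketch below is a rough illustration only — the controller URL, app name and token are placeholders, and the real Go client goes through `client.LimitedRequest`, whose internals are not part of this diff — showing the paginated GET that such a limited listing presumably amounts to on the wire:

```python
# Rough sketch of the paginated request shape a limited listing relies on.
# Controller URL, app name and token are placeholders; the real client-go
# code issues this through client.LimitedRequest and returns the item
# count alongside the results.
import requests

CONTROLLER = "http://deis.example.com"  # placeholder controller URL
TOKEN = "abc"                           # placeholder auth token

resp = requests.get(
    f"{CONTROLLER}/v1/apps/example-go/domains/",
    headers={"Authorization": f"token {TOKEN}"},
    params={"page_size": 100},          # page_size is the new PAGINATE_BY_PARAM from settings.py
)
resp.raise_for_status()
payload = resp.json()

domains = payload["results"]            # DRF pagination: the requested page of domain objects
count = payload["count"]                # total number available on the controller
print(f"showing {len(domains)} of {count} domains")
```

On the command line this surfaces as, for example, `deis domains:list --limit=100`.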
paperless-ngx__paperless-ngx-4602
[BUG] Unable to delete notes in 2.00 beta rc1

### Description

Error deleting notes in 2.00 beta rc1.

### Steps to reproduce

Existing or newly created notes cannot be deleted. A newly created note overwrites the existing old note.

### Webserver logs

```bash
{"headers":{"normalizedNames":{},"lazyUpdate":null},"status":404,"statusText":"Not Found","url":"http://192.168.0.110:8777/api/documents/1812/notes/?id=421","ok":false,"name":"HttpErrorResponse","message":"Http failure response for http://192.168.0.110:8777/api/documents/1812/notes/?id=421: 404 Not Found","error":{"detail":"Nicht gefunden."}}
```

### Browser logs

_No response_

### Paperless-ngx version

2.00 beta rc1

### Host OS

Synology

### Installation method

Docker - official image

### Browser

Firefox

### Configuration changes

_No response_

### Other

_No response_
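As a reproduction aid, the failing call from the log above boils down to a single authenticated DELETE with the note id passed as a query parameter. The sketch below is illustrative only: the host, document id (1812) and note id (421) are taken from the reported log, and the token is a placeholder for whatever credentials the instance uses.

```python
# Minimal reproduction sketch of the failing request from the log above.
# Host, document id and note id come from the reported error; the token
# is a placeholder, not a real credential.
import requests

BASE = "http://192.168.0.110:8777"
TOKEN = "<api-token>"  # placeholder

resp = requests.delete(
    f"{BASE}/api/documents/1812/notes/",
    params={"id": 421},  # note id is sent as a query parameter, not in the body
    headers={"Authorization": f"Token {TOKEN}"},
)

# On 2.00 beta rc1 this returns 404 with {"detail": "Nicht gefunden."}
# instead of deleting the note.
print(resp.status_code, resp.text)
```

Going by the `notes` action in `src/documents/views.py` below, a successful delete is expected to answer with the document's remaining notes rather than a 404.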
[ { "content": "import itertools\nimport json\nimport logging\nimport os\nimport re\nimport tempfile\nimport urllib\nimport zipfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom time import mktime\nfrom unicodedata import normalize\nfrom urllib.parse import quote\n\nimport pathvalidate\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db.models import Case\nfrom django.db.models import Count\nfrom django.db.models import IntegerField\nfrom django.db.models import Max\nfrom django.db.models import Sum\nfrom django.db.models import When\nfrom django.db.models.functions import Length\nfrom django.db.models.functions import Lower\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseBadRequest\nfrom django.http import HttpResponseForbidden\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import get_language\nfrom django.views import View\nfrom django.views.decorators.cache import cache_control\nfrom django.views.generic import TemplateView\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom langdetect import detect\nfrom packaging import version as packaging_version\nfrom rest_framework import parsers\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.filters import SearchFilter\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.mixins import CreateModelMixin\nfrom rest_framework.mixins import DestroyModelMixin\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.mixins import RetrieveModelMixin\nfrom rest_framework.mixins import UpdateModelMixin\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\nfrom rest_framework.viewsets import ViewSet\n\nfrom documents import bulk_edit\nfrom documents.bulk_download import ArchiveOnlyStrategy\nfrom documents.bulk_download import OriginalAndArchiveStrategy\nfrom documents.bulk_download import OriginalsOnlyStrategy\nfrom documents.classifier import load_classifier\nfrom documents.data_models import ConsumableDocument\nfrom documents.data_models import DocumentMetadataOverrides\nfrom documents.data_models import DocumentSource\nfrom documents.filters import CorrespondentFilterSet\nfrom documents.filters import DocumentFilterSet\nfrom documents.filters import DocumentTypeFilterSet\nfrom documents.filters import ObjectOwnedOrGrantedPermissionsFilter\nfrom documents.filters import ShareLinkFilterSet\nfrom documents.filters import StoragePathFilterSet\nfrom documents.filters import TagFilterSet\nfrom documents.matching import match_correspondents\nfrom documents.matching import match_document_types\nfrom documents.matching import match_storage_paths\nfrom documents.matching import match_tags\nfrom documents.models import ConsumptionTemplate\nfrom documents.models import Correspondent\nfrom documents.models import CustomField\nfrom documents.models import Document\nfrom documents.models import DocumentType\nfrom documents.models import Note\nfrom documents.models import 
PaperlessTask\nfrom documents.models import SavedView\nfrom documents.models import ShareLink\nfrom documents.models import StoragePath\nfrom documents.models import Tag\nfrom documents.parsers import get_parser_class_for_mime_type\nfrom documents.parsers import parse_date_generator\nfrom documents.permissions import PaperlessAdminPermissions\nfrom documents.permissions import PaperlessObjectPermissions\nfrom documents.permissions import get_objects_for_user_owner_aware\nfrom documents.permissions import has_perms_owner_aware\nfrom documents.permissions import set_permissions_for_object\nfrom documents.serialisers import AcknowledgeTasksViewSerializer\nfrom documents.serialisers import BulkDownloadSerializer\nfrom documents.serialisers import BulkEditObjectPermissionsSerializer\nfrom documents.serialisers import BulkEditSerializer\nfrom documents.serialisers import ConsumptionTemplateSerializer\nfrom documents.serialisers import CorrespondentSerializer\nfrom documents.serialisers import CustomFieldSerializer\nfrom documents.serialisers import DocumentListSerializer\nfrom documents.serialisers import DocumentSerializer\nfrom documents.serialisers import DocumentTypeSerializer\nfrom documents.serialisers import PostDocumentSerializer\nfrom documents.serialisers import SavedViewSerializer\nfrom documents.serialisers import ShareLinkSerializer\nfrom documents.serialisers import StoragePathSerializer\nfrom documents.serialisers import TagSerializer\nfrom documents.serialisers import TagSerializerVersion1\nfrom documents.serialisers import TasksViewSerializer\nfrom documents.serialisers import UiSettingsViewSerializer\nfrom documents.tasks import consume_file\nfrom paperless import version\nfrom paperless.db import GnuPG\nfrom paperless.views import StandardPagination\n\nif settings.AUDIT_LOG_ENABLED:\n from auditlog.models import LogEntry\n\nlogger = logging.getLogger(\"paperless.api\")\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n def get_frontend_language(self):\n if hasattr(\n self.request.user,\n \"ui_settings\",\n ) and self.request.user.ui_settings.settings.get(\"language\"):\n lang = self.request.user.ui_settings.settings.get(\"language\")\n else:\n lang = get_language()\n # This is here for the following reason:\n # Django identifies languages in the form \"en-us\"\n # However, angular generates locales as \"en-US\".\n # this translates between these two forms.\n if \"-\" in lang:\n first = lang[: lang.index(\"-\")]\n second = lang[lang.index(\"-\") + 1 :]\n return f\"{first}-{second.upper()}\"\n else:\n return lang\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"cookie_prefix\"] = settings.COOKIE_PREFIX\n context[\"username\"] = self.request.user.username\n context[\"full_name\"] = self.request.user.get_full_name()\n context[\"styles_css\"] = f\"frontend/{self.get_frontend_language()}/styles.css\"\n context[\"runtime_js\"] = f\"frontend/{self.get_frontend_language()}/runtime.js\"\n context[\n \"polyfills_js\"\n ] = f\"frontend/{self.get_frontend_language()}/polyfills.js\"\n context[\"main_js\"] = f\"frontend/{self.get_frontend_language()}/main.js\"\n context[\n \"webmanifest\"\n ] = f\"frontend/{self.get_frontend_language()}/manifest.webmanifest\"\n context[\n \"apple_touch_icon\"\n ] = f\"frontend/{self.get_frontend_language()}/apple-touch-icon.png\"\n return context\n\n\nclass PassUserMixin(CreateModelMixin):\n \"\"\"\n Pass a user object to serializer\n \"\"\"\n\n def get_serializer(self, *args, 
**kwargs):\n kwargs.setdefault(\"user\", self.request.user)\n kwargs.setdefault(\n \"full_perms\",\n self.request.query_params.get(\"full_perms\", False),\n )\n return super().get_serializer(*args, **kwargs)\n\n\nclass CorrespondentViewSet(ModelViewSet, PassUserMixin):\n model = Correspondent\n\n queryset = Correspondent.objects.annotate(\n document_count=Count(\"documents\"),\n last_correspondence=Max(\"documents__created\"),\n ).order_by(Lower(\"name\"))\n\n serializer_class = CorrespondentSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = CorrespondentFilterSet\n ordering_fields = (\n \"name\",\n \"matching_algorithm\",\n \"match\",\n \"document_count\",\n \"last_correspondence\",\n )\n\n\nclass TagViewSet(ModelViewSet, PassUserMixin):\n model = Tag\n\n queryset = Tag.objects.annotate(document_count=Count(\"documents\")).order_by(\n Lower(\"name\"),\n )\n\n def get_serializer_class(self, *args, **kwargs):\n if int(self.request.version) == 1:\n return TagSerializerVersion1\n else:\n return TagSerializer\n\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = TagFilterSet\n ordering_fields = (\"color\", \"name\", \"matching_algorithm\", \"match\", \"document_count\")\n\n\nclass DocumentTypeViewSet(ModelViewSet, PassUserMixin):\n model = DocumentType\n\n queryset = DocumentType.objects.annotate(\n document_count=Count(\"documents\"),\n ).order_by(Lower(\"name\"))\n\n serializer_class = DocumentTypeSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = DocumentTypeFilterSet\n ordering_fields = (\"name\", \"matching_algorithm\", \"match\", \"document_count\")\n\n\nclass DocumentViewSet(\n PassUserMixin,\n RetrieveModelMixin,\n UpdateModelMixin,\n DestroyModelMixin,\n ListModelMixin,\n GenericViewSet,\n):\n model = Document\n queryset = Document.objects.annotate(num_notes=Count(\"notes\"))\n serializer_class = DocumentSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n SearchFilter,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = DocumentFilterSet\n search_fields = (\"title\", \"correspondent__name\", \"content\")\n ordering_fields = (\n \"id\",\n \"title\",\n \"correspondent__name\",\n \"document_type__name\",\n \"created\",\n \"modified\",\n \"added\",\n \"archive_serial_number\",\n \"num_notes\",\n \"owner\",\n )\n\n def get_queryset(self):\n return Document.objects.distinct().annotate(num_notes=Count(\"notes\"))\n\n def get_serializer(self, *args, **kwargs):\n fields_param = self.request.query_params.get(\"fields\", None)\n fields = fields_param.split(\",\") if fields_param else None\n truncate_content = self.request.query_params.get(\"truncate_content\", \"False\")\n kwargs.setdefault(\"context\", self.get_serializer_context())\n kwargs.setdefault(\"fields\", fields)\n kwargs.setdefault(\"truncate_content\", truncate_content.lower() in [\"true\", \"1\"])\n kwargs.setdefault(\n 
\"full_perms\",\n self.request.query_params.get(\"full_perms\", False),\n )\n return super().get_serializer(*args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n response = super().update(request, *args, **kwargs)\n from documents import index\n\n index.add_or_update_document(self.get_object())\n return response\n\n def destroy(self, request, *args, **kwargs):\n from documents import index\n\n index.remove_document_from_index(self.get_object())\n return super().destroy(request, *args, **kwargs)\n\n @staticmethod\n def original_requested(request):\n return (\n \"original\" in request.query_params\n and request.query_params[\"original\"] == \"true\"\n )\n\n def file_response(self, pk, request, disposition):\n doc = Document.objects.get(id=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n return serve_file(\n doc=doc,\n use_archive=not self.original_requested(request)\n and doc.has_archive_version,\n disposition=disposition,\n )\n\n def get_metadata(self, file, mime_type):\n if not os.path.isfile(file):\n return None\n\n parser_class = get_parser_class_for_mime_type(mime_type)\n if parser_class:\n parser = parser_class(progress_callback=None, logging_group=None)\n\n try:\n return parser.extract_metadata(file, mime_type)\n except Exception:\n # TODO: cover GPG errors, remove later.\n return []\n else:\n return []\n\n def get_filesize(self, filename):\n if os.path.isfile(filename):\n return os.stat(filename).st_size\n else:\n return None\n\n @action(methods=[\"get\"], detail=True)\n def metadata(self, request, pk=None):\n try:\n doc = Document.objects.get(pk=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n except Document.DoesNotExist:\n raise Http404\n\n meta = {\n \"original_checksum\": doc.checksum,\n \"original_size\": self.get_filesize(doc.source_path),\n \"original_mime_type\": doc.mime_type,\n \"media_filename\": doc.filename,\n \"has_archive_version\": doc.has_archive_version,\n \"original_metadata\": self.get_metadata(doc.source_path, doc.mime_type),\n \"archive_checksum\": doc.archive_checksum,\n \"archive_media_filename\": doc.archive_filename,\n \"original_filename\": doc.original_filename,\n }\n\n lang = \"en\"\n try:\n lang = detect(doc.content)\n except Exception:\n pass\n meta[\"lang\"] = lang\n\n if doc.has_archive_version:\n meta[\"archive_size\"] = self.get_filesize(doc.archive_path)\n meta[\"archive_metadata\"] = self.get_metadata(\n doc.archive_path,\n \"application/pdf\",\n )\n else:\n meta[\"archive_size\"] = None\n meta[\"archive_metadata\"] = None\n\n return Response(meta)\n\n @action(methods=[\"get\"], detail=True)\n def suggestions(self, request, pk=None):\n doc = get_object_or_404(Document, pk=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n\n classifier = load_classifier()\n\n dates = []\n if settings.NUMBER_OF_SUGGESTED_DATES > 0:\n gen = parse_date_generator(doc.filename, doc.content)\n dates = sorted(\n {i for i in itertools.islice(gen, settings.NUMBER_OF_SUGGESTED_DATES)},\n )\n\n return Response(\n {\n \"correspondents\": [\n c.id for c in match_correspondents(doc, classifier, request.user)\n ],\n \"tags\": [t.id for t in match_tags(doc, classifier, request.user)],\n 
\"document_types\": [\n dt.id for dt in match_document_types(doc, classifier, request.user)\n ],\n \"storage_paths\": [\n dt.id for dt in match_storage_paths(doc, classifier, request.user)\n ],\n \"dates\": [\n date.strftime(\"%Y-%m-%d\") for date in dates if date is not None\n ],\n },\n )\n\n @action(methods=[\"get\"], detail=True)\n def preview(self, request, pk=None):\n try:\n response = self.file_response(pk, request, \"inline\")\n return response\n except (FileNotFoundError, Document.DoesNotExist):\n raise Http404\n\n @action(methods=[\"get\"], detail=True)\n @method_decorator(cache_control(public=False, max_age=315360000))\n def thumb(self, request, pk=None):\n try:\n doc = Document.objects.get(id=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n if doc.storage_type == Document.STORAGE_TYPE_GPG:\n handle = GnuPG.decrypted(doc.thumbnail_file)\n else:\n handle = doc.thumbnail_file\n # TODO: Send ETag information and use that to send new thumbnails\n # if available\n\n return HttpResponse(handle, content_type=\"image/webp\")\n except (FileNotFoundError, Document.DoesNotExist):\n raise Http404\n\n @action(methods=[\"get\"], detail=True)\n def download(self, request, pk=None):\n try:\n return self.file_response(pk, request, \"attachment\")\n except (FileNotFoundError, Document.DoesNotExist):\n raise Http404\n\n def getNotes(self, doc):\n return [\n {\n \"id\": c.id,\n \"note\": c.note,\n \"created\": c.created,\n \"user\": {\n \"id\": c.user.id,\n \"username\": c.user.username,\n \"first_name\": c.user.first_name,\n \"last_name\": c.user.last_name,\n },\n }\n for c in Note.objects.filter(document=doc).order_by(\"-created\")\n ]\n\n @action(methods=[\"get\", \"post\", \"delete\"], detail=True)\n def notes(self, request, pk=None):\n currentUser = request.user\n try:\n doc = Document.objects.get(pk=pk)\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions to view notes\")\n except Document.DoesNotExist:\n raise Http404\n\n if request.method == \"GET\":\n try:\n return Response(self.getNotes(doc))\n except Exception as e:\n logger.warning(f\"An error occurred retrieving notes: {e!s}\")\n return Response(\n {\"error\": \"Error retrieving notes, check logs for more detail.\"},\n )\n elif request.method == \"POST\":\n try:\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n \"change_document\",\n doc,\n ):\n return HttpResponseForbidden(\n \"Insufficient permissions to create notes\",\n )\n\n c = Note.objects.create(\n document=doc,\n note=request.data[\"note\"],\n user=currentUser,\n )\n c.save()\n # If audit log is enabled make an entry in the log\n # about this note change\n if settings.AUDIT_LOG_ENABLED:\n LogEntry.objects.log_create(\n instance=doc,\n changes=json.dumps(\n {\n \"Note Added\": [\"None\", c.id],\n },\n ),\n action=LogEntry.Action.UPDATE,\n )\n\n doc.modified = timezone.now()\n doc.save()\n\n from documents import index\n\n index.add_or_update_document(self.get_object())\n\n return Response(self.getNotes(doc))\n except Exception as e:\n logger.warning(f\"An error occurred saving note: {e!s}\")\n return Response(\n {\n \"error\": \"Error saving note, check logs for more detail.\",\n },\n )\n elif request.method == \"DELETE\":\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n 
\"change_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions to delete notes\")\n\n note = Note.objects.get(id=int(request.GET.get(\"id\")))\n if settings.AUDIT_LOG_ENABLED:\n LogEntry.objects.log_create(\n instance=doc,\n changes=json.dumps(\n {\n \"Note Deleted\": [note.id, \"None\"],\n },\n ),\n action=LogEntry.Action.UPDATE,\n )\n\n note.delete()\n\n doc.modified = timezone.now()\n doc.save()\n\n from documents import index\n\n index.add_or_update_document(self.get_object())\n\n return Response(self.getNotes(doc))\n\n return Response(\n {\n \"error\": \"error\",\n },\n )\n\n @action(methods=[\"get\"], detail=True)\n def share_links(self, request, pk=None):\n currentUser = request.user\n try:\n doc = Document.objects.get(pk=pk)\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n \"change_document\",\n doc,\n ):\n return HttpResponseForbidden(\n \"Insufficient permissions to add share link\",\n )\n except Document.DoesNotExist:\n raise Http404\n\n if request.method == \"GET\":\n now = timezone.now()\n links = [\n {\n \"id\": c.id,\n \"created\": c.created,\n \"expiration\": c.expiration,\n \"slug\": c.slug,\n }\n for c in ShareLink.objects.filter(document=doc)\n .exclude(expiration__lt=now)\n .order_by(\"-created\")\n ]\n return Response(links)\n\n\nclass SearchResultSerializer(DocumentSerializer, PassUserMixin):\n def to_representation(self, instance):\n doc = Document.objects.get(id=instance[\"id\"])\n notes = \",\".join(\n [str(c.note) for c in Note.objects.filter(document=instance[\"id\"])],\n )\n r = super().to_representation(doc)\n r[\"__search_hit__\"] = {\n \"score\": instance.score,\n \"highlights\": instance.highlights(\"content\", text=doc.content),\n \"note_highlights\": instance.highlights(\"notes\", text=notes)\n if doc\n else None,\n \"rank\": instance.rank,\n }\n\n return r\n\n\nclass UnifiedSearchViewSet(DocumentViewSet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.searcher = None\n\n def get_serializer_class(self):\n if self._is_search_request():\n return SearchResultSerializer\n else:\n return DocumentSerializer\n\n def _is_search_request(self):\n return (\n \"query\" in self.request.query_params\n or \"more_like_id\" in self.request.query_params\n )\n\n def filter_queryset(self, queryset):\n if self._is_search_request():\n from documents import index\n\n if \"query\" in self.request.query_params:\n query_class = index.DelayedFullTextQuery\n elif \"more_like_id\" in self.request.query_params:\n query_class = index.DelayedMoreLikeThisQuery\n else:\n raise ValueError\n\n return query_class(\n self.searcher,\n self.request.query_params,\n self.paginator.get_page_size(self.request),\n self.request.user,\n )\n else:\n return super().filter_queryset(queryset)\n\n def list(self, request, *args, **kwargs):\n if self._is_search_request():\n from documents import index\n\n try:\n with index.open_index_searcher() as s:\n self.searcher = s\n return super().list(request)\n except NotFound:\n raise\n except Exception as e:\n logger.warning(f\"An error occurred listing search results: {e!s}\")\n return HttpResponseBadRequest(\n \"Error listing search results, check logs for more detail.\",\n )\n else:\n return super().list(request)\n\n @action(detail=False, methods=[\"GET\"], name=\"Get Next ASN\")\n def next_asn(self, request, *args, **kwargs):\n return Response(\n (\n Document.objects.filter(archive_serial_number__gte=0)\n .order_by(\"archive_serial_number\")\n .last()\n 
.archive_serial_number\n or 0\n )\n + 1,\n )\n\n\nclass LogViewSet(ViewSet):\n permission_classes = (IsAuthenticated, PaperlessAdminPermissions)\n\n log_files = [\"paperless\", \"mail\"]\n\n def get_log_filename(self, log):\n return os.path.join(settings.LOGGING_DIR, f\"{log}.log\")\n\n def retrieve(self, request, pk=None, *args, **kwargs):\n if pk not in self.log_files:\n raise Http404\n\n filename = self.get_log_filename(pk)\n\n if not os.path.isfile(filename):\n raise Http404\n\n with open(filename) as f:\n lines = [line.rstrip() for line in f.readlines()]\n\n return Response(lines)\n\n def list(self, request, *args, **kwargs):\n exist = [\n log for log in self.log_files if os.path.isfile(self.get_log_filename(log))\n ]\n return Response(exist)\n\n\nclass SavedViewViewSet(ModelViewSet, PassUserMixin):\n model = SavedView\n\n queryset = SavedView.objects.all()\n serializer_class = SavedViewSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n\n def get_queryset(self):\n user = self.request.user\n return SavedView.objects.filter(owner=user)\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass BulkEditView(GenericAPIView, PassUserMixin):\n permission_classes = (IsAuthenticated,)\n serializer_class = BulkEditSerializer\n parser_classes = (parsers.JSONParser,)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n user = self.request.user\n method = serializer.validated_data.get(\"method\")\n parameters = serializer.validated_data.get(\"parameters\")\n documents = serializer.validated_data.get(\"documents\")\n\n if not user.is_superuser:\n document_objs = Document.objects.filter(pk__in=documents)\n has_perms = (\n all((doc.owner == user or doc.owner is None) for doc in document_objs)\n if method == bulk_edit.set_permissions\n else all(\n has_perms_owner_aware(user, \"change_document\", doc)\n for doc in document_objs\n )\n )\n\n if not has_perms:\n return HttpResponseForbidden(\"Insufficient permissions\")\n\n try:\n # TODO: parameter validation\n result = method(documents, **parameters)\n return Response({\"result\": result})\n except Exception as e:\n logger.warning(f\"An error occurred performing bulk edit: {e!s}\")\n return HttpResponseBadRequest(\n \"Error performing bulk edit, check logs for more detail.\",\n )\n\n\nclass PostDocumentView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = PostDocumentSerializer\n parser_classes = (parsers.MultiPartParser,)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n doc_name, doc_data = serializer.validated_data.get(\"document\")\n correspondent_id = serializer.validated_data.get(\"correspondent\")\n document_type_id = serializer.validated_data.get(\"document_type\")\n tag_ids = serializer.validated_data.get(\"tags\")\n title = serializer.validated_data.get(\"title\")\n created = serializer.validated_data.get(\"created\")\n archive_serial_number = serializer.validated_data.get(\"archive_serial_number\")\n\n t = int(mktime(datetime.now().timetuple()))\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n\n temp_file_path = Path(tempfile.mkdtemp(dir=settings.SCRATCH_DIR)) / Path(\n pathvalidate.sanitize_filename(doc_name),\n )\n\n temp_file_path.write_bytes(doc_data)\n\n os.utime(temp_file_path, times=(t, t))\n\n input_doc 
= ConsumableDocument(\n source=DocumentSource.ApiUpload,\n original_file=temp_file_path,\n )\n input_doc_overrides = DocumentMetadataOverrides(\n filename=doc_name,\n title=title,\n correspondent_id=correspondent_id,\n document_type_id=document_type_id,\n tag_ids=tag_ids,\n created=created,\n asn=archive_serial_number,\n owner_id=request.user.id,\n )\n\n async_task = consume_file.delay(\n input_doc,\n input_doc_overrides,\n )\n\n return Response(async_task.id)\n\n\nclass SelectionDataView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = DocumentListSerializer\n parser_classes = (parsers.MultiPartParser, parsers.JSONParser)\n\n def post(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n ids = serializer.validated_data.get(\"documents\")\n\n correspondents = Correspondent.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n tags = Tag.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n types = DocumentType.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n storage_paths = StoragePath.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n r = Response(\n {\n \"selected_correspondents\": [\n {\"id\": t.id, \"document_count\": t.document_count}\n for t in correspondents\n ],\n \"selected_tags\": [\n {\"id\": t.id, \"document_count\": t.document_count} for t in tags\n ],\n \"selected_document_types\": [\n {\"id\": t.id, \"document_count\": t.document_count} for t in types\n ],\n \"selected_storage_paths\": [\n {\"id\": t.id, \"document_count\": t.document_count}\n for t in storage_paths\n ],\n },\n )\n\n return r\n\n\nclass SearchAutoCompleteView(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n user = self.request.user if hasattr(self.request, \"user\") else None\n\n if \"term\" in request.query_params:\n term = request.query_params[\"term\"]\n else:\n return HttpResponseBadRequest(\"Term required\")\n\n if \"limit\" in request.query_params:\n limit = int(request.query_params[\"limit\"])\n if limit <= 0:\n return HttpResponseBadRequest(\"Invalid limit\")\n else:\n limit = 10\n\n from documents import index\n\n ix = index.open_index()\n\n return Response(\n index.autocomplete(\n ix,\n term,\n limit,\n user,\n ),\n )\n\n\nclass StatisticsView(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n user = request.user if request.user is not None else None\n\n documents = (\n Document.objects.all()\n if user is None\n else get_objects_for_user_owner_aware(\n user,\n \"documents.view_document\",\n Document,\n )\n )\n tags = (\n Tag.objects.all()\n if user is None\n else get_objects_for_user_owner_aware(user, \"documents.view_tag\", Tag)\n )\n correspondent_count = (\n Correspondent.objects.count()\n if user is None\n else len(\n get_objects_for_user_owner_aware(\n user,\n \"documents.view_correspondent\",\n Correspondent,\n ),\n )\n )\n document_type_count = (\n DocumentType.objects.count()\n if user is None\n else len(\n get_objects_for_user_owner_aware(\n user,\n \"documents.view_documenttype\",\n DocumentType,\n ),\n )\n )\n storage_path_count = (\n StoragePath.objects.count()\n if user is None\n else 
len(\n get_objects_for_user_owner_aware(\n user,\n \"documents.view_storagepath\",\n StoragePath,\n ),\n )\n )\n\n documents_total = documents.count()\n\n inbox_tag = tags.filter(is_inbox_tag=True)\n\n documents_inbox = (\n documents.filter(tags__is_inbox_tag=True).distinct().count()\n if inbox_tag.exists()\n else None\n )\n\n document_file_type_counts = (\n documents.values(\"mime_type\")\n .annotate(mime_type_count=Count(\"mime_type\"))\n .order_by(\"-mime_type_count\")\n if documents_total > 0\n else []\n )\n\n character_count = (\n documents.annotate(\n characters=Length(\"content\"),\n )\n .aggregate(Sum(\"characters\"))\n .get(\"characters__sum\")\n )\n\n return Response(\n {\n \"documents_total\": documents_total,\n \"documents_inbox\": documents_inbox,\n \"inbox_tag\": inbox_tag.first().pk if inbox_tag.exists() else None,\n \"document_file_type_counts\": document_file_type_counts,\n \"character_count\": character_count,\n \"tag_count\": len(tags),\n \"correspondent_count\": correspondent_count,\n \"document_type_count\": document_type_count,\n \"storage_path_count\": storage_path_count,\n },\n )\n\n\nclass BulkDownloadView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = BulkDownloadSerializer\n parser_classes = (parsers.JSONParser,)\n\n def post(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n ids = serializer.validated_data.get(\"documents\")\n compression = serializer.validated_data.get(\"compression\")\n content = serializer.validated_data.get(\"content\")\n follow_filename_format = serializer.validated_data.get(\"follow_formatting\")\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n temp = tempfile.NamedTemporaryFile(\n dir=settings.SCRATCH_DIR,\n suffix=\"-compressed-archive\",\n delete=False,\n )\n\n if content == \"both\":\n strategy_class = OriginalAndArchiveStrategy\n elif content == \"originals\":\n strategy_class = OriginalsOnlyStrategy\n else:\n strategy_class = ArchiveOnlyStrategy\n\n with zipfile.ZipFile(temp.name, \"w\", compression) as zipf:\n strategy = strategy_class(zipf, follow_filename_format)\n for id in ids:\n doc = Document.objects.get(id=id)\n strategy.add_document(doc)\n\n with open(temp.name, \"rb\") as f:\n response = HttpResponse(f, content_type=\"application/zip\")\n response[\"Content-Disposition\"] = '{}; filename=\"{}\"'.format(\n \"attachment\",\n \"documents.zip\",\n )\n\n return response\n\n\nclass StoragePathViewSet(ModelViewSet, PassUserMixin):\n model = StoragePath\n\n queryset = StoragePath.objects.annotate(document_count=Count(\"documents\")).order_by(\n Lower(\"name\"),\n )\n\n serializer_class = StoragePathSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = StoragePathFilterSet\n ordering_fields = (\"name\", \"path\", \"matching_algorithm\", \"match\", \"document_count\")\n\n\nclass UiSettingsView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = UiSettingsViewSerializer\n\n def get(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n user = User.objects.get(pk=request.user.id)\n ui_settings = {}\n if hasattr(user, \"ui_settings\"):\n ui_settings = user.ui_settings.settings\n if \"update_checking\" in ui_settings:\n 
ui_settings[\"update_checking\"][\n \"backend_setting\"\n ] = settings.ENABLE_UPDATE_CHECK\n else:\n ui_settings[\"update_checking\"] = {\n \"backend_setting\": settings.ENABLE_UPDATE_CHECK,\n }\n user_resp = {\n \"id\": user.id,\n \"username\": user.username,\n \"is_superuser\": user.is_superuser,\n \"groups\": list(user.groups.values_list(\"id\", flat=True)),\n }\n\n if len(user.first_name) > 0:\n user_resp[\"first_name\"] = user.first_name\n if len(user.last_name) > 0:\n user_resp[\"last_name\"] = user.last_name\n\n # strip <app_label>.\n roles = map(lambda perm: re.sub(r\"^\\w+.\", \"\", perm), user.get_all_permissions())\n return Response(\n {\n \"user\": user_resp,\n \"settings\": ui_settings,\n \"permissions\": roles,\n },\n )\n\n def post(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n serializer.save(user=self.request.user)\n\n return Response(\n {\n \"success\": True,\n },\n )\n\n\nclass RemoteVersionView(GenericAPIView):\n def get(self, request, format=None):\n remote_version = \"0.0.0\"\n is_greater_than_current = False\n current_version = packaging_version.parse(version.__full_version_str__)\n try:\n req = urllib.request.Request(\n \"https://api.github.com/repos/paperlessngx/\"\n \"paperlessngx/releases/latest\",\n )\n # Ensure a JSON response\n req.add_header(\"Accept\", \"application/json\")\n\n with urllib.request.urlopen(req) as response:\n remote = response.read().decode(\"utf8\")\n try:\n remote_json = json.loads(remote)\n remote_version = remote_json[\"tag_name\"]\n # Basically PEP 616 but that only went in 3.9\n if remote_version.startswith(\"ngx-\"):\n remote_version = remote_version[len(\"ngx-\") :]\n except ValueError:\n logger.debug(\"An error occurred parsing remote version json\")\n except urllib.error.URLError:\n logger.debug(\"An error occurred checking for available updates\")\n\n is_greater_than_current = (\n packaging_version.parse(\n remote_version,\n )\n > current_version\n )\n\n return Response(\n {\n \"version\": remote_version,\n \"update_available\": is_greater_than_current,\n },\n )\n\n\nclass TasksViewSet(ReadOnlyModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_class = TasksViewSerializer\n\n def get_queryset(self):\n queryset = (\n PaperlessTask.objects.filter(\n acknowledged=False,\n )\n .order_by(\"date_created\")\n .reverse()\n )\n task_id = self.request.query_params.get(\"task_id\")\n if task_id is not None:\n queryset = PaperlessTask.objects.filter(task_id=task_id)\n return queryset\n\n\nclass AcknowledgeTasksView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = AcknowledgeTasksViewSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n tasks = serializer.validated_data.get(\"tasks\")\n\n try:\n result = PaperlessTask.objects.filter(id__in=tasks).update(\n acknowledged=True,\n )\n return Response({\"result\": result})\n except Exception:\n return HttpResponseBadRequest()\n\n\nclass ShareLinkViewSet(ModelViewSet, PassUserMixin):\n model = ShareLink\n\n queryset = ShareLink.objects.all()\n\n serializer_class = ShareLinkSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = ShareLinkFilterSet\n ordering_fields = 
(\"created\", \"expiration\", \"document\")\n\n\nclass SharedLinkView(View):\n authentication_classes = []\n permission_classes = []\n\n def get(self, request, slug):\n share_link = ShareLink.objects.filter(slug=slug).first()\n if share_link is None:\n return HttpResponseRedirect(\"/accounts/login/?sharelink_notfound=1\")\n if share_link.expiration is not None and share_link.expiration < timezone.now():\n return HttpResponseRedirect(\"/accounts/login/?sharelink_expired=1\")\n return serve_file(\n doc=share_link.document,\n use_archive=share_link.file_version == \"archive\",\n disposition=\"inline\",\n )\n\n\ndef serve_file(doc: Document, use_archive: bool, disposition: str):\n if use_archive:\n file_handle = doc.archive_file\n filename = doc.get_public_filename(archive=True)\n mime_type = \"application/pdf\"\n else:\n file_handle = doc.source_file\n filename = doc.get_public_filename()\n mime_type = doc.mime_type\n # Support browser previewing csv files by using text mime type\n if mime_type in {\"application/csv\", \"text/csv\"} and disposition == \"inline\":\n mime_type = \"text/plain\"\n\n if doc.storage_type == Document.STORAGE_TYPE_GPG:\n file_handle = GnuPG.decrypted(file_handle)\n\n response = HttpResponse(file_handle, content_type=mime_type)\n # Firefox is not able to handle unicode characters in filename field\n # RFC 5987 addresses this issue\n # see https://datatracker.ietf.org/doc/html/rfc5987#section-4.2\n # Chromium cannot handle commas in the filename\n filename_normalized = normalize(\"NFKD\", filename.replace(\",\", \"_\")).encode(\n \"ascii\",\n \"ignore\",\n )\n filename_encoded = quote(filename)\n content_disposition = (\n f\"{disposition}; \"\n f'filename=\"{filename_normalized}\"; '\n f\"filename*=utf-8''{filename_encoded}\"\n )\n response[\"Content-Disposition\"] = content_disposition\n return response\n\n\nclass BulkEditObjectPermissionsView(GenericAPIView, PassUserMixin):\n permission_classes = (IsAuthenticated,)\n serializer_class = BulkEditObjectPermissionsSerializer\n parser_classes = (parsers.JSONParser,)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n user = self.request.user\n object_type = serializer.validated_data.get(\"object_type\")\n object_ids = serializer.validated_data.get(\"objects\")\n object_class = serializer.get_object_class(object_type)\n permissions = serializer.validated_data.get(\"permissions\")\n owner = serializer.validated_data.get(\"owner\")\n\n if not user.is_superuser:\n objs = object_class.objects.filter(pk__in=object_ids)\n has_perms = all((obj.owner == user or obj.owner is None) for obj in objs)\n\n if not has_perms:\n return HttpResponseForbidden(\"Insufficient permissions\")\n\n try:\n qs = object_class.objects.filter(id__in=object_ids)\n\n if \"owner\" in serializer.validated_data:\n qs.update(owner=owner)\n\n if \"permissions\" in serializer.validated_data:\n for obj in qs:\n set_permissions_for_object(permissions, obj)\n\n return Response({\"result\": \"OK\"})\n except Exception as e:\n logger.warning(f\"An error occurred performing bulk permissions edit: {e!s}\")\n return HttpResponseBadRequest(\n \"Error performing bulk permissions edit, check logs for more detail.\",\n )\n\n\nclass ConsumptionTemplateViewSet(ModelViewSet):\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n\n serializer_class = ConsumptionTemplateSerializer\n pagination_class = StandardPagination\n\n model = ConsumptionTemplate\n\n queryset = 
ConsumptionTemplate.objects.all().order_by(\"name\")\n\n\nclass CustomFieldViewSet(ModelViewSet):\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n\n serializer_class = CustomFieldSerializer\n pagination_class = StandardPagination\n\n model = CustomField\n\n queryset = CustomField.objects.all().order_by(\"-created\")\n", "path": "src/documents/views.py" } ]
[ { "content": "import itertools\nimport json\nimport logging\nimport os\nimport re\nimport tempfile\nimport urllib\nimport zipfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom time import mktime\nfrom unicodedata import normalize\nfrom urllib.parse import quote\n\nimport pathvalidate\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db.models import Case\nfrom django.db.models import Count\nfrom django.db.models import IntegerField\nfrom django.db.models import Max\nfrom django.db.models import Sum\nfrom django.db.models import When\nfrom django.db.models.functions import Length\nfrom django.db.models.functions import Lower\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseBadRequest\nfrom django.http import HttpResponseForbidden\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import get_language\nfrom django.views import View\nfrom django.views.decorators.cache import cache_control\nfrom django.views.generic import TemplateView\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom langdetect import detect\nfrom packaging import version as packaging_version\nfrom rest_framework import parsers\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.filters import SearchFilter\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.mixins import CreateModelMixin\nfrom rest_framework.mixins import DestroyModelMixin\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.mixins import RetrieveModelMixin\nfrom rest_framework.mixins import UpdateModelMixin\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\nfrom rest_framework.viewsets import ViewSet\n\nfrom documents import bulk_edit\nfrom documents.bulk_download import ArchiveOnlyStrategy\nfrom documents.bulk_download import OriginalAndArchiveStrategy\nfrom documents.bulk_download import OriginalsOnlyStrategy\nfrom documents.classifier import load_classifier\nfrom documents.data_models import ConsumableDocument\nfrom documents.data_models import DocumentMetadataOverrides\nfrom documents.data_models import DocumentSource\nfrom documents.filters import CorrespondentFilterSet\nfrom documents.filters import DocumentFilterSet\nfrom documents.filters import DocumentTypeFilterSet\nfrom documents.filters import ObjectOwnedOrGrantedPermissionsFilter\nfrom documents.filters import ShareLinkFilterSet\nfrom documents.filters import StoragePathFilterSet\nfrom documents.filters import TagFilterSet\nfrom documents.matching import match_correspondents\nfrom documents.matching import match_document_types\nfrom documents.matching import match_storage_paths\nfrom documents.matching import match_tags\nfrom documents.models import ConsumptionTemplate\nfrom documents.models import Correspondent\nfrom documents.models import CustomField\nfrom documents.models import Document\nfrom documents.models import DocumentType\nfrom documents.models import Note\nfrom documents.models import 
PaperlessTask\nfrom documents.models import SavedView\nfrom documents.models import ShareLink\nfrom documents.models import StoragePath\nfrom documents.models import Tag\nfrom documents.parsers import get_parser_class_for_mime_type\nfrom documents.parsers import parse_date_generator\nfrom documents.permissions import PaperlessAdminPermissions\nfrom documents.permissions import PaperlessObjectPermissions\nfrom documents.permissions import get_objects_for_user_owner_aware\nfrom documents.permissions import has_perms_owner_aware\nfrom documents.permissions import set_permissions_for_object\nfrom documents.serialisers import AcknowledgeTasksViewSerializer\nfrom documents.serialisers import BulkDownloadSerializer\nfrom documents.serialisers import BulkEditObjectPermissionsSerializer\nfrom documents.serialisers import BulkEditSerializer\nfrom documents.serialisers import ConsumptionTemplateSerializer\nfrom documents.serialisers import CorrespondentSerializer\nfrom documents.serialisers import CustomFieldSerializer\nfrom documents.serialisers import DocumentListSerializer\nfrom documents.serialisers import DocumentSerializer\nfrom documents.serialisers import DocumentTypeSerializer\nfrom documents.serialisers import PostDocumentSerializer\nfrom documents.serialisers import SavedViewSerializer\nfrom documents.serialisers import ShareLinkSerializer\nfrom documents.serialisers import StoragePathSerializer\nfrom documents.serialisers import TagSerializer\nfrom documents.serialisers import TagSerializerVersion1\nfrom documents.serialisers import TasksViewSerializer\nfrom documents.serialisers import UiSettingsViewSerializer\nfrom documents.tasks import consume_file\nfrom paperless import version\nfrom paperless.db import GnuPG\nfrom paperless.views import StandardPagination\n\nif settings.AUDIT_LOG_ENABLED:\n from auditlog.models import LogEntry\n\nlogger = logging.getLogger(\"paperless.api\")\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n def get_frontend_language(self):\n if hasattr(\n self.request.user,\n \"ui_settings\",\n ) and self.request.user.ui_settings.settings.get(\"language\"):\n lang = self.request.user.ui_settings.settings.get(\"language\")\n else:\n lang = get_language()\n # This is here for the following reason:\n # Django identifies languages in the form \"en-us\"\n # However, angular generates locales as \"en-US\".\n # this translates between these two forms.\n if \"-\" in lang:\n first = lang[: lang.index(\"-\")]\n second = lang[lang.index(\"-\") + 1 :]\n return f\"{first}-{second.upper()}\"\n else:\n return lang\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"cookie_prefix\"] = settings.COOKIE_PREFIX\n context[\"username\"] = self.request.user.username\n context[\"full_name\"] = self.request.user.get_full_name()\n context[\"styles_css\"] = f\"frontend/{self.get_frontend_language()}/styles.css\"\n context[\"runtime_js\"] = f\"frontend/{self.get_frontend_language()}/runtime.js\"\n context[\n \"polyfills_js\"\n ] = f\"frontend/{self.get_frontend_language()}/polyfills.js\"\n context[\"main_js\"] = f\"frontend/{self.get_frontend_language()}/main.js\"\n context[\n \"webmanifest\"\n ] = f\"frontend/{self.get_frontend_language()}/manifest.webmanifest\"\n context[\n \"apple_touch_icon\"\n ] = f\"frontend/{self.get_frontend_language()}/apple-touch-icon.png\"\n return context\n\n\nclass PassUserMixin(CreateModelMixin):\n \"\"\"\n Pass a user object to serializer\n \"\"\"\n\n def get_serializer(self, *args, 
**kwargs):\n kwargs.setdefault(\"user\", self.request.user)\n kwargs.setdefault(\n \"full_perms\",\n self.request.query_params.get(\"full_perms\", False),\n )\n return super().get_serializer(*args, **kwargs)\n\n\nclass CorrespondentViewSet(ModelViewSet, PassUserMixin):\n model = Correspondent\n\n queryset = Correspondent.objects.annotate(\n document_count=Count(\"documents\"),\n last_correspondence=Max(\"documents__created\"),\n ).order_by(Lower(\"name\"))\n\n serializer_class = CorrespondentSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = CorrespondentFilterSet\n ordering_fields = (\n \"name\",\n \"matching_algorithm\",\n \"match\",\n \"document_count\",\n \"last_correspondence\",\n )\n\n\nclass TagViewSet(ModelViewSet, PassUserMixin):\n model = Tag\n\n queryset = Tag.objects.annotate(document_count=Count(\"documents\")).order_by(\n Lower(\"name\"),\n )\n\n def get_serializer_class(self, *args, **kwargs):\n if int(self.request.version) == 1:\n return TagSerializerVersion1\n else:\n return TagSerializer\n\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = TagFilterSet\n ordering_fields = (\"color\", \"name\", \"matching_algorithm\", \"match\", \"document_count\")\n\n\nclass DocumentTypeViewSet(ModelViewSet, PassUserMixin):\n model = DocumentType\n\n queryset = DocumentType.objects.annotate(\n document_count=Count(\"documents\"),\n ).order_by(Lower(\"name\"))\n\n serializer_class = DocumentTypeSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = DocumentTypeFilterSet\n ordering_fields = (\"name\", \"matching_algorithm\", \"match\", \"document_count\")\n\n\nclass DocumentViewSet(\n PassUserMixin,\n RetrieveModelMixin,\n UpdateModelMixin,\n DestroyModelMixin,\n ListModelMixin,\n GenericViewSet,\n):\n model = Document\n queryset = Document.objects.annotate(num_notes=Count(\"notes\"))\n serializer_class = DocumentSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n SearchFilter,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = DocumentFilterSet\n search_fields = (\"title\", \"correspondent__name\", \"content\")\n ordering_fields = (\n \"id\",\n \"title\",\n \"correspondent__name\",\n \"document_type__name\",\n \"created\",\n \"modified\",\n \"added\",\n \"archive_serial_number\",\n \"num_notes\",\n \"owner\",\n )\n\n def get_queryset(self):\n return Document.objects.distinct().annotate(num_notes=Count(\"notes\"))\n\n def get_serializer(self, *args, **kwargs):\n fields_param = self.request.query_params.get(\"fields\", None)\n fields = fields_param.split(\",\") if fields_param else None\n truncate_content = self.request.query_params.get(\"truncate_content\", \"False\")\n kwargs.setdefault(\"context\", self.get_serializer_context())\n kwargs.setdefault(\"fields\", fields)\n kwargs.setdefault(\"truncate_content\", truncate_content.lower() in [\"true\", \"1\"])\n kwargs.setdefault(\n 
\"full_perms\",\n self.request.query_params.get(\"full_perms\", False),\n )\n return super().get_serializer(*args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n response = super().update(request, *args, **kwargs)\n from documents import index\n\n index.add_or_update_document(self.get_object())\n return response\n\n def destroy(self, request, *args, **kwargs):\n from documents import index\n\n index.remove_document_from_index(self.get_object())\n return super().destroy(request, *args, **kwargs)\n\n @staticmethod\n def original_requested(request):\n return (\n \"original\" in request.query_params\n and request.query_params[\"original\"] == \"true\"\n )\n\n def file_response(self, pk, request, disposition):\n doc = Document.objects.get(id=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n return serve_file(\n doc=doc,\n use_archive=not self.original_requested(request)\n and doc.has_archive_version,\n disposition=disposition,\n )\n\n def get_metadata(self, file, mime_type):\n if not os.path.isfile(file):\n return None\n\n parser_class = get_parser_class_for_mime_type(mime_type)\n if parser_class:\n parser = parser_class(progress_callback=None, logging_group=None)\n\n try:\n return parser.extract_metadata(file, mime_type)\n except Exception:\n # TODO: cover GPG errors, remove later.\n return []\n else:\n return []\n\n def get_filesize(self, filename):\n if os.path.isfile(filename):\n return os.stat(filename).st_size\n else:\n return None\n\n @action(methods=[\"get\"], detail=True)\n def metadata(self, request, pk=None):\n try:\n doc = Document.objects.get(pk=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n except Document.DoesNotExist:\n raise Http404\n\n meta = {\n \"original_checksum\": doc.checksum,\n \"original_size\": self.get_filesize(doc.source_path),\n \"original_mime_type\": doc.mime_type,\n \"media_filename\": doc.filename,\n \"has_archive_version\": doc.has_archive_version,\n \"original_metadata\": self.get_metadata(doc.source_path, doc.mime_type),\n \"archive_checksum\": doc.archive_checksum,\n \"archive_media_filename\": doc.archive_filename,\n \"original_filename\": doc.original_filename,\n }\n\n lang = \"en\"\n try:\n lang = detect(doc.content)\n except Exception:\n pass\n meta[\"lang\"] = lang\n\n if doc.has_archive_version:\n meta[\"archive_size\"] = self.get_filesize(doc.archive_path)\n meta[\"archive_metadata\"] = self.get_metadata(\n doc.archive_path,\n \"application/pdf\",\n )\n else:\n meta[\"archive_size\"] = None\n meta[\"archive_metadata\"] = None\n\n return Response(meta)\n\n @action(methods=[\"get\"], detail=True)\n def suggestions(self, request, pk=None):\n doc = get_object_or_404(Document, pk=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n\n classifier = load_classifier()\n\n dates = []\n if settings.NUMBER_OF_SUGGESTED_DATES > 0:\n gen = parse_date_generator(doc.filename, doc.content)\n dates = sorted(\n {i for i in itertools.islice(gen, settings.NUMBER_OF_SUGGESTED_DATES)},\n )\n\n return Response(\n {\n \"correspondents\": [\n c.id for c in match_correspondents(doc, classifier, request.user)\n ],\n \"tags\": [t.id for t in match_tags(doc, classifier, request.user)],\n 
\"document_types\": [\n dt.id for dt in match_document_types(doc, classifier, request.user)\n ],\n \"storage_paths\": [\n dt.id for dt in match_storage_paths(doc, classifier, request.user)\n ],\n \"dates\": [\n date.strftime(\"%Y-%m-%d\") for date in dates if date is not None\n ],\n },\n )\n\n @action(methods=[\"get\"], detail=True)\n def preview(self, request, pk=None):\n try:\n response = self.file_response(pk, request, \"inline\")\n return response\n except (FileNotFoundError, Document.DoesNotExist):\n raise Http404\n\n @action(methods=[\"get\"], detail=True)\n @method_decorator(cache_control(public=False, max_age=315360000))\n def thumb(self, request, pk=None):\n try:\n doc = Document.objects.get(id=pk)\n if request.user is not None and not has_perms_owner_aware(\n request.user,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions\")\n if doc.storage_type == Document.STORAGE_TYPE_GPG:\n handle = GnuPG.decrypted(doc.thumbnail_file)\n else:\n handle = doc.thumbnail_file\n # TODO: Send ETag information and use that to send new thumbnails\n # if available\n\n return HttpResponse(handle, content_type=\"image/webp\")\n except (FileNotFoundError, Document.DoesNotExist):\n raise Http404\n\n @action(methods=[\"get\"], detail=True)\n def download(self, request, pk=None):\n try:\n return self.file_response(pk, request, \"attachment\")\n except (FileNotFoundError, Document.DoesNotExist):\n raise Http404\n\n def getNotes(self, doc):\n return [\n {\n \"id\": c.id,\n \"note\": c.note,\n \"created\": c.created,\n \"user\": {\n \"id\": c.user.id,\n \"username\": c.user.username,\n \"first_name\": c.user.first_name,\n \"last_name\": c.user.last_name,\n },\n }\n for c in Note.objects.filter(document=doc).order_by(\"-created\")\n ]\n\n @action(methods=[\"get\", \"post\", \"delete\"], detail=True)\n def notes(self, request, pk=None):\n currentUser = request.user\n try:\n doc = Document.objects.get(pk=pk)\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n \"view_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions to view notes\")\n except Document.DoesNotExist:\n raise Http404\n\n if request.method == \"GET\":\n try:\n return Response(self.getNotes(doc))\n except Exception as e:\n logger.warning(f\"An error occurred retrieving notes: {e!s}\")\n return Response(\n {\"error\": \"Error retrieving notes, check logs for more detail.\"},\n )\n elif request.method == \"POST\":\n try:\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n \"change_document\",\n doc,\n ):\n return HttpResponseForbidden(\n \"Insufficient permissions to create notes\",\n )\n\n c = Note.objects.create(\n document=doc,\n note=request.data[\"note\"],\n user=currentUser,\n )\n c.save()\n # If audit log is enabled make an entry in the log\n # about this note change\n if settings.AUDIT_LOG_ENABLED:\n LogEntry.objects.log_create(\n instance=doc,\n changes=json.dumps(\n {\n \"Note Added\": [\"None\", c.id],\n },\n ),\n action=LogEntry.Action.UPDATE,\n )\n\n doc.modified = timezone.now()\n doc.save()\n\n from documents import index\n\n index.add_or_update_document(self.get_object())\n\n return Response(self.getNotes(doc))\n except Exception as e:\n logger.warning(f\"An error occurred saving note: {e!s}\")\n return Response(\n {\n \"error\": \"Error saving note, check logs for more detail.\",\n },\n )\n elif request.method == \"DELETE\":\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n 
\"change_document\",\n doc,\n ):\n return HttpResponseForbidden(\"Insufficient permissions to delete notes\")\n\n note = Note.objects.get(id=int(request.GET.get(\"id\")))\n if settings.AUDIT_LOG_ENABLED:\n LogEntry.objects.log_create(\n instance=doc,\n changes=json.dumps(\n {\n \"Note Deleted\": [note.id, \"None\"],\n },\n ),\n action=LogEntry.Action.UPDATE,\n )\n\n note.delete()\n\n doc.modified = timezone.now()\n doc.save()\n\n from documents import index\n\n index.add_or_update_document(doc)\n\n return Response(self.getNotes(doc))\n\n return Response(\n {\n \"error\": \"error\",\n },\n )\n\n @action(methods=[\"get\"], detail=True)\n def share_links(self, request, pk=None):\n currentUser = request.user\n try:\n doc = Document.objects.get(pk=pk)\n if currentUser is not None and not has_perms_owner_aware(\n currentUser,\n \"change_document\",\n doc,\n ):\n return HttpResponseForbidden(\n \"Insufficient permissions to add share link\",\n )\n except Document.DoesNotExist:\n raise Http404\n\n if request.method == \"GET\":\n now = timezone.now()\n links = [\n {\n \"id\": c.id,\n \"created\": c.created,\n \"expiration\": c.expiration,\n \"slug\": c.slug,\n }\n for c in ShareLink.objects.filter(document=doc)\n .exclude(expiration__lt=now)\n .order_by(\"-created\")\n ]\n return Response(links)\n\n\nclass SearchResultSerializer(DocumentSerializer, PassUserMixin):\n def to_representation(self, instance):\n doc = Document.objects.get(id=instance[\"id\"])\n notes = \",\".join(\n [str(c.note) for c in Note.objects.filter(document=instance[\"id\"])],\n )\n r = super().to_representation(doc)\n r[\"__search_hit__\"] = {\n \"score\": instance.score,\n \"highlights\": instance.highlights(\"content\", text=doc.content),\n \"note_highlights\": instance.highlights(\"notes\", text=notes)\n if doc\n else None,\n \"rank\": instance.rank,\n }\n\n return r\n\n\nclass UnifiedSearchViewSet(DocumentViewSet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.searcher = None\n\n def get_serializer_class(self):\n if self._is_search_request():\n return SearchResultSerializer\n else:\n return DocumentSerializer\n\n def _is_search_request(self):\n return (\n \"query\" in self.request.query_params\n or \"more_like_id\" in self.request.query_params\n )\n\n def filter_queryset(self, queryset):\n if self._is_search_request():\n from documents import index\n\n if \"query\" in self.request.query_params:\n query_class = index.DelayedFullTextQuery\n elif \"more_like_id\" in self.request.query_params:\n query_class = index.DelayedMoreLikeThisQuery\n else:\n raise ValueError\n\n return query_class(\n self.searcher,\n self.request.query_params,\n self.paginator.get_page_size(self.request),\n self.request.user,\n )\n else:\n return super().filter_queryset(queryset)\n\n def list(self, request, *args, **kwargs):\n if self._is_search_request():\n from documents import index\n\n try:\n with index.open_index_searcher() as s:\n self.searcher = s\n return super().list(request)\n except NotFound:\n raise\n except Exception as e:\n logger.warning(f\"An error occurred listing search results: {e!s}\")\n return HttpResponseBadRequest(\n \"Error listing search results, check logs for more detail.\",\n )\n else:\n return super().list(request)\n\n @action(detail=False, methods=[\"GET\"], name=\"Get Next ASN\")\n def next_asn(self, request, *args, **kwargs):\n return Response(\n (\n Document.objects.filter(archive_serial_number__gte=0)\n .order_by(\"archive_serial_number\")\n .last()\n .archive_serial_number\n or 0\n 
)\n + 1,\n )\n\n\nclass LogViewSet(ViewSet):\n permission_classes = (IsAuthenticated, PaperlessAdminPermissions)\n\n log_files = [\"paperless\", \"mail\"]\n\n def get_log_filename(self, log):\n return os.path.join(settings.LOGGING_DIR, f\"{log}.log\")\n\n def retrieve(self, request, pk=None, *args, **kwargs):\n if pk not in self.log_files:\n raise Http404\n\n filename = self.get_log_filename(pk)\n\n if not os.path.isfile(filename):\n raise Http404\n\n with open(filename) as f:\n lines = [line.rstrip() for line in f.readlines()]\n\n return Response(lines)\n\n def list(self, request, *args, **kwargs):\n exist = [\n log for log in self.log_files if os.path.isfile(self.get_log_filename(log))\n ]\n return Response(exist)\n\n\nclass SavedViewViewSet(ModelViewSet, PassUserMixin):\n model = SavedView\n\n queryset = SavedView.objects.all()\n serializer_class = SavedViewSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n\n def get_queryset(self):\n user = self.request.user\n return SavedView.objects.filter(owner=user)\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass BulkEditView(GenericAPIView, PassUserMixin):\n permission_classes = (IsAuthenticated,)\n serializer_class = BulkEditSerializer\n parser_classes = (parsers.JSONParser,)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n user = self.request.user\n method = serializer.validated_data.get(\"method\")\n parameters = serializer.validated_data.get(\"parameters\")\n documents = serializer.validated_data.get(\"documents\")\n\n if not user.is_superuser:\n document_objs = Document.objects.filter(pk__in=documents)\n has_perms = (\n all((doc.owner == user or doc.owner is None) for doc in document_objs)\n if method == bulk_edit.set_permissions\n else all(\n has_perms_owner_aware(user, \"change_document\", doc)\n for doc in document_objs\n )\n )\n\n if not has_perms:\n return HttpResponseForbidden(\"Insufficient permissions\")\n\n try:\n # TODO: parameter validation\n result = method(documents, **parameters)\n return Response({\"result\": result})\n except Exception as e:\n logger.warning(f\"An error occurred performing bulk edit: {e!s}\")\n return HttpResponseBadRequest(\n \"Error performing bulk edit, check logs for more detail.\",\n )\n\n\nclass PostDocumentView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = PostDocumentSerializer\n parser_classes = (parsers.MultiPartParser,)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n doc_name, doc_data = serializer.validated_data.get(\"document\")\n correspondent_id = serializer.validated_data.get(\"correspondent\")\n document_type_id = serializer.validated_data.get(\"document_type\")\n tag_ids = serializer.validated_data.get(\"tags\")\n title = serializer.validated_data.get(\"title\")\n created = serializer.validated_data.get(\"created\")\n archive_serial_number = serializer.validated_data.get(\"archive_serial_number\")\n\n t = int(mktime(datetime.now().timetuple()))\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n\n temp_file_path = Path(tempfile.mkdtemp(dir=settings.SCRATCH_DIR)) / Path(\n pathvalidate.sanitize_filename(doc_name),\n )\n\n temp_file_path.write_bytes(doc_data)\n\n os.utime(temp_file_path, times=(t, t))\n\n input_doc = ConsumableDocument(\n 
source=DocumentSource.ApiUpload,\n original_file=temp_file_path,\n )\n input_doc_overrides = DocumentMetadataOverrides(\n filename=doc_name,\n title=title,\n correspondent_id=correspondent_id,\n document_type_id=document_type_id,\n tag_ids=tag_ids,\n created=created,\n asn=archive_serial_number,\n owner_id=request.user.id,\n )\n\n async_task = consume_file.delay(\n input_doc,\n input_doc_overrides,\n )\n\n return Response(async_task.id)\n\n\nclass SelectionDataView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = DocumentListSerializer\n parser_classes = (parsers.MultiPartParser, parsers.JSONParser)\n\n def post(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n ids = serializer.validated_data.get(\"documents\")\n\n correspondents = Correspondent.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n tags = Tag.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n types = DocumentType.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n storage_paths = StoragePath.objects.annotate(\n document_count=Count(\n Case(When(documents__id__in=ids, then=1), output_field=IntegerField()),\n ),\n )\n\n r = Response(\n {\n \"selected_correspondents\": [\n {\"id\": t.id, \"document_count\": t.document_count}\n for t in correspondents\n ],\n \"selected_tags\": [\n {\"id\": t.id, \"document_count\": t.document_count} for t in tags\n ],\n \"selected_document_types\": [\n {\"id\": t.id, \"document_count\": t.document_count} for t in types\n ],\n \"selected_storage_paths\": [\n {\"id\": t.id, \"document_count\": t.document_count}\n for t in storage_paths\n ],\n },\n )\n\n return r\n\n\nclass SearchAutoCompleteView(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n user = self.request.user if hasattr(self.request, \"user\") else None\n\n if \"term\" in request.query_params:\n term = request.query_params[\"term\"]\n else:\n return HttpResponseBadRequest(\"Term required\")\n\n if \"limit\" in request.query_params:\n limit = int(request.query_params[\"limit\"])\n if limit <= 0:\n return HttpResponseBadRequest(\"Invalid limit\")\n else:\n limit = 10\n\n from documents import index\n\n ix = index.open_index()\n\n return Response(\n index.autocomplete(\n ix,\n term,\n limit,\n user,\n ),\n )\n\n\nclass StatisticsView(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n user = request.user if request.user is not None else None\n\n documents = (\n Document.objects.all()\n if user is None\n else get_objects_for_user_owner_aware(\n user,\n \"documents.view_document\",\n Document,\n )\n )\n tags = (\n Tag.objects.all()\n if user is None\n else get_objects_for_user_owner_aware(user, \"documents.view_tag\", Tag)\n )\n correspondent_count = (\n Correspondent.objects.count()\n if user is None\n else len(\n get_objects_for_user_owner_aware(\n user,\n \"documents.view_correspondent\",\n Correspondent,\n ),\n )\n )\n document_type_count = (\n DocumentType.objects.count()\n if user is None\n else len(\n get_objects_for_user_owner_aware(\n user,\n \"documents.view_documenttype\",\n DocumentType,\n ),\n )\n )\n storage_path_count = (\n StoragePath.objects.count()\n if user is None\n else len(\n 
get_objects_for_user_owner_aware(\n user,\n \"documents.view_storagepath\",\n StoragePath,\n ),\n )\n )\n\n documents_total = documents.count()\n\n inbox_tag = tags.filter(is_inbox_tag=True)\n\n documents_inbox = (\n documents.filter(tags__is_inbox_tag=True).distinct().count()\n if inbox_tag.exists()\n else None\n )\n\n document_file_type_counts = (\n documents.values(\"mime_type\")\n .annotate(mime_type_count=Count(\"mime_type\"))\n .order_by(\"-mime_type_count\")\n if documents_total > 0\n else []\n )\n\n character_count = (\n documents.annotate(\n characters=Length(\"content\"),\n )\n .aggregate(Sum(\"characters\"))\n .get(\"characters__sum\")\n )\n\n return Response(\n {\n \"documents_total\": documents_total,\n \"documents_inbox\": documents_inbox,\n \"inbox_tag\": inbox_tag.first().pk if inbox_tag.exists() else None,\n \"document_file_type_counts\": document_file_type_counts,\n \"character_count\": character_count,\n \"tag_count\": len(tags),\n \"correspondent_count\": correspondent_count,\n \"document_type_count\": document_type_count,\n \"storage_path_count\": storage_path_count,\n },\n )\n\n\nclass BulkDownloadView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = BulkDownloadSerializer\n parser_classes = (parsers.JSONParser,)\n\n def post(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n ids = serializer.validated_data.get(\"documents\")\n compression = serializer.validated_data.get(\"compression\")\n content = serializer.validated_data.get(\"content\")\n follow_filename_format = serializer.validated_data.get(\"follow_formatting\")\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n temp = tempfile.NamedTemporaryFile(\n dir=settings.SCRATCH_DIR,\n suffix=\"-compressed-archive\",\n delete=False,\n )\n\n if content == \"both\":\n strategy_class = OriginalAndArchiveStrategy\n elif content == \"originals\":\n strategy_class = OriginalsOnlyStrategy\n else:\n strategy_class = ArchiveOnlyStrategy\n\n with zipfile.ZipFile(temp.name, \"w\", compression) as zipf:\n strategy = strategy_class(zipf, follow_filename_format)\n for id in ids:\n doc = Document.objects.get(id=id)\n strategy.add_document(doc)\n\n with open(temp.name, \"rb\") as f:\n response = HttpResponse(f, content_type=\"application/zip\")\n response[\"Content-Disposition\"] = '{}; filename=\"{}\"'.format(\n \"attachment\",\n \"documents.zip\",\n )\n\n return response\n\n\nclass StoragePathViewSet(ModelViewSet, PassUserMixin):\n model = StoragePath\n\n queryset = StoragePath.objects.annotate(document_count=Count(\"documents\")).order_by(\n Lower(\"name\"),\n )\n\n serializer_class = StoragePathSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = StoragePathFilterSet\n ordering_fields = (\"name\", \"path\", \"matching_algorithm\", \"match\", \"document_count\")\n\n\nclass UiSettingsView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = UiSettingsViewSerializer\n\n def get(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n user = User.objects.get(pk=request.user.id)\n ui_settings = {}\n if hasattr(user, \"ui_settings\"):\n ui_settings = user.ui_settings.settings\n if \"update_checking\" in ui_settings:\n 
ui_settings[\"update_checking\"][\n \"backend_setting\"\n ] = settings.ENABLE_UPDATE_CHECK\n else:\n ui_settings[\"update_checking\"] = {\n \"backend_setting\": settings.ENABLE_UPDATE_CHECK,\n }\n user_resp = {\n \"id\": user.id,\n \"username\": user.username,\n \"is_superuser\": user.is_superuser,\n \"groups\": list(user.groups.values_list(\"id\", flat=True)),\n }\n\n if len(user.first_name) > 0:\n user_resp[\"first_name\"] = user.first_name\n if len(user.last_name) > 0:\n user_resp[\"last_name\"] = user.last_name\n\n # strip <app_label>.\n roles = map(lambda perm: re.sub(r\"^\\w+.\", \"\", perm), user.get_all_permissions())\n return Response(\n {\n \"user\": user_resp,\n \"settings\": ui_settings,\n \"permissions\": roles,\n },\n )\n\n def post(self, request, format=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n serializer.save(user=self.request.user)\n\n return Response(\n {\n \"success\": True,\n },\n )\n\n\nclass RemoteVersionView(GenericAPIView):\n def get(self, request, format=None):\n remote_version = \"0.0.0\"\n is_greater_than_current = False\n current_version = packaging_version.parse(version.__full_version_str__)\n try:\n req = urllib.request.Request(\n \"https://api.github.com/repos/paperlessngx/\"\n \"paperlessngx/releases/latest\",\n )\n # Ensure a JSON response\n req.add_header(\"Accept\", \"application/json\")\n\n with urllib.request.urlopen(req) as response:\n remote = response.read().decode(\"utf8\")\n try:\n remote_json = json.loads(remote)\n remote_version = remote_json[\"tag_name\"]\n # Basically PEP 616 but that only went in 3.9\n if remote_version.startswith(\"ngx-\"):\n remote_version = remote_version[len(\"ngx-\") :]\n except ValueError:\n logger.debug(\"An error occurred parsing remote version json\")\n except urllib.error.URLError:\n logger.debug(\"An error occurred checking for available updates\")\n\n is_greater_than_current = (\n packaging_version.parse(\n remote_version,\n )\n > current_version\n )\n\n return Response(\n {\n \"version\": remote_version,\n \"update_available\": is_greater_than_current,\n },\n )\n\n\nclass TasksViewSet(ReadOnlyModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_class = TasksViewSerializer\n\n def get_queryset(self):\n queryset = (\n PaperlessTask.objects.filter(\n acknowledged=False,\n )\n .order_by(\"date_created\")\n .reverse()\n )\n task_id = self.request.query_params.get(\"task_id\")\n if task_id is not None:\n queryset = PaperlessTask.objects.filter(task_id=task_id)\n return queryset\n\n\nclass AcknowledgeTasksView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = AcknowledgeTasksViewSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n tasks = serializer.validated_data.get(\"tasks\")\n\n try:\n result = PaperlessTask.objects.filter(id__in=tasks).update(\n acknowledged=True,\n )\n return Response({\"result\": result})\n except Exception:\n return HttpResponseBadRequest()\n\n\nclass ShareLinkViewSet(ModelViewSet, PassUserMixin):\n model = ShareLink\n\n queryset = ShareLink.objects.all()\n\n serializer_class = ShareLinkSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (\n DjangoFilterBackend,\n OrderingFilter,\n ObjectOwnedOrGrantedPermissionsFilter,\n )\n filterset_class = ShareLinkFilterSet\n ordering_fields = 
(\"created\", \"expiration\", \"document\")\n\n\nclass SharedLinkView(View):\n authentication_classes = []\n permission_classes = []\n\n def get(self, request, slug):\n share_link = ShareLink.objects.filter(slug=slug).first()\n if share_link is None:\n return HttpResponseRedirect(\"/accounts/login/?sharelink_notfound=1\")\n if share_link.expiration is not None and share_link.expiration < timezone.now():\n return HttpResponseRedirect(\"/accounts/login/?sharelink_expired=1\")\n return serve_file(\n doc=share_link.document,\n use_archive=share_link.file_version == \"archive\",\n disposition=\"inline\",\n )\n\n\ndef serve_file(doc: Document, use_archive: bool, disposition: str):\n if use_archive:\n file_handle = doc.archive_file\n filename = doc.get_public_filename(archive=True)\n mime_type = \"application/pdf\"\n else:\n file_handle = doc.source_file\n filename = doc.get_public_filename()\n mime_type = doc.mime_type\n # Support browser previewing csv files by using text mime type\n if mime_type in {\"application/csv\", \"text/csv\"} and disposition == \"inline\":\n mime_type = \"text/plain\"\n\n if doc.storage_type == Document.STORAGE_TYPE_GPG:\n file_handle = GnuPG.decrypted(file_handle)\n\n response = HttpResponse(file_handle, content_type=mime_type)\n # Firefox is not able to handle unicode characters in filename field\n # RFC 5987 addresses this issue\n # see https://datatracker.ietf.org/doc/html/rfc5987#section-4.2\n # Chromium cannot handle commas in the filename\n filename_normalized = normalize(\"NFKD\", filename.replace(\",\", \"_\")).encode(\n \"ascii\",\n \"ignore\",\n )\n filename_encoded = quote(filename)\n content_disposition = (\n f\"{disposition}; \"\n f'filename=\"{filename_normalized}\"; '\n f\"filename*=utf-8''{filename_encoded}\"\n )\n response[\"Content-Disposition\"] = content_disposition\n return response\n\n\nclass BulkEditObjectPermissionsView(GenericAPIView, PassUserMixin):\n permission_classes = (IsAuthenticated,)\n serializer_class = BulkEditObjectPermissionsSerializer\n parser_classes = (parsers.JSONParser,)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n user = self.request.user\n object_type = serializer.validated_data.get(\"object_type\")\n object_ids = serializer.validated_data.get(\"objects\")\n object_class = serializer.get_object_class(object_type)\n permissions = serializer.validated_data.get(\"permissions\")\n owner = serializer.validated_data.get(\"owner\")\n\n if not user.is_superuser:\n objs = object_class.objects.filter(pk__in=object_ids)\n has_perms = all((obj.owner == user or obj.owner is None) for obj in objs)\n\n if not has_perms:\n return HttpResponseForbidden(\"Insufficient permissions\")\n\n try:\n qs = object_class.objects.filter(id__in=object_ids)\n\n if \"owner\" in serializer.validated_data:\n qs.update(owner=owner)\n\n if \"permissions\" in serializer.validated_data:\n for obj in qs:\n set_permissions_for_object(permissions, obj)\n\n return Response({\"result\": \"OK\"})\n except Exception as e:\n logger.warning(f\"An error occurred performing bulk permissions edit: {e!s}\")\n return HttpResponseBadRequest(\n \"Error performing bulk permissions edit, check logs for more detail.\",\n )\n\n\nclass ConsumptionTemplateViewSet(ModelViewSet):\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n\n serializer_class = ConsumptionTemplateSerializer\n pagination_class = StandardPagination\n\n model = ConsumptionTemplate\n\n queryset = 
ConsumptionTemplate.objects.all().order_by(\"name\")\n\n\nclass CustomFieldViewSet(ModelViewSet):\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n\n serializer_class = CustomFieldSerializer\n pagination_class = StandardPagination\n\n model = CustomField\n\n queryset = CustomField.objects.all().order_by(\"-created\")\n", "path": "src/documents/views.py" } ]
diff --git a/src/documents/views.py b/src/documents/views.py index 00d16022f4b..386f8740441 100644 --- a/src/documents/views.py +++ b/src/documents/views.py @@ -583,7 +583,7 @@ def notes(self, request, pk=None): from documents import index - index.add_or_update_document(self.get_object()) + index.add_or_update_document(doc) return Response(self.getNotes(doc))
mozilla__pontoon-3090
Document DDoS mitigation The `BLOCKED_IPS` env variable is not documented here: https://mozilla-pontoon.readthedocs.io/en/latest/admin/deployment.html. We should also add a paragraph here on DDoS mitigation: https://mozilla-pontoon.readthedocs.io/en/latest/admin/maintenance.html
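For context, the settings file included below reads `BLOCKED_IPS` as a comma-separated environment variable and wires `pontoon.base.middleware.BlockedIpMiddleware` into `MIDDLEWARE` to enforce it. Pontoon's actual middleware is not part of this record, so the following is only a minimal sketch of what such a check could look like; the `_client_ip` helper, the header handling, and the 403 response are illustrative assumptions, not the project's implementation.

``` python
# Hypothetical sketch only: names and details are illustrative assumptions,
# not Pontoon's real BlockedIpMiddleware.
from django.conf import settings
from django.http import HttpResponseForbidden


def _client_ip(request):
    # Assumption: behind a reverse proxy, use the left-most X-Forwarded-For
    # entry; otherwise fall back to REMOTE_ADDR.
    forwarded = request.META.get("HTTP_X_FORWARDED_FOR")
    if forwarded:
        return forwarded.split(",")[0].strip()
    return request.META.get("REMOTE_ADDR")


class BlockedIpMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        # settings.BLOCKED_IPS is built with os.environ.get("BLOCKED_IPS", "").split(","),
        # so drop the empty string produced when the variable is unset.
        self.blocked = {ip.strip() for ip in settings.BLOCKED_IPS if ip.strip()}

    def __call__(self, request):
        if _client_ip(request) in self.blocked:
            return HttpResponseForbidden()
        return self.get_response(request)
```

In this sketch the check runs before any view code, which is the cheapest point at which to drop traffic from a listed address.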
[ { "content": "\"\"\"Django settings for Pontoon.\"\"\"\nimport re\nimport os\nimport socket\n\nfrom django.utils.functional import lazy\n\nimport dj_database_url\n\n\n_dirname = os.path.dirname\n\nROOT = _dirname(_dirname(_dirname(os.path.abspath(__file__))))\n\n\ndef path(*args):\n return os.path.join(ROOT, *args)\n\n\n# Environment-dependent settings. These are loaded from environment\n# variables.\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ[\"SECRET_KEY\"]\n\n# Is this a dev instance?\nDEV = os.environ.get(\"DJANGO_DEV\", \"False\") != \"False\"\n\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\") != \"False\"\n\nHEROKU_DEMO = os.environ.get(\"HEROKU_DEMO\", \"False\") != \"False\"\n\nLOGOUT_REDIRECT_URL = \"/\"\n\nADMINS = MANAGERS = (\n (os.environ.get(\"ADMIN_NAME\", \"\"), os.environ.get(\"ADMIN_EMAIL\", \"\")),\n)\n\n# A list of project manager email addresses to send project requests to\nPROJECT_MANAGERS = os.environ.get(\"PROJECT_MANAGERS\", \"\").split(\",\")\n\n\ndef _get_site_url_netloc():\n from urllib.parse import urlparse\n from django.conf import settings\n\n return urlparse(settings.SITE_URL).netloc\n\n\ndef _default_from_email():\n return os.environ.get(\n \"DEFAULT_FROM_EMAIL\", f\"Pontoon <pontoon@{_get_site_url_netloc()}>\"\n )\n\n\nDEFAULT_FROM_EMAIL = lazy(_default_from_email, str)()\n\n# VCS identity to be used when committing translations.\nVCS_SYNC_NAME = os.environ.get(\"VCS_SYNC_NAME\", \"Pontoon\")\nVCS_SYNC_EMAIL = os.environ.get(\"VCS_SYNC_EMAIL\", \"[email protected]\")\n\nDATABASES = {\n \"default\": dj_database_url.config(default=\"mysql://root@localhost/pontoon\")\n}\n\n# Ensure that psycopg2 uses a secure SSL connection.\nif not DEV and not DEBUG:\n if \"OPTIONS\" not in DATABASES[\"default\"]:\n DATABASES[\"default\"][\"OPTIONS\"] = {}\n DATABASES[\"default\"][\"OPTIONS\"][\"sslmode\"] = \"require\"\n\nTAGADMIN_DIR = os.path.join(ROOT, \"tag-admin\")\nTRANSLATE_DIR = os.path.join(ROOT, \"translate\")\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.environ.get(\"STATIC_ROOT\", path(\"static\"))\n\n# Optional CDN hostname for static files, e.g. 
'//asdf.cloudfront.net'\nSTATIC_HOST = os.environ.get(\"STATIC_HOST\", \"\")\n\nSESSION_COOKIE_HTTPONLY = os.environ.get(\"SESSION_COOKIE_HTTPONLY\", \"True\") != \"False\"\nSESSION_COOKIE_SECURE = os.environ.get(\"SESSION_COOKIE_SECURE\", \"True\") != \"False\"\n\nAPP_URL_KEY = \"APP_URL\"\n\nSITE_URL = os.environ.get(\"SITE_URL\", \"http://localhost:8000\")\n\n# Custom LD_LIBRARY_PATH environment variable for SVN\nSVN_LD_LIBRARY_PATH = os.environ.get(\"SVN_LD_LIBRARY_PATH\", \"\")\n\n# URL to the RabbitMQ server\nBROKER_URL = os.environ.get(\"RABBITMQ_URL\", None)\n\n# Google Cloud Translation API key\nGOOGLE_TRANSLATE_API_KEY = os.environ.get(\"GOOGLE_TRANSLATE_API_KEY\", \"\")\n\n# Pontoon locale codes supported by Google Cloud AutoML Translation Project ID\n#\n# Source:\n# https://cloud.google.com/translate/automl/docs/languages#supported_codes_for_language_variants\nGOOGLE_AUTOML_SUPPORTED_LOCALES = [\n \"af\",\n \"ar\",\n \"az\",\n \"bg\",\n \"bn\",\n \"ca\",\n \"cs\",\n \"cy\",\n \"da\",\n \"de\",\n \"el\",\n \"es\",\n \"es-AR\",\n \"es-CL\",\n \"es-ES\",\n \"es-MX\",\n \"et\",\n \"fa\",\n \"fi\",\n \"fil\",\n \"fr\",\n \"gl\",\n \"gu-IN\",\n \"he\",\n \"hi\",\n \"hi-IN\",\n \"hr\",\n \"ht\",\n \"hu\",\n \"id\",\n \"is\",\n \"it\",\n \"ja\",\n \"jv\",\n \"ka\",\n \"km\",\n \"ko\",\n \"lt\",\n \"lv\",\n \"mr\",\n \"ms\",\n \"my\",\n \"nb-NO\",\n \"ne-NP\",\n \"nl\",\n \"pa-IN\",\n \"pa-PK\",\n \"pl\",\n \"ps\",\n \"pt\",\n \"pt-BR\",\n \"pt-PT\",\n \"ro\",\n \"ru\",\n \"sk\",\n \"sl\",\n \"sq\",\n \"sr\",\n \"sv-SE\",\n \"sw\",\n \"ta\",\n \"te\",\n \"th\",\n \"tr\",\n \"uk\",\n \"ur\",\n \"uz\",\n \"vi\",\n \"zh-CN\",\n \"zh-HK\",\n \"zh-TW\",\n \"zu\",\n]\n\n# Google Cloud AutoML Translation Project ID\nGOOGLE_AUTOML_PROJECT_ID = os.environ.get(\"GOOGLE_AUTOML_PROJECT_ID\", \"\")\n\n# It is recommended to make Google Cloud AutoML Translation warmup requests every minute,\n# although in our experience every 5 minutes (300 seconds) is sufficient.\nGOOGLE_AUTOML_WARMUP_INTERVAL = float(\n os.environ.get(\"GOOGLE_AUTOML_WARMUP_INTERVAL\", \"300\")\n)\n\n# Microsoft Translator API Key\nMICROSOFT_TRANSLATOR_API_KEY = os.environ.get(\"MICROSOFT_TRANSLATOR_API_KEY\", \"\")\n\n# SYSTRAN Translate Settings\nSYSTRAN_TRANSLATE_API_KEY = os.environ.get(\"SYSTRAN_TRANSLATE_API_KEY\", \"\")\nSYSTRAN_TRANSLATE_SERVER = os.environ.get(\"SYSTRAN_TRANSLATE_SERVER\", \"\")\nSYSTRAN_TRANSLATE_PROFILE_OWNER = os.environ.get(\"SYSTRAN_TRANSLATE_PROFILE_OWNER\", \"\")\n\n# Google Analytics Key\nGOOGLE_ANALYTICS_KEY = os.environ.get(\"GOOGLE_ANALYTICS_KEY\", \"\")\n\n# Raygun.io configuration\nRAYGUN4PY_CONFIG = {\"api_key\": os.environ.get(\"RAYGUN_APIKEY\", \"\")}\n\n# Email settings\nEMAIL_HOST_USER = os.environ.get(\n \"EMAIL_HOST_USER\", os.environ.get(\"SENDGRID_USERNAME\", \"apikey\")\n)\nEMAIL_HOST = os.environ.get(\"EMAIL_HOST\", \"smtp.sendgrid.net\")\nEMAIL_PORT = int(os.environ.get(\"EMAIL_PORT\", \"587\"))\nEMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\", \"True\") != \"False\"\nEMAIL_USE_SSL = os.environ.get(\"EMAIL_USE_SSL\", \"False\") != \"False\"\nEMAIL_HOST_PASSWORD = os.environ.get(\n \"EMAIL_HOST_PASSWORD\", os.environ.get(\"SENDGRID_PASSWORD\", \"\")\n)\n\n# Log emails to console if the SendGrid credentials are missing.\nif EMAIL_HOST_USER and EMAIL_HOST_PASSWORD:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n# Environment-independent settings. 
These shouldn't have to change\n# between server environments.\nROOT_URLCONF = \"pontoon.urls\"\n\nINSTALLED_APPS = (\n \"pontoon.actionlog\",\n \"pontoon.administration\",\n \"pontoon.base\",\n \"pontoon.contributors\",\n \"pontoon.checks\",\n \"pontoon.insights\",\n \"pontoon.localizations\",\n \"pontoon.machinery\",\n \"pontoon.projects\",\n \"pontoon.sync\",\n \"pontoon.tags\",\n \"pontoon.teams\",\n \"pontoon.terminology\",\n \"pontoon.tour\",\n \"pontoon.translate\",\n \"pontoon.translations\",\n \"pontoon.uxactionlog\",\n \"pontoon.homepage\",\n # Django contrib apps\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n # Django sites app is required by django-allauth\n \"django.contrib.sites\",\n # Third-party apps, patches, fixes\n \"django_jinja\",\n \"pipeline\",\n \"guardian\",\n \"corsheaders\",\n \"allauth\",\n \"allauth.account\",\n \"allauth.socialaccount\",\n \"allauth.socialaccount.providers.fxa\",\n \"allauth.socialaccount.providers.github\",\n \"allauth.socialaccount.providers.google\",\n \"allauth.socialaccount.providers.gitlab\",\n \"allauth.socialaccount.providers.keycloak\",\n \"notifications\",\n \"graphene_django\",\n \"django_ace\",\n)\n\nBLOCKED_IPS = os.environ.get(\"BLOCKED_IPS\", \"\").split(\",\")\n\nMIDDLEWARE = (\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.middleware.gzip.GZipMiddleware\",\n \"pontoon.base.middleware.RaygunExceptionMiddleware\",\n \"pontoon.base.middleware.BlockedIpMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n)\n\nCONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"pontoon.base.context_processors.globals\",\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django_jinja.backend.Jinja2\",\n \"NAME\": \"jinja2\",\n \"APP_DIRS\": True,\n \"DIRS\": [os.path.join(TRANSLATE_DIR, \"public\")],\n \"OPTIONS\": {\n \"match_extension\": \"\",\n \"match_regex\": re.compile(\n r\"\"\"\n ^(?!(\n admin|\n registration|\n account|\n socialaccount|\n graphene|\n )/).*\\.(\n html|\n jinja|\n js|\n )$\n \"\"\",\n re.VERBOSE,\n ),\n \"context_processors\": CONTEXT_PROCESSORS,\n \"extensions\": [\n \"jinja2.ext.do\",\n \"jinja2.ext.loopcontrols\",\n \"jinja2.ext.with_\",\n \"jinja2.ext.i18n\",\n \"jinja2.ext.autoescape\",\n \"django_jinja.builtins.extensions.CsrfExtension\",\n \"django_jinja.builtins.extensions.CacheExtension\",\n \"django_jinja.builtins.extensions.TimezoneExtension\",\n \"django_jinja.builtins.extensions.UrlsExtension\",\n \"django_jinja.builtins.extensions.StaticFilesExtension\",\n \"django_jinja.builtins.extensions.DjangoFiltersExtension\",\n \"pipeline.jinja2.PipelineExtension\",\n ],\n },\n },\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [path(\"pontoon/base/templates/django\")],\n \"OPTIONS\": 
{\n \"debug\": DEBUG,\n \"context_processors\": CONTEXT_PROCESSORS,\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n },\n]\n\nSESSION_COOKIE_SAMESITE = \"lax\"\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nGUARDIAN_RAISE_403 = True\n\nPIPELINE_CSS = {\n \"base\": {\n \"source_filenames\": (\n \"css/dark-theme.css\",\n \"css/light-theme.css\",\n \"css/fontawesome-all.css\",\n \"css/nprogress.css\",\n \"css/boilerplate.css\",\n \"css/fonts.css\",\n \"css/style.css\",\n ),\n \"output_filename\": \"css/base.min.css\",\n },\n \"translate\": {\n \"source_filenames\": (\n \"translate.css\",\n \"css/dark-theme.css\",\n \"css/light-theme.css\",\n ),\n \"output_filename\": \"css/translate.min.css\",\n },\n \"admin\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/admin.css\",\n ),\n \"output_filename\": \"css/admin.min.css\",\n },\n \"admin_project\": {\n \"source_filenames\": (\n \"css/double_list_selector.css\",\n \"css/multiple_team_selector.css\",\n \"css/admin_project.css\",\n \"tag_admin.css\",\n ),\n \"output_filename\": \"css/admin_project.min.css\",\n },\n \"project\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/request.css\",\n \"css/contributors.css\",\n \"css/heading_info.css\",\n \"css/sidebar_menu.css\",\n \"css/multiple_team_selector.css\",\n \"css/manual_notifications.css\",\n \"css/insights_charts.css\",\n \"css/insights_tab.css\",\n ),\n \"output_filename\": \"css/project.min.css\",\n },\n \"insights\": {\n \"source_filenames\": (\n \"css/insights_charts.css\",\n \"css/insights.css\",\n ),\n \"output_filename\": \"css/insights.min.css\",\n },\n \"localization\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/contributors.css\",\n \"css/heading_info.css\",\n \"css/info.css\",\n \"css/download_selector.css\",\n \"css/insights_charts.css\",\n \"css/insights_tab.css\",\n ),\n \"output_filename\": \"css/localization.min.css\",\n },\n \"projects\": {\n \"source_filenames\": (\n \"css/heading_info.css\",\n \"css/table.css\",\n ),\n \"output_filename\": \"css/projects.min.css\",\n },\n \"team\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/double_list_selector.css\",\n \"css/download_selector.css\",\n \"css/contributors.css\",\n \"css/heading_info.css\",\n \"css/team.css\",\n \"css/request.css\",\n \"css/insights_charts.css\",\n \"css/insights_tab.css\",\n \"css/info.css\",\n ),\n \"output_filename\": \"css/team.min.css\",\n },\n \"teams\": {\n \"source_filenames\": (\n \"css/heading_info.css\",\n \"css/table.css\",\n \"css/request.css\",\n ),\n \"output_filename\": \"css/teams.min.css\",\n },\n \"sync_logs\": {\n \"source_filenames\": (\"css/sync_logs.css\",),\n \"output_filename\": \"css/sync_logs.min.css\",\n },\n \"profile\": {\n \"source_filenames\": (\n \"css/contributor.css\",\n \"css/insights_charts.css\",\n \"css/profile.css\",\n ),\n \"output_filename\": \"css/profile.min.css\",\n },\n \"settings\": {\n \"source_filenames\": (\n \"css/multiple_team_selector.css\",\n \"css/contributor.css\",\n \"css/team_selector.css\",\n \"css/settings.css\",\n ),\n \"output_filename\": \"css/settings.min.css\",\n },\n \"notifications\": {\n \"source_filenames\": (\n \"css/sidebar_menu.css\",\n \"css/notifications.css\",\n ),\n \"output_filename\": \"css/notifications.min.css\",\n },\n \"machinery\": {\n \"source_filenames\": 
(\n \"css/team_selector.css\",\n \"css/machinery.css\",\n ),\n \"output_filename\": \"css/machinery.min.css\",\n },\n \"contributors\": {\n \"source_filenames\": (\n \"css/heading_info.css\",\n \"css/contributors.css\",\n ),\n \"output_filename\": \"css/contributors.min.css\",\n },\n \"terms\": {\n \"source_filenames\": (\"css/terms.css\",),\n \"output_filename\": \"css/terms.min.css\",\n },\n \"homepage\": {\n \"source_filenames\": (\"css/homepage.css\",),\n \"output_filename\": \"css/homepage.min.css\",\n },\n}\n\nPIPELINE_JS = {\n \"base\": {\n \"source_filenames\": (\n \"js/lib/jquery-3.6.1.js\",\n \"js/lib/jquery.timeago.js\",\n \"js/lib/jquery.color-2.1.2.js\",\n \"js/lib/nprogress.js\",\n \"js/main.js\",\n \"js/theme-switcher.js\",\n ),\n \"output_filename\": \"js/base.min.js\",\n },\n \"translate\": {\n \"source_filenames\": (\"translate.js\",),\n \"output_filename\": \"js/translate.min.js\",\n },\n \"admin\": {\n \"source_filenames\": (\"js/table.js\",),\n \"output_filename\": \"js/admin.min.js\",\n },\n \"admin_project\": {\n \"source_filenames\": (\n \"js/double_list_selector.js\",\n \"js/multiple_team_selector.js\",\n \"js/admin_project.js\",\n \"tag_admin.js\",\n ),\n \"output_filename\": \"js/admin_project.min.js\",\n },\n \"insights\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/insights_charts.js\",\n \"js/insights.js\",\n ),\n \"output_filename\": \"js/insights.min.js\",\n },\n \"localization\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/table.js\",\n \"js/progress-chart.js\",\n \"js/tabs.js\",\n \"js/insights_charts.js\",\n \"js/insights_tab.js\",\n \"js/info.js\",\n ),\n \"output_filename\": \"js/localization.min.js\",\n },\n \"project\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/table.js\",\n \"js/request.js\",\n \"js/progress-chart.js\",\n \"js/tabs.js\",\n \"js/sidebar_menu.js\",\n \"js/multiple_team_selector.js\",\n \"js/manual_notifications.js\",\n \"js/insights_charts.js\",\n \"js/insights_tab.js\",\n ),\n \"output_filename\": \"js/project.min.js\",\n },\n \"projects\": {\n \"source_filenames\": (\n \"js/table.js\",\n \"js/progress-chart.js\",\n ),\n \"output_filename\": \"js/projects.min.js\",\n },\n \"team\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/table.js\",\n \"js/progress-chart.js\",\n \"js/double_list_selector.js\",\n \"js/bugzilla.js\",\n \"js/tabs.js\",\n \"js/request.js\",\n \"js/permissions.js\",\n \"js/insights_charts.js\",\n \"js/insights_tab.js\",\n \"js/info.js\",\n ),\n \"output_filename\": \"js/team.min.js\",\n },\n \"teams\": {\n \"source_filenames\": (\n \"js/table.js\",\n \"js/progress-chart.js\",\n \"js/request.js\",\n ),\n \"output_filename\": \"js/teams.min.js\",\n },\n \"profile\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/insights_charts.js\",\n \"js/profile.js\",\n ),\n \"output_filename\": \"js/profile.min.js\",\n },\n \"settings\": {\n \"source_filenames\": (\n \"js/lib/jquery-ui-1.13.2.js\",\n \"js/multiple_team_selector.js\",\n \"js/team_selector.js\",\n \"js/settings.js\",\n ),\n \"output_filename\": \"js/settings.min.js\",\n },\n \"notifications\": {\n \"source_filenames\": (\n \"js/sidebar_menu.js\",\n \"js/notifications.js\",\n ),\n \"output_filename\": \"js/notifications.min.js\",\n },\n \"machinery\": {\n \"source_filenames\": (\n \"js/lib/diff.js\",\n \"js/lib/clipboard.min.js\",\n \"js/team_selector.js\",\n \"js/machinery.js\",\n ),\n \"output_filename\": \"js/machinery.min.js\",\n },\n \"homepage\": {\n 
\"source_filenames\": (\"js/homepage.js\",),\n \"output_filename\": \"js/homepage.min.js\",\n },\n}\n\nPIPELINE = {\n \"STYLESHEETS\": PIPELINE_CSS,\n \"JAVASCRIPT\": PIPELINE_JS,\n \"JS_COMPRESSOR\": \"pipeline.compressors.terser.TerserCompressor\",\n \"CSS_COMPRESSOR\": \"pipeline.compressors.NoopCompressor\",\n \"YUGLIFY_BINARY\": path(\n os.environ.get(\"YUGLIFY_BINARY\", \"node_modules/.bin/yuglify\")\n ),\n \"TERSER_BINARY\": path(os.environ.get(\"TERSER_BINARY\", \"node_modules/.bin/terser\")),\n \"DISABLE_WRAPPER\": True,\n}\n\n# Cache config\n# If the environment contains configuration data for Memcached, use\n# BMemcached for the cache backend. Otherwise, default to an in-memory\n# cache.\nif os.environ.get(\"MEMCACHE_SERVERS\") is not None:\n CACHES = {\n \"default\": {\"BACKEND\": \"django_bmemcached.memcached.BMemcached\", \"OPTIONS\": {}}\n }\nelse:\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"pontoon\",\n }\n }\n\n# Site ID is used by Django's Sites framework.\nSITE_ID = 1\n\n# Media and templates.\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = os.environ.get(\"MEDIA_ROOT\", path(\"media\"))\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = \"/media/\"\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = STATIC_HOST + \"/static/\"\n\nSTATICFILES_STORAGE = \"pontoon.base.storage.CompressedManifestPipelineStorage\"\nSTATICFILES_FINDERS = (\n \"pipeline.finders.PipelineFinder\",\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n)\nSTATICFILES_DIRS = [\n os.path.join(TRANSLATE_DIR, \"dist\"),\n os.path.join(TRANSLATE_DIR, \"public\"),\n os.path.join(TAGADMIN_DIR, \"dist\"),\n]\n\n\n# Set ALLOWED_HOSTS based on SITE_URL setting.\ndef _allowed_hosts():\n host = _get_site_url_netloc() # Remove protocol and path\n result = [host]\n # In order to be able to use ALLOWED_HOSTS to validate URLs, we need to\n # have a version of the host that contains the port. This only applies\n # to local development (usually the host is localhost:8000).\n if \":\" in host:\n host_no_port = host.rsplit(\":\", 1)[0]\n result = [host, host_no_port]\n\n # add values from environment variable. 
Needed in case of URL/domain redirections\n env_vars_str = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1:8000\")\n env_vars = [x.strip() for x in env_vars_str.split(\",\")]\n result.extend(env_vars)\n\n return result\n\n\nALLOWED_HOSTS = lazy(_allowed_hosts, list)()\n\n# Auth\n# The first hasher in this list will be used for new passwords.\n# Any other hasher in the list can be used for existing passwords.\nPASSWORD_HASHERS = (\n \"django.contrib.auth.hashers.PBKDF2PasswordHasher\",\n \"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher\",\n \"django.contrib.auth.hashers.BCryptSHA256PasswordHasher\",\n \"django.contrib.auth.hashers.BCryptPasswordHasher\",\n \"django.contrib.auth.hashers.SHA1PasswordHasher\",\n \"django.contrib.auth.hashers.MD5PasswordHasher\",\n \"django.contrib.auth.hashers.UnsaltedMD5PasswordHasher\",\n)\n\n# Logging\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\"console\": {\"class\": \"logging.StreamHandler\"}},\n \"formatters\": {\n \"verbose\": {\"format\": \"[%(levelname)s:%(name)s] %(asctime)s %(message)s\"},\n },\n \"loggers\": {\n \"django\": {\"handlers\": [\"console\"]},\n \"pontoon\": {\n \"handlers\": [\"console\"],\n \"level\": os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"INFO\"),\n },\n },\n}\n\nif DEBUG:\n LOGGING[\"handlers\"][\"console\"][\"formatter\"] = \"verbose\"\n\nif os.environ.get(\"DJANGO_SQL_LOG\", False):\n LOGGING[\"loggers\"][\"django.db.backends\"] = {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n }\n\n# General auth settings\nLOGIN_URL = \"/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_REDIRECT_URL_FAILURE = \"/\"\n\n# Should robots.txt deny everything or disallow a calculated list of\n# URLs we don't want to be crawled? Default is false, disallow\n# everything.\nENGAGE_ROBOTS = False\n\n# Store the CSRF token in the user's session instead of in a cookie.\nCSRF_USE_SESSIONS = True\n\n# Set X-Frame-Options to DENY by default on all responses.\nX_FRAME_OPTIONS = \"DENY\"\n\n# Use correct header for detecting HTTPS on Heroku.\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Do not set SECURE_HSTS_SECONDS.\n# HSTS is being taken care of in pontoon/wsgi.py.\n# SECURE_HSTS_SECONDS = 63072000\n\n# X-Content-Type-Options: nosniff\n# Disables browser MIME type sniffing\nSECURE_CONTENT_TYPE_NOSNIFF = True\n\n# x-xss-protection: 1; mode=block\n# Activates the browser's XSS filtering and helps prevent XSS attacks\nSECURE_BROWSER_XSS_FILTER = True\n\n# Redirect non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = not (DEBUG or os.environ.get(\"CI\", False))\n\n# Content-Security-Policy headers\nCSP_DEFAULT_SRC = (\"'none'\",)\nCSP_FRAME_SRC = (\"https:\",)\nCSP_WORKER_SRC = (\"https:\",)\nCSP_CONNECT_SRC = (\n \"'self'\",\n \"https://bugzilla.mozilla.org/rest/bug\",\n)\nCSP_FONT_SRC = (\"'self'\",)\nCSP_IMG_SRC = (\n \"'self'\",\n \"https:\",\n # Needed for ACE editor images\n \"data:\",\n \"https://*.wp.com/pontoon.mozilla.org/\",\n \"https://www.google-analytics.com\",\n \"https://www.gravatar.com/avatar/\",\n)\nCSP_SCRIPT_SRC = (\n \"'self'\",\n \"'unsafe-eval'\",\n \"'sha256-fDsgbzHC0sNuBdM4W91nXVccgFLwIDkl197QEca/Cl4='\",\n # Rules related to Google Analytics\n \"'sha256-G5/M3dBlZdlvno5Cibw42fbeLr2PTEGd1M909Z7vPZE='\",\n \"https://www.google-analytics.com/analytics.js\",\n)\nCSP_STYLE_SRC = (\n \"'self'\",\n \"'unsafe-inline'\",\n)\n\n# Needed if site not hosted on HTTPS domains (like local setup)\nif not (HEROKU_DEMO or SITE_URL.startswith(\"https\")):\n CSP_IMG_SRC = 
CSP_IMG_SRC + (\"http://www.gravatar.com/avatar/\",)\n CSP_WORKER_SRC = CSP_FRAME_SRC = CSP_FRAME_SRC + (\"http:\",)\n\n# For absolute urls\ntry:\n DOMAIN = socket.gethostname()\nexcept OSError:\n DOMAIN = \"localhost\"\nPROTOCOL = \"http://\"\nPORT = 80\n\n# Names for slave databases from the DATABASES setting.\nSLAVE_DATABASES = []\n\n# Internationalization.\n\n# Enable timezone-aware datetimes.\nUSE_TZ = True\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = os.environ.get(\"TZ\", \"UTC\")\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = False\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = False\n\n# Enable Bugs tab on the team pages, pulling data from bugzilla.mozilla.org.\n# See bug 1567402 for details. A Mozilla-specific variable.\nENABLE_BUGS_TAB = os.environ.get(\"ENABLE_BUGS_TAB\", \"False\") != \"False\"\n\n# Enable Insights dashboards,\n# presenting data that needs to be collected by a scheduled job.\n# See docs/admin/deployment.rst for more information.\nENABLE_INSIGHTS = os.environ.get(\"ENABLE_INSIGHTS\", \"False\") != \"False\"\n\n# Bleach tags and attributes\nALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"em\",\n \"i\",\n \"li\",\n \"ol\",\n \"p\",\n \"strong\",\n \"ul\",\n]\n\nALLOWED_ATTRIBUTES = {\n \"a\": [\"href\", \"title\", \"target\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n}\n\n# Multiple sync tasks for the same project cannot run concurrently to prevent\n# potential DB and VCS inconsistencies. We store the information about the\n# running task in cache and clear it after the task completes. In case of an\n# error, we might never clear the cache, so we use SYNC_TASK_TIMEOUT as the\n# longest possible period (in seconds) after which the cache is cleared and\n# the subsequent task can run. 
The value should exceed the longest sync task\n# of the instance.\ntry:\n SYNC_TASK_TIMEOUT = int(os.environ.get(\"SYNC_TASK_TIMEOUT\", \"\"))\nexcept ValueError:\n SYNC_TASK_TIMEOUT = 60 * 60 * 1 # 1 hour\n\nSYNC_LOG_RETENTION = 90 # days\n\nMANUAL_SYNC = os.environ.get(\"MANUAL_SYNC\", \"True\") != \"False\"\n\n# Celery\n\n# Execute celery tasks locally instead of in a worker unless the\n# environment is configured.\nCELERY_ALWAYS_EAGER = os.environ.get(\"CELERY_ALWAYS_EAGER\", \"True\") != \"False\"\n\n# Limit the number of tasks a celery worker can handle before being replaced.\ntry:\n CELERYD_MAX_TASKS_PER_CHILD = int(os.environ.get(\"CELERYD_MAX_TASKS_PER_CHILD\", \"\"))\nexcept ValueError:\n CELERYD_MAX_TASKS_PER_CHILD = 20\n\nBROKER_POOL_LIMIT = 1 # Limit to one connection per worker\nBROKER_CONNECTION_TIMEOUT = 30 # Give up connecting faster\nCELERY_RESULT_BACKEND = None # We don't store results\nCELERY_SEND_EVENTS = False # We aren't yet monitoring events\n\n# The default serializer since Celery 4 is 'json'\nCELERY_TASK_SERIALIZER = \"pickle\"\nCELERY_RESULT_SERIALIZER = \"pickle\"\nCELERY_ACCEPT_CONTENT = [\"pickle\"]\n\n# Settings related to the CORS mechanisms.\n# For the sake of integration with other sites,\n# all origins are allowed for the GraphQL endpoint.\nCORS_ALLOW_ALL_ORIGINS = True\nCORS_URLS_REGEX = r\"^/graphql/?$\"\n\nSOCIALACCOUNT_ENABLED = True\nSOCIALACCOUNT_ADAPTER = \"pontoon.base.adapter.PontoonSocialAdapter\"\n\n# Supported values: 'django', 'fxa', 'github', 'gitlab', 'google'\nAUTHENTICATION_METHOD = os.environ.get(\"AUTHENTICATION_METHOD\", \"django\")\n\n\ndef account_username(user):\n return user.name_or_email\n\n\n# django-allauth settings\nACCOUNT_AUTHENTICATED_METHOD = \"email\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"none\"\nACCOUNT_USER_DISPLAY = account_username\n\n# Mozilla Accounts (formerly Firefox Accounts)\nFXA_CLIENT_ID = os.environ.get(\"FXA_CLIENT_ID\")\nFXA_SECRET_KEY = os.environ.get(\"FXA_SECRET_KEY\")\nFXA_OAUTH_ENDPOINT = os.environ.get(\"FXA_OAUTH_ENDPOINT\", \"\")\nFXA_PROFILE_ENDPOINT = os.environ.get(\"FXA_PROFILE_ENDPOINT\", \"\")\nFXA_SCOPE = [\"profile:uid\", \"profile:display_name\", \"profile:email\"]\n\n# Github\nGITHUB_CLIENT_ID = os.environ.get(\"GITHUB_CLIENT_ID\")\nGITHUB_SECRET_KEY = os.environ.get(\"GITHUB_SECRET_KEY\")\n\n# GitLab\nGITLAB_URL = os.environ.get(\"GITLAB_URL\", \"https://gitlab.com\")\nGITLAB_CLIENT_ID = os.environ.get(\"GITLAB_CLIENT_ID\")\nGITLAB_SECRET_KEY = os.environ.get(\"GITLAB_SECRET_KEY\")\n\n# Google Accounts\nGOOGLE_CLIENT_ID = os.environ.get(\"GOOGLE_CLIENT_ID\")\nGOOGLE_SECRET_KEY = os.environ.get(\"GOOGLE_SECRET_KEY\")\n\n# Keycloak Accounts\nKEYCLOAK_CLIENT_ID = os.environ.get(\"KEYCLOAK_CLIENT_ID\")\nKEYCLOAK_CLIENT_SECRET = os.environ.get(\"KEYCLOAK_CLIENT_SECRET\")\n\n# All settings related to the AllAuth\nSOCIALACCOUNT_PROVIDERS = {\n \"fxa\": {\n \"SCOPE\": FXA_SCOPE,\n \"OAUTH_ENDPOINT\": FXA_OAUTH_ENDPOINT,\n \"PROFILE_ENDPOINT\": FXA_PROFILE_ENDPOINT,\n },\n \"gitlab\": {\"GITLAB_URL\": GITLAB_URL, \"SCOPE\": [\"read_user\"]},\n \"keycloak\": {\n \"KEYCLOAK_URL\": os.environ.get(\"KEYCLOAK_URL\"),\n \"KEYCLOAK_REALM\": os.environ.get(\"KEYCLOAK_REALM\"),\n },\n}\n\n# Configuration of `django-notifications-hq` app\nDJANGO_NOTIFICATIONS_CONFIG = {\n # Attach extra arguments passed to notify.send(...) 
to the .data attribute\n # of the Notification object.\n \"USE_JSONFIELD\": True,\n}\n\n# Maximum number of read notifications to display in the notifications menu\nNOTIFICATIONS_MAX_COUNT = 7\n\n# Integer representing a day of the week on which the `send_suggestion_notifications`\n# management command will run.\nSUGGESTION_NOTIFICATIONS_DAY = os.environ.get(\"SUGGESTION_NOTIFICATIONS_DAY\", 4)\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n", "path": "pontoon/settings/base.py" } ]
[ { "content": "\"\"\"Django settings for Pontoon.\"\"\"\nimport re\nimport os\nimport socket\n\nfrom django.utils.functional import lazy\n\nimport dj_database_url\n\n\n_dirname = os.path.dirname\n\nROOT = _dirname(_dirname(_dirname(os.path.abspath(__file__))))\n\n\ndef path(*args):\n return os.path.join(ROOT, *args)\n\n\n# Environment-dependent settings. These are loaded from environment\n# variables.\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ[\"SECRET_KEY\"]\n\n# Is this a dev instance?\nDEV = os.environ.get(\"DJANGO_DEV\", \"False\") != \"False\"\n\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\") != \"False\"\n\nHEROKU_DEMO = os.environ.get(\"HEROKU_DEMO\", \"False\") != \"False\"\n\nLOGOUT_REDIRECT_URL = \"/\"\n\nADMINS = MANAGERS = (\n (os.environ.get(\"ADMIN_NAME\", \"\"), os.environ.get(\"ADMIN_EMAIL\", \"\")),\n)\n\n# A list of project manager email addresses to send project requests to\nPROJECT_MANAGERS = os.environ.get(\"PROJECT_MANAGERS\", \"\").split(\",\")\n\n\ndef _get_site_url_netloc():\n from urllib.parse import urlparse\n from django.conf import settings\n\n return urlparse(settings.SITE_URL).netloc\n\n\ndef _default_from_email():\n return os.environ.get(\n \"DEFAULT_FROM_EMAIL\", f\"Pontoon <pontoon@{_get_site_url_netloc()}>\"\n )\n\n\nDEFAULT_FROM_EMAIL = lazy(_default_from_email, str)()\n\n# VCS identity to be used when committing translations.\nVCS_SYNC_NAME = os.environ.get(\"VCS_SYNC_NAME\", \"Pontoon\")\nVCS_SYNC_EMAIL = os.environ.get(\"VCS_SYNC_EMAIL\", \"[email protected]\")\n\nDATABASES = {\n \"default\": dj_database_url.config(default=\"mysql://root@localhost/pontoon\")\n}\n\n# Ensure that psycopg2 uses a secure SSL connection.\nif not DEV and not DEBUG:\n if \"OPTIONS\" not in DATABASES[\"default\"]:\n DATABASES[\"default\"][\"OPTIONS\"] = {}\n DATABASES[\"default\"][\"OPTIONS\"][\"sslmode\"] = \"require\"\n\nTAGADMIN_DIR = os.path.join(ROOT, \"tag-admin\")\nTRANSLATE_DIR = os.path.join(ROOT, \"translate\")\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.environ.get(\"STATIC_ROOT\", path(\"static\"))\n\n# Optional CDN hostname for static files, e.g. 
'//asdf.cloudfront.net'\nSTATIC_HOST = os.environ.get(\"STATIC_HOST\", \"\")\n\nSESSION_COOKIE_HTTPONLY = os.environ.get(\"SESSION_COOKIE_HTTPONLY\", \"True\") != \"False\"\nSESSION_COOKIE_SECURE = os.environ.get(\"SESSION_COOKIE_SECURE\", \"True\") != \"False\"\n\nAPP_URL_KEY = \"APP_URL\"\n\nSITE_URL = os.environ.get(\"SITE_URL\", \"http://localhost:8000\")\n\n# Custom LD_LIBRARY_PATH environment variable for SVN\nSVN_LD_LIBRARY_PATH = os.environ.get(\"SVN_LD_LIBRARY_PATH\", \"\")\n\n# URL to the RabbitMQ server\nBROKER_URL = os.environ.get(\"RABBITMQ_URL\", None)\n\n# Google Cloud Translation API key\nGOOGLE_TRANSLATE_API_KEY = os.environ.get(\"GOOGLE_TRANSLATE_API_KEY\", \"\")\n\n# Pontoon locale codes supported by Google Cloud AutoML Translation Project ID\n#\n# Source:\n# https://cloud.google.com/translate/automl/docs/languages#supported_codes_for_language_variants\nGOOGLE_AUTOML_SUPPORTED_LOCALES = [\n \"af\",\n \"ar\",\n \"az\",\n \"bg\",\n \"bn\",\n \"ca\",\n \"cs\",\n \"cy\",\n \"da\",\n \"de\",\n \"el\",\n \"es\",\n \"es-AR\",\n \"es-CL\",\n \"es-ES\",\n \"es-MX\",\n \"et\",\n \"fa\",\n \"fi\",\n \"fil\",\n \"fr\",\n \"gl\",\n \"gu-IN\",\n \"he\",\n \"hi\",\n \"hi-IN\",\n \"hr\",\n \"ht\",\n \"hu\",\n \"id\",\n \"is\",\n \"it\",\n \"ja\",\n \"jv\",\n \"ka\",\n \"km\",\n \"ko\",\n \"lt\",\n \"lv\",\n \"mr\",\n \"ms\",\n \"my\",\n \"nb-NO\",\n \"ne-NP\",\n \"nl\",\n \"pa-IN\",\n \"pa-PK\",\n \"pl\",\n \"ps\",\n \"pt\",\n \"pt-BR\",\n \"pt-PT\",\n \"ro\",\n \"ru\",\n \"sk\",\n \"sl\",\n \"sq\",\n \"sr\",\n \"sv-SE\",\n \"sw\",\n \"ta\",\n \"te\",\n \"th\",\n \"tr\",\n \"uk\",\n \"ur\",\n \"uz\",\n \"vi\",\n \"zh-CN\",\n \"zh-HK\",\n \"zh-TW\",\n \"zu\",\n]\n\n# Google Cloud AutoML Translation Project ID\nGOOGLE_AUTOML_PROJECT_ID = os.environ.get(\"GOOGLE_AUTOML_PROJECT_ID\", \"\")\n\n# It is recommended to make Google Cloud AutoML Translation warmup requests every minute,\n# although in our experience every 5 minutes (300 seconds) is sufficient.\nGOOGLE_AUTOML_WARMUP_INTERVAL = float(\n os.environ.get(\"GOOGLE_AUTOML_WARMUP_INTERVAL\", \"300\")\n)\n\n# Microsoft Translator API Key\nMICROSOFT_TRANSLATOR_API_KEY = os.environ.get(\"MICROSOFT_TRANSLATOR_API_KEY\", \"\")\n\n# SYSTRAN Translate Settings\nSYSTRAN_TRANSLATE_API_KEY = os.environ.get(\"SYSTRAN_TRANSLATE_API_KEY\", \"\")\nSYSTRAN_TRANSLATE_SERVER = os.environ.get(\"SYSTRAN_TRANSLATE_SERVER\", \"\")\nSYSTRAN_TRANSLATE_PROFILE_OWNER = os.environ.get(\"SYSTRAN_TRANSLATE_PROFILE_OWNER\", \"\")\n\n# Google Analytics Key\nGOOGLE_ANALYTICS_KEY = os.environ.get(\"GOOGLE_ANALYTICS_KEY\", \"\")\n\n# Raygun.io configuration\nRAYGUN4PY_CONFIG = {\"api_key\": os.environ.get(\"RAYGUN_APIKEY\", \"\")}\n\n# Email settings\nEMAIL_HOST_USER = os.environ.get(\n \"EMAIL_HOST_USER\", os.environ.get(\"SENDGRID_USERNAME\", \"apikey\")\n)\nEMAIL_HOST = os.environ.get(\"EMAIL_HOST\", \"smtp.sendgrid.net\")\nEMAIL_PORT = int(os.environ.get(\"EMAIL_PORT\", \"587\"))\nEMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\", \"True\") != \"False\"\nEMAIL_USE_SSL = os.environ.get(\"EMAIL_USE_SSL\", \"False\") != \"False\"\nEMAIL_HOST_PASSWORD = os.environ.get(\n \"EMAIL_HOST_PASSWORD\", os.environ.get(\"SENDGRID_PASSWORD\", \"\")\n)\n\n# Log emails to console if the SendGrid credentials are missing.\nif EMAIL_HOST_USER and EMAIL_HOST_PASSWORD:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n# Environment-independent settings. 
These shouldn't have to change\n# between server environments.\nROOT_URLCONF = \"pontoon.urls\"\n\nINSTALLED_APPS = (\n \"pontoon.actionlog\",\n \"pontoon.administration\",\n \"pontoon.base\",\n \"pontoon.contributors\",\n \"pontoon.checks\",\n \"pontoon.insights\",\n \"pontoon.localizations\",\n \"pontoon.machinery\",\n \"pontoon.projects\",\n \"pontoon.sync\",\n \"pontoon.tags\",\n \"pontoon.teams\",\n \"pontoon.terminology\",\n \"pontoon.tour\",\n \"pontoon.translate\",\n \"pontoon.translations\",\n \"pontoon.uxactionlog\",\n \"pontoon.homepage\",\n # Django contrib apps\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n # Django sites app is required by django-allauth\n \"django.contrib.sites\",\n # Third-party apps, patches, fixes\n \"django_jinja\",\n \"pipeline\",\n \"guardian\",\n \"corsheaders\",\n \"allauth\",\n \"allauth.account\",\n \"allauth.socialaccount\",\n \"allauth.socialaccount.providers.fxa\",\n \"allauth.socialaccount.providers.github\",\n \"allauth.socialaccount.providers.google\",\n \"allauth.socialaccount.providers.gitlab\",\n \"allauth.socialaccount.providers.keycloak\",\n \"notifications\",\n \"graphene_django\",\n \"django_ace\",\n)\n\n# A list of IP addresses to be blocked from accessing the app, because they are DDoS'ing the server\nBLOCKED_IPS = os.environ.get(\"BLOCKED_IPS\", \"\").split(\",\")\n\nMIDDLEWARE = (\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.middleware.gzip.GZipMiddleware\",\n \"pontoon.base.middleware.RaygunExceptionMiddleware\",\n \"pontoon.base.middleware.BlockedIpMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n)\n\nCONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"pontoon.base.context_processors.globals\",\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django_jinja.backend.Jinja2\",\n \"NAME\": \"jinja2\",\n \"APP_DIRS\": True,\n \"DIRS\": [os.path.join(TRANSLATE_DIR, \"public\")],\n \"OPTIONS\": {\n \"match_extension\": \"\",\n \"match_regex\": re.compile(\n r\"\"\"\n ^(?!(\n admin|\n registration|\n account|\n socialaccount|\n graphene|\n )/).*\\.(\n html|\n jinja|\n js|\n )$\n \"\"\",\n re.VERBOSE,\n ),\n \"context_processors\": CONTEXT_PROCESSORS,\n \"extensions\": [\n \"jinja2.ext.do\",\n \"jinja2.ext.loopcontrols\",\n \"jinja2.ext.with_\",\n \"jinja2.ext.i18n\",\n \"jinja2.ext.autoescape\",\n \"django_jinja.builtins.extensions.CsrfExtension\",\n \"django_jinja.builtins.extensions.CacheExtension\",\n \"django_jinja.builtins.extensions.TimezoneExtension\",\n \"django_jinja.builtins.extensions.UrlsExtension\",\n \"django_jinja.builtins.extensions.StaticFilesExtension\",\n \"django_jinja.builtins.extensions.DjangoFiltersExtension\",\n \"pipeline.jinja2.PipelineExtension\",\n ],\n },\n },\n {\n \"BACKEND\": 
\"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [path(\"pontoon/base/templates/django\")],\n \"OPTIONS\": {\n \"debug\": DEBUG,\n \"context_processors\": CONTEXT_PROCESSORS,\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n },\n]\n\nSESSION_COOKIE_SAMESITE = \"lax\"\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nGUARDIAN_RAISE_403 = True\n\nPIPELINE_CSS = {\n \"base\": {\n \"source_filenames\": (\n \"css/dark-theme.css\",\n \"css/light-theme.css\",\n \"css/fontawesome-all.css\",\n \"css/nprogress.css\",\n \"css/boilerplate.css\",\n \"css/fonts.css\",\n \"css/style.css\",\n ),\n \"output_filename\": \"css/base.min.css\",\n },\n \"translate\": {\n \"source_filenames\": (\n \"translate.css\",\n \"css/dark-theme.css\",\n \"css/light-theme.css\",\n ),\n \"output_filename\": \"css/translate.min.css\",\n },\n \"admin\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/admin.css\",\n ),\n \"output_filename\": \"css/admin.min.css\",\n },\n \"admin_project\": {\n \"source_filenames\": (\n \"css/double_list_selector.css\",\n \"css/multiple_team_selector.css\",\n \"css/admin_project.css\",\n \"tag_admin.css\",\n ),\n \"output_filename\": \"css/admin_project.min.css\",\n },\n \"project\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/request.css\",\n \"css/contributors.css\",\n \"css/heading_info.css\",\n \"css/sidebar_menu.css\",\n \"css/multiple_team_selector.css\",\n \"css/manual_notifications.css\",\n \"css/insights_charts.css\",\n \"css/insights_tab.css\",\n ),\n \"output_filename\": \"css/project.min.css\",\n },\n \"insights\": {\n \"source_filenames\": (\n \"css/insights_charts.css\",\n \"css/insights.css\",\n ),\n \"output_filename\": \"css/insights.min.css\",\n },\n \"localization\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/contributors.css\",\n \"css/heading_info.css\",\n \"css/info.css\",\n \"css/download_selector.css\",\n \"css/insights_charts.css\",\n \"css/insights_tab.css\",\n ),\n \"output_filename\": \"css/localization.min.css\",\n },\n \"projects\": {\n \"source_filenames\": (\n \"css/heading_info.css\",\n \"css/table.css\",\n ),\n \"output_filename\": \"css/projects.min.css\",\n },\n \"team\": {\n \"source_filenames\": (\n \"css/table.css\",\n \"css/double_list_selector.css\",\n \"css/download_selector.css\",\n \"css/contributors.css\",\n \"css/heading_info.css\",\n \"css/team.css\",\n \"css/request.css\",\n \"css/insights_charts.css\",\n \"css/insights_tab.css\",\n \"css/info.css\",\n ),\n \"output_filename\": \"css/team.min.css\",\n },\n \"teams\": {\n \"source_filenames\": (\n \"css/heading_info.css\",\n \"css/table.css\",\n \"css/request.css\",\n ),\n \"output_filename\": \"css/teams.min.css\",\n },\n \"sync_logs\": {\n \"source_filenames\": (\"css/sync_logs.css\",),\n \"output_filename\": \"css/sync_logs.min.css\",\n },\n \"profile\": {\n \"source_filenames\": (\n \"css/contributor.css\",\n \"css/insights_charts.css\",\n \"css/profile.css\",\n ),\n \"output_filename\": \"css/profile.min.css\",\n },\n \"settings\": {\n \"source_filenames\": (\n \"css/multiple_team_selector.css\",\n \"css/contributor.css\",\n \"css/team_selector.css\",\n \"css/settings.css\",\n ),\n \"output_filename\": \"css/settings.min.css\",\n },\n \"notifications\": {\n \"source_filenames\": (\n \"css/sidebar_menu.css\",\n 
\"css/notifications.css\",\n ),\n \"output_filename\": \"css/notifications.min.css\",\n },\n \"machinery\": {\n \"source_filenames\": (\n \"css/team_selector.css\",\n \"css/machinery.css\",\n ),\n \"output_filename\": \"css/machinery.min.css\",\n },\n \"contributors\": {\n \"source_filenames\": (\n \"css/heading_info.css\",\n \"css/contributors.css\",\n ),\n \"output_filename\": \"css/contributors.min.css\",\n },\n \"terms\": {\n \"source_filenames\": (\"css/terms.css\",),\n \"output_filename\": \"css/terms.min.css\",\n },\n \"homepage\": {\n \"source_filenames\": (\"css/homepage.css\",),\n \"output_filename\": \"css/homepage.min.css\",\n },\n}\n\nPIPELINE_JS = {\n \"base\": {\n \"source_filenames\": (\n \"js/lib/jquery-3.6.1.js\",\n \"js/lib/jquery.timeago.js\",\n \"js/lib/jquery.color-2.1.2.js\",\n \"js/lib/nprogress.js\",\n \"js/main.js\",\n \"js/theme-switcher.js\",\n ),\n \"output_filename\": \"js/base.min.js\",\n },\n \"translate\": {\n \"source_filenames\": (\"translate.js\",),\n \"output_filename\": \"js/translate.min.js\",\n },\n \"admin\": {\n \"source_filenames\": (\"js/table.js\",),\n \"output_filename\": \"js/admin.min.js\",\n },\n \"admin_project\": {\n \"source_filenames\": (\n \"js/double_list_selector.js\",\n \"js/multiple_team_selector.js\",\n \"js/admin_project.js\",\n \"tag_admin.js\",\n ),\n \"output_filename\": \"js/admin_project.min.js\",\n },\n \"insights\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/insights_charts.js\",\n \"js/insights.js\",\n ),\n \"output_filename\": \"js/insights.min.js\",\n },\n \"localization\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/table.js\",\n \"js/progress-chart.js\",\n \"js/tabs.js\",\n \"js/insights_charts.js\",\n \"js/insights_tab.js\",\n \"js/info.js\",\n ),\n \"output_filename\": \"js/localization.min.js\",\n },\n \"project\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/table.js\",\n \"js/request.js\",\n \"js/progress-chart.js\",\n \"js/tabs.js\",\n \"js/sidebar_menu.js\",\n \"js/multiple_team_selector.js\",\n \"js/manual_notifications.js\",\n \"js/insights_charts.js\",\n \"js/insights_tab.js\",\n ),\n \"output_filename\": \"js/project.min.js\",\n },\n \"projects\": {\n \"source_filenames\": (\n \"js/table.js\",\n \"js/progress-chart.js\",\n ),\n \"output_filename\": \"js/projects.min.js\",\n },\n \"team\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/table.js\",\n \"js/progress-chart.js\",\n \"js/double_list_selector.js\",\n \"js/bugzilla.js\",\n \"js/tabs.js\",\n \"js/request.js\",\n \"js/permissions.js\",\n \"js/insights_charts.js\",\n \"js/insights_tab.js\",\n \"js/info.js\",\n ),\n \"output_filename\": \"js/team.min.js\",\n },\n \"teams\": {\n \"source_filenames\": (\n \"js/table.js\",\n \"js/progress-chart.js\",\n \"js/request.js\",\n ),\n \"output_filename\": \"js/teams.min.js\",\n },\n \"profile\": {\n \"source_filenames\": (\n \"js/lib/Chart.bundle.js\",\n \"js/insights_charts.js\",\n \"js/profile.js\",\n ),\n \"output_filename\": \"js/profile.min.js\",\n },\n \"settings\": {\n \"source_filenames\": (\n \"js/lib/jquery-ui-1.13.2.js\",\n \"js/multiple_team_selector.js\",\n \"js/team_selector.js\",\n \"js/settings.js\",\n ),\n \"output_filename\": \"js/settings.min.js\",\n },\n \"notifications\": {\n \"source_filenames\": (\n \"js/sidebar_menu.js\",\n \"js/notifications.js\",\n ),\n \"output_filename\": \"js/notifications.min.js\",\n },\n \"machinery\": {\n \"source_filenames\": (\n \"js/lib/diff.js\",\n \"js/lib/clipboard.min.js\",\n 
\"js/team_selector.js\",\n \"js/machinery.js\",\n ),\n \"output_filename\": \"js/machinery.min.js\",\n },\n \"homepage\": {\n \"source_filenames\": (\"js/homepage.js\",),\n \"output_filename\": \"js/homepage.min.js\",\n },\n}\n\nPIPELINE = {\n \"STYLESHEETS\": PIPELINE_CSS,\n \"JAVASCRIPT\": PIPELINE_JS,\n \"JS_COMPRESSOR\": \"pipeline.compressors.terser.TerserCompressor\",\n \"CSS_COMPRESSOR\": \"pipeline.compressors.NoopCompressor\",\n \"YUGLIFY_BINARY\": path(\n os.environ.get(\"YUGLIFY_BINARY\", \"node_modules/.bin/yuglify\")\n ),\n \"TERSER_BINARY\": path(os.environ.get(\"TERSER_BINARY\", \"node_modules/.bin/terser\")),\n \"DISABLE_WRAPPER\": True,\n}\n\n# Cache config\n# If the environment contains configuration data for Memcached, use\n# BMemcached for the cache backend. Otherwise, default to an in-memory\n# cache.\nif os.environ.get(\"MEMCACHE_SERVERS\") is not None:\n CACHES = {\n \"default\": {\"BACKEND\": \"django_bmemcached.memcached.BMemcached\", \"OPTIONS\": {}}\n }\nelse:\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"pontoon\",\n }\n }\n\n# Site ID is used by Django's Sites framework.\nSITE_ID = 1\n\n# Media and templates.\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = os.environ.get(\"MEDIA_ROOT\", path(\"media\"))\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = \"/media/\"\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = STATIC_HOST + \"/static/\"\n\nSTATICFILES_STORAGE = \"pontoon.base.storage.CompressedManifestPipelineStorage\"\nSTATICFILES_FINDERS = (\n \"pipeline.finders.PipelineFinder\",\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n)\nSTATICFILES_DIRS = [\n os.path.join(TRANSLATE_DIR, \"dist\"),\n os.path.join(TRANSLATE_DIR, \"public\"),\n os.path.join(TAGADMIN_DIR, \"dist\"),\n]\n\n\n# Set ALLOWED_HOSTS based on SITE_URL setting.\ndef _allowed_hosts():\n host = _get_site_url_netloc() # Remove protocol and path\n result = [host]\n # In order to be able to use ALLOWED_HOSTS to validate URLs, we need to\n # have a version of the host that contains the port. This only applies\n # to local development (usually the host is localhost:8000).\n if \":\" in host:\n host_no_port = host.rsplit(\":\", 1)[0]\n result = [host, host_no_port]\n\n # add values from environment variable. 
Needed in case of URL/domain redirections\n env_vars_str = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1:8000\")\n env_vars = [x.strip() for x in env_vars_str.split(\",\")]\n result.extend(env_vars)\n\n return result\n\n\nALLOWED_HOSTS = lazy(_allowed_hosts, list)()\n\n# Auth\n# The first hasher in this list will be used for new passwords.\n# Any other hasher in the list can be used for existing passwords.\nPASSWORD_HASHERS = (\n \"django.contrib.auth.hashers.PBKDF2PasswordHasher\",\n \"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher\",\n \"django.contrib.auth.hashers.BCryptSHA256PasswordHasher\",\n \"django.contrib.auth.hashers.BCryptPasswordHasher\",\n \"django.contrib.auth.hashers.SHA1PasswordHasher\",\n \"django.contrib.auth.hashers.MD5PasswordHasher\",\n \"django.contrib.auth.hashers.UnsaltedMD5PasswordHasher\",\n)\n\n# Logging\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\"console\": {\"class\": \"logging.StreamHandler\"}},\n \"formatters\": {\n \"verbose\": {\"format\": \"[%(levelname)s:%(name)s] %(asctime)s %(message)s\"},\n },\n \"loggers\": {\n \"django\": {\"handlers\": [\"console\"]},\n \"pontoon\": {\n \"handlers\": [\"console\"],\n \"level\": os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"INFO\"),\n },\n },\n}\n\nif DEBUG:\n LOGGING[\"handlers\"][\"console\"][\"formatter\"] = \"verbose\"\n\nif os.environ.get(\"DJANGO_SQL_LOG\", False):\n LOGGING[\"loggers\"][\"django.db.backends\"] = {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n }\n\n# General auth settings\nLOGIN_URL = \"/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_REDIRECT_URL_FAILURE = \"/\"\n\n# Should robots.txt deny everything or disallow a calculated list of\n# URLs we don't want to be crawled? Default is false, disallow\n# everything.\nENGAGE_ROBOTS = False\n\n# Store the CSRF token in the user's session instead of in a cookie.\nCSRF_USE_SESSIONS = True\n\n# Set X-Frame-Options to DENY by default on all responses.\nX_FRAME_OPTIONS = \"DENY\"\n\n# Use correct header for detecting HTTPS on Heroku.\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Do not set SECURE_HSTS_SECONDS.\n# HSTS is being taken care of in pontoon/wsgi.py.\n# SECURE_HSTS_SECONDS = 63072000\n\n# X-Content-Type-Options: nosniff\n# Disables browser MIME type sniffing\nSECURE_CONTENT_TYPE_NOSNIFF = True\n\n# x-xss-protection: 1; mode=block\n# Activates the browser's XSS filtering and helps prevent XSS attacks\nSECURE_BROWSER_XSS_FILTER = True\n\n# Redirect non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = not (DEBUG or os.environ.get(\"CI\", False))\n\n# Content-Security-Policy headers\nCSP_DEFAULT_SRC = (\"'none'\",)\nCSP_FRAME_SRC = (\"https:\",)\nCSP_WORKER_SRC = (\"https:\",)\nCSP_CONNECT_SRC = (\n \"'self'\",\n \"https://bugzilla.mozilla.org/rest/bug\",\n)\nCSP_FONT_SRC = (\"'self'\",)\nCSP_IMG_SRC = (\n \"'self'\",\n \"https:\",\n # Needed for ACE editor images\n \"data:\",\n \"https://*.wp.com/pontoon.mozilla.org/\",\n \"https://www.google-analytics.com\",\n \"https://www.gravatar.com/avatar/\",\n)\nCSP_SCRIPT_SRC = (\n \"'self'\",\n \"'unsafe-eval'\",\n \"'sha256-fDsgbzHC0sNuBdM4W91nXVccgFLwIDkl197QEca/Cl4='\",\n # Rules related to Google Analytics\n \"'sha256-G5/M3dBlZdlvno5Cibw42fbeLr2PTEGd1M909Z7vPZE='\",\n \"https://www.google-analytics.com/analytics.js\",\n)\nCSP_STYLE_SRC = (\n \"'self'\",\n \"'unsafe-inline'\",\n)\n\n# Needed if site not hosted on HTTPS domains (like local setup)\nif not (HEROKU_DEMO or SITE_URL.startswith(\"https\")):\n CSP_IMG_SRC = 
CSP_IMG_SRC + (\"http://www.gravatar.com/avatar/\",)\n CSP_WORKER_SRC = CSP_FRAME_SRC = CSP_FRAME_SRC + (\"http:\",)\n\n# For absolute urls\ntry:\n DOMAIN = socket.gethostname()\nexcept OSError:\n DOMAIN = \"localhost\"\nPROTOCOL = \"http://\"\nPORT = 80\n\n# Names for slave databases from the DATABASES setting.\nSLAVE_DATABASES = []\n\n# Internationalization.\n\n# Enable timezone-aware datetimes.\nUSE_TZ = True\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = os.environ.get(\"TZ\", \"UTC\")\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = False\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = False\n\n# Enable Bugs tab on the team pages, pulling data from bugzilla.mozilla.org.\n# See bug 1567402 for details. A Mozilla-specific variable.\nENABLE_BUGS_TAB = os.environ.get(\"ENABLE_BUGS_TAB\", \"False\") != \"False\"\n\n# Enable Insights dashboards,\n# presenting data that needs to be collected by a scheduled job.\n# See docs/admin/deployment.rst for more information.\nENABLE_INSIGHTS = os.environ.get(\"ENABLE_INSIGHTS\", \"False\") != \"False\"\n\n# Bleach tags and attributes\nALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"em\",\n \"i\",\n \"li\",\n \"ol\",\n \"p\",\n \"strong\",\n \"ul\",\n]\n\nALLOWED_ATTRIBUTES = {\n \"a\": [\"href\", \"title\", \"target\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n}\n\n# Multiple sync tasks for the same project cannot run concurrently to prevent\n# potential DB and VCS inconsistencies. We store the information about the\n# running task in cache and clear it after the task completes. In case of an\n# error, we might never clear the cache, so we use SYNC_TASK_TIMEOUT as the\n# longest possible period (in seconds) after which the cache is cleared and\n# the subsequent task can run. 
The value should exceed the longest sync task\n# of the instance.\ntry:\n SYNC_TASK_TIMEOUT = int(os.environ.get(\"SYNC_TASK_TIMEOUT\", \"\"))\nexcept ValueError:\n SYNC_TASK_TIMEOUT = 60 * 60 * 1 # 1 hour\n\nSYNC_LOG_RETENTION = 90 # days\n\nMANUAL_SYNC = os.environ.get(\"MANUAL_SYNC\", \"True\") != \"False\"\n\n# Celery\n\n# Execute celery tasks locally instead of in a worker unless the\n# environment is configured.\nCELERY_ALWAYS_EAGER = os.environ.get(\"CELERY_ALWAYS_EAGER\", \"True\") != \"False\"\n\n# Limit the number of tasks a celery worker can handle before being replaced.\ntry:\n CELERYD_MAX_TASKS_PER_CHILD = int(os.environ.get(\"CELERYD_MAX_TASKS_PER_CHILD\", \"\"))\nexcept ValueError:\n CELERYD_MAX_TASKS_PER_CHILD = 20\n\nBROKER_POOL_LIMIT = 1 # Limit to one connection per worker\nBROKER_CONNECTION_TIMEOUT = 30 # Give up connecting faster\nCELERY_RESULT_BACKEND = None # We don't store results\nCELERY_SEND_EVENTS = False # We aren't yet monitoring events\n\n# The default serializer since Celery 4 is 'json'\nCELERY_TASK_SERIALIZER = \"pickle\"\nCELERY_RESULT_SERIALIZER = \"pickle\"\nCELERY_ACCEPT_CONTENT = [\"pickle\"]\n\n# Settings related to the CORS mechanisms.\n# For the sake of integration with other sites,\n# all origins are allowed for the GraphQL endpoint.\nCORS_ALLOW_ALL_ORIGINS = True\nCORS_URLS_REGEX = r\"^/graphql/?$\"\n\nSOCIALACCOUNT_ENABLED = True\nSOCIALACCOUNT_ADAPTER = \"pontoon.base.adapter.PontoonSocialAdapter\"\n\n# Supported values: 'django', 'fxa', 'github', 'gitlab', 'google'\nAUTHENTICATION_METHOD = os.environ.get(\"AUTHENTICATION_METHOD\", \"django\")\n\n\ndef account_username(user):\n return user.name_or_email\n\n\n# django-allauth settings\nACCOUNT_AUTHENTICATED_METHOD = \"email\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"none\"\nACCOUNT_USER_DISPLAY = account_username\n\n# Mozilla Accounts (formerly Firefox Accounts)\nFXA_CLIENT_ID = os.environ.get(\"FXA_CLIENT_ID\")\nFXA_SECRET_KEY = os.environ.get(\"FXA_SECRET_KEY\")\nFXA_OAUTH_ENDPOINT = os.environ.get(\"FXA_OAUTH_ENDPOINT\", \"\")\nFXA_PROFILE_ENDPOINT = os.environ.get(\"FXA_PROFILE_ENDPOINT\", \"\")\nFXA_SCOPE = [\"profile:uid\", \"profile:display_name\", \"profile:email\"]\n\n# Github\nGITHUB_CLIENT_ID = os.environ.get(\"GITHUB_CLIENT_ID\")\nGITHUB_SECRET_KEY = os.environ.get(\"GITHUB_SECRET_KEY\")\n\n# GitLab\nGITLAB_URL = os.environ.get(\"GITLAB_URL\", \"https://gitlab.com\")\nGITLAB_CLIENT_ID = os.environ.get(\"GITLAB_CLIENT_ID\")\nGITLAB_SECRET_KEY = os.environ.get(\"GITLAB_SECRET_KEY\")\n\n# Google Accounts\nGOOGLE_CLIENT_ID = os.environ.get(\"GOOGLE_CLIENT_ID\")\nGOOGLE_SECRET_KEY = os.environ.get(\"GOOGLE_SECRET_KEY\")\n\n# Keycloak Accounts\nKEYCLOAK_CLIENT_ID = os.environ.get(\"KEYCLOAK_CLIENT_ID\")\nKEYCLOAK_CLIENT_SECRET = os.environ.get(\"KEYCLOAK_CLIENT_SECRET\")\n\n# All settings related to the AllAuth\nSOCIALACCOUNT_PROVIDERS = {\n \"fxa\": {\n \"SCOPE\": FXA_SCOPE,\n \"OAUTH_ENDPOINT\": FXA_OAUTH_ENDPOINT,\n \"PROFILE_ENDPOINT\": FXA_PROFILE_ENDPOINT,\n },\n \"gitlab\": {\"GITLAB_URL\": GITLAB_URL, \"SCOPE\": [\"read_user\"]},\n \"keycloak\": {\n \"KEYCLOAK_URL\": os.environ.get(\"KEYCLOAK_URL\"),\n \"KEYCLOAK_REALM\": os.environ.get(\"KEYCLOAK_REALM\"),\n },\n}\n\n# Configuration of `django-notifications-hq` app\nDJANGO_NOTIFICATIONS_CONFIG = {\n # Attach extra arguments passed to notify.send(...) 
to the .data attribute\n # of the Notification object.\n \"USE_JSONFIELD\": True,\n}\n\n# Maximum number of read notifications to display in the notifications menu\nNOTIFICATIONS_MAX_COUNT = 7\n\n# Integer representing a day of the week on which the `send_suggestion_notifications`\n# management command will run.\nSUGGESTION_NOTIFICATIONS_DAY = os.environ.get(\"SUGGESTION_NOTIFICATIONS_DAY\", 4)\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n", "path": "pontoon/settings/base.py" } ]
diff --git a/docs/admin/deployment.rst b/docs/admin/deployment.rst index 0c1f56164a..02bd4e7430 100644 --- a/docs/admin/deployment.rst +++ b/docs/admin/deployment.rst @@ -72,6 +72,10 @@ you create: Set to 'gitlab' if you want to use 'GitLab' (corresponding GITLAB_* settings must be set if required). Set to 'google' if you want to use 'Google' (corresponding GOOGLE_* settings must be set). +``BLOCKED_IPS`` + A comma-separated list of IP addresses to be blocked from accessing the app, + for example because they are DDoS'ing the server. + ``CELERY_ALWAYS_EAGER`` Controls whether asynchronous tasks (mainly used during sync) are sent to Celery or executed immediately and synchronously. Set this to ``False`` on diff --git a/docs/admin/maintenance.rst b/docs/admin/maintenance.rst index 9790d4e8d2..8f0ff28d27 100644 --- a/docs/admin/maintenance.rst +++ b/docs/admin/maintenance.rst @@ -40,3 +40,16 @@ Finally, you need to simply access the worker: # Replace my-app-name with your Heroku app's name. celery --broker=`heroku config:get RABBITMQ_URL --app=my-app-name` worker + +Mitigating DDoS attacks +----------------------- +In a distributed denial-of-service attack (`DDoS`_ attack), the incoming traffic +flooding the victim originates from many different sources. This stops everyone +else from accessing the website as there is too much traffic flowing to it. + +One way to mitigate DDoS attacks is to identify the IP addresses of the +attackers and block them. Find the attacking IP addresses in the Log +Management Add-On (Papertrail) and add them to the BLOCKED_IPs config variable +in Heroku Settings. + +.. _DDoS: https://en.wikipedia.org/wiki/Denial-of-service_attack diff --git a/pontoon/settings/base.py b/pontoon/settings/base.py index 453be98510..1f45038f7a 100644 --- a/pontoon/settings/base.py +++ b/pontoon/settings/base.py @@ -267,6 +267,7 @@ def _default_from_email(): "django_ace", ) +# A list of IP addresses to be blocked from accessing the app, because they are DDoS'ing the server BLOCKED_IPS = os.environ.get("BLOCKED_IPS", "").split(",") MIDDLEWARE = (
open-telemetry__opentelemetry-python-1653
Consider renaming Resource.create_empty() to Resource.get_empty(), especially since a cached instance is returned, i.e. no actual creation happens.
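As a quick illustration of the concern (a minimal sketch against the SDK code quoted below; the assertion is added purely for demonstration and is not part of the project):

```python
from opentelemetry.sdk.resources import Resource

a = Resource.create_empty()
b = Resource.create_empty()

# Both calls return the same module-level _EMPTY_RESOURCE instance; nothing
# is constructed per call, so an accessor-style name such as get_empty()
# describes the behavior more accurately than create_empty().
assert a is b
```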
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package implements `OpenTelemetry Resources\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_:\n\n *A Resource is an immutable representation of the entity producing\n telemetry. For example, a process producing telemetry that is running in\n a container on Kubernetes has a Pod name, it is in a namespace and\n possibly is part of a Deployment which also has a name. All three of\n these attributes can be included in the Resource.*\n\nResource objects are created with `Resource.create`, which accepts attributes\n(key-values). Resource attributes can also be passed at process invocation in\nthe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should\nregister your resource with the `opentelemetry.sdk.trace.TracerProvider` by\npassing them into their constructors. The `Resource` passed to a provider is\navailable to the exporter, which can send on this information as it sees fit.\n\n.. code-block:: python\n\n trace.set_tracer_provider(\n TracerProvider(\n resource=Resource.create({\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }),\n ),\n )\n print(trace.get_tracer_provider().resource.attributes)\n\n {'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'service.name': 'shoppingcart',\n 'service.instance.id': 'instance-12'}\n\nNote that the OpenTelemetry project documents certain `\"standard attributes\"\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_\nthat have prescribed semantic meanings, for example ``service.name`` in the\nabove example.\n\n.. envvar:: OTEL_RESOURCE_ATTRIBUTES\n\nThe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource\nattributes to be passed to the SDK at process invocation. The attributes from\n:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to\n`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*\npriority. Attributes should be in the format ``key1=value1,key2=value2``.\nAdditional details are available `in the specification\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.\n\n.. 
code-block:: console\n\n $ OTEL_RESOURCE_ATTRIBUTES=\"service.name=shoppingcard,will_be_overridden=foo\" python - <<EOF\n import pprint\n from opentelemetry.sdk.resources import Resource\n pprint.pprint(Resource.create({\"will_be_overridden\": \"bar\"}).attributes)\n EOF\n {'service.name': 'shoppingcard',\n 'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'will_be_overridden': 'bar'}\n \"\"\"\n\nimport abc\nimport concurrent.futures\nimport logging\nimport os\nimport typing\nfrom json import dumps\n\nimport pkg_resources\n\nfrom opentelemetry.sdk.environment_variables import OTEL_RESOURCE_ATTRIBUTES\n\nLabelValue = typing.Union[str, bool, int, float]\nAttributes = typing.Dict[str, LabelValue]\nlogger = logging.getLogger(__name__)\n\n\nCLOUD_PROVIDER = \"cloud.provider\"\nCLOUD_ACCOUNT_ID = \"cloud.account.id\"\nCLOUD_REGION = \"cloud.region\"\nCLOUD_ZONE = \"cloud.zone\"\nCONTAINER_NAME = \"container.name\"\nCONTAINER_ID = \"container.id\"\nCONTAINER_IMAGE_NAME = \"container.image.name\"\nCONTAINER_IMAGE_TAG = \"container.image.tag\"\nDEPLOYMENT_ENVIRONMENT = \"deployment.environment\"\nFAAS_NAME = \"faas.name\"\nFAAS_ID = \"faas.id\"\nFAAS_VERSION = \"faas.version\"\nFAAS_INSTANCE = \"faas.instance\"\nHOST_NAME = \"host.name\"\nHOST_TYPE = \"host.type\"\nHOST_IMAGE_NAME = \"host.image.name\"\nHOST_IMAGE_ID = \"host.image.id\"\nHOST_IMAGE_VERSION = \"host.image.version\"\nKUBERNETES_CLUSTER_NAME = \"k8s.cluster.name\"\nKUBERNETES_NAMESPACE_NAME = \"k8s.namespace.name\"\nKUBERNETES_POD_UID = \"k8s.pod.uid\"\nKUBERNETES_POD_NAME = \"k8s.pod.name\"\nKUBERNETES_CONTAINER_NAME = \"k8s.container.name\"\nKUBERNETES_REPLICA_SET_UID = \"k8s.replicaset.uid\"\nKUBERNETES_REPLICA_SET_NAME = \"k8s.replicaset.name\"\nKUBERNETES_DEPLOYMENT_UID = \"k8s.deployment.uid\"\nKUBERNETES_DEPLOYMENT_NAME = \"k8s.deployment.name\"\nKUBERNETES_STATEFUL_SET_UID = \"k8s.statefulset.uid\"\nKUBERNETES_STATEFUL_SET_NAME = \"k8s.statefulset.name\"\nKUBERNETES_DAEMON_SET_UID = \"k8s.daemonset.uid\"\nKUBERNETES_DAEMON_SET_NAME = \"k8s.daemonset.name\"\nKUBERNETES_JOB_UID = \"k8s.job.uid\"\nKUBERNETES_JOB_NAME = \"k8s.job.name\"\nKUBERNETES_CRON_JOB_UID = \"k8s.cronjob.uid\"\nKUBERNETES_CRON_JOB_NAME = \"k8s.cronjob.name\"\nOS_TYPE = \"os.type\"\nOS_DESCRIPTION = \"os.description\"\nPROCESS_PID = \"process.pid\"\nPROCESS_EXECUTABLE_NAME = \"process.executable.name\"\nPROCESS_EXECUTABLE_PATH = \"process.executable.path\"\nPROCESS_COMMAND = \"process.command\"\nPROCESS_COMMAND_LINE = \"process.command_line\"\nPROCESS_COMMAND_ARGS = \"process.command_args\"\nPROCESS_OWNER = \"process.owner\"\nPROCESS_RUNTIME_NAME = \"process.runtime.name\"\nPROCESS_RUNTIME_VERSION = \"process.runtime.version\"\nPROCESS_RUNTIME_DESCRIPTION = \"process.runtime.description\"\nSERVICE_NAME = \"service.name\"\nSERVICE_NAMESPACE = \"service.namespace\"\nSERVICE_INSTANCE_ID = \"service.instance.id\"\nSERVICE_VERSION = \"service.version\"\nTELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\nTELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\nTELEMETRY_AUTO_VERSION = \"telemetry.auto.version\"\nTELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n\n\nOPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n).version\n\n\nclass Resource:\n \"\"\"A Resource is an immutable representation of the entity producing telemetry as Attributes.\"\"\"\n\n def __init__(self, attributes: Attributes):\n self._attributes = attributes.copy()\n\n @staticmethod\n def 
create(attributes: typing.Optional[Attributes] = None) -> \"Resource\":\n \"\"\"Creates a new `Resource` from attributes.\n\n Args:\n attributes: Optional zero or more key-value pairs.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n if not attributes:\n attributes = {}\n resource = _DEFAULT_RESOURCE.merge(\n OTELResourceDetector().detect()\n ).merge(Resource(attributes))\n if not resource.attributes.get(SERVICE_NAME, None):\n default_service_name = \"unknown_service\"\n process_executable_name = resource.attributes.get(\n PROCESS_EXECUTABLE_NAME, None\n )\n if process_executable_name:\n default_service_name += \":\" + process_executable_name\n resource = resource.merge(\n Resource({SERVICE_NAME: default_service_name})\n )\n return resource\n\n @staticmethod\n def create_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n\n @property\n def attributes(self) -> Attributes:\n return self._attributes.copy()\n\n def merge(self, other: \"Resource\") -> \"Resource\":\n \"\"\"Merges this resource and an updating resource into a new `Resource`.\n\n If a key exists on both the old and updating resource, the value of the\n updating resource will override the old resource value.\n\n Args:\n other: The other resource to be merged.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n merged_attributes = self.attributes\n merged_attributes.update(other.attributes)\n return Resource(merged_attributes)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Resource):\n return False\n return self._attributes == other._attributes\n\n def __hash__(self):\n return hash(dumps(self._attributes, sort_keys=True))\n\n\n_EMPTY_RESOURCE = Resource({})\n_DEFAULT_RESOURCE = Resource(\n {\n TELEMETRY_SDK_LANGUAGE: \"python\",\n TELEMETRY_SDK_NAME: \"opentelemetry\",\n TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,\n }\n)\n\n\nclass ResourceDetector(abc.ABC):\n def __init__(self, raise_on_error=False):\n self.raise_on_error = raise_on_error\n\n @abc.abstractmethod\n def detect(self) -> \"Resource\":\n raise NotImplementedError()\n\n\nclass OTELResourceDetector(ResourceDetector):\n # pylint: disable=no-self-use\n def detect(self) -> \"Resource\":\n env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)\n env_resource_map = {}\n if env_resources_items:\n env_resource_map = {\n key.strip(): value.strip()\n for key, value in (\n item.split(\"=\") for item in env_resources_items.split(\",\")\n )\n }\n return Resource(env_resource_map)\n\n\ndef get_aggregated_resources(\n detectors: typing.List[\"ResourceDetector\"],\n initial_resource: typing.Optional[Resource] = None,\n timeout=5,\n) -> \"Resource\":\n \"\"\"Retrieves resources from detectors in the order that they were passed\n\n :param detectors: List of resources in order of priority\n :param initial_resource: Static resource. 
This has highest priority\n :param timeout: Number of seconds to wait for each detector to return\n :return:\n \"\"\"\n final_resource = initial_resource or _EMPTY_RESOURCE\n detectors = [OTELResourceDetector()] + detectors\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n futures = [executor.submit(detector.detect) for detector in detectors]\n for detector_ind, future in enumerate(futures):\n detector = detectors[detector_ind]\n try:\n detected_resources = future.result(timeout=timeout)\n # pylint: disable=broad-except\n except Exception as ex:\n if detector.raise_on_error:\n raise ex\n logger.warning(\n \"Exception %s in detector %s, ignoring\", ex, detector\n )\n detected_resources = _EMPTY_RESOURCE\n finally:\n final_resource = final_resource.merge(detected_resources)\n return final_resource\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py" } ]
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package implements `OpenTelemetry Resources\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_:\n\n *A Resource is an immutable representation of the entity producing\n telemetry. For example, a process producing telemetry that is running in\n a container on Kubernetes has a Pod name, it is in a namespace and\n possibly is part of a Deployment which also has a name. All three of\n these attributes can be included in the Resource.*\n\nResource objects are created with `Resource.create`, which accepts attributes\n(key-values). Resource attributes can also be passed at process invocation in\nthe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should\nregister your resource with the `opentelemetry.sdk.trace.TracerProvider` by\npassing them into their constructors. The `Resource` passed to a provider is\navailable to the exporter, which can send on this information as it sees fit.\n\n.. code-block:: python\n\n trace.set_tracer_provider(\n TracerProvider(\n resource=Resource.create({\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }),\n ),\n )\n print(trace.get_tracer_provider().resource.attributes)\n\n {'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'service.name': 'shoppingcart',\n 'service.instance.id': 'instance-12'}\n\nNote that the OpenTelemetry project documents certain `\"standard attributes\"\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_\nthat have prescribed semantic meanings, for example ``service.name`` in the\nabove example.\n\n.. envvar:: OTEL_RESOURCE_ATTRIBUTES\n\nThe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource\nattributes to be passed to the SDK at process invocation. The attributes from\n:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to\n`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*\npriority. Attributes should be in the format ``key1=value1,key2=value2``.\nAdditional details are available `in the specification\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.\n\n.. 
code-block:: console\n\n $ OTEL_RESOURCE_ATTRIBUTES=\"service.name=shoppingcard,will_be_overridden=foo\" python - <<EOF\n import pprint\n from opentelemetry.sdk.resources import Resource\n pprint.pprint(Resource.create({\"will_be_overridden\": \"bar\"}).attributes)\n EOF\n {'service.name': 'shoppingcard',\n 'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'will_be_overridden': 'bar'}\n \"\"\"\n\nimport abc\nimport concurrent.futures\nimport logging\nimport os\nimport typing\nfrom json import dumps\n\nimport pkg_resources\n\nfrom opentelemetry.sdk.environment_variables import OTEL_RESOURCE_ATTRIBUTES\n\nLabelValue = typing.Union[str, bool, int, float]\nAttributes = typing.Dict[str, LabelValue]\nlogger = logging.getLogger(__name__)\n\n\nCLOUD_PROVIDER = \"cloud.provider\"\nCLOUD_ACCOUNT_ID = \"cloud.account.id\"\nCLOUD_REGION = \"cloud.region\"\nCLOUD_ZONE = \"cloud.zone\"\nCONTAINER_NAME = \"container.name\"\nCONTAINER_ID = \"container.id\"\nCONTAINER_IMAGE_NAME = \"container.image.name\"\nCONTAINER_IMAGE_TAG = \"container.image.tag\"\nDEPLOYMENT_ENVIRONMENT = \"deployment.environment\"\nFAAS_NAME = \"faas.name\"\nFAAS_ID = \"faas.id\"\nFAAS_VERSION = \"faas.version\"\nFAAS_INSTANCE = \"faas.instance\"\nHOST_NAME = \"host.name\"\nHOST_TYPE = \"host.type\"\nHOST_IMAGE_NAME = \"host.image.name\"\nHOST_IMAGE_ID = \"host.image.id\"\nHOST_IMAGE_VERSION = \"host.image.version\"\nKUBERNETES_CLUSTER_NAME = \"k8s.cluster.name\"\nKUBERNETES_NAMESPACE_NAME = \"k8s.namespace.name\"\nKUBERNETES_POD_UID = \"k8s.pod.uid\"\nKUBERNETES_POD_NAME = \"k8s.pod.name\"\nKUBERNETES_CONTAINER_NAME = \"k8s.container.name\"\nKUBERNETES_REPLICA_SET_UID = \"k8s.replicaset.uid\"\nKUBERNETES_REPLICA_SET_NAME = \"k8s.replicaset.name\"\nKUBERNETES_DEPLOYMENT_UID = \"k8s.deployment.uid\"\nKUBERNETES_DEPLOYMENT_NAME = \"k8s.deployment.name\"\nKUBERNETES_STATEFUL_SET_UID = \"k8s.statefulset.uid\"\nKUBERNETES_STATEFUL_SET_NAME = \"k8s.statefulset.name\"\nKUBERNETES_DAEMON_SET_UID = \"k8s.daemonset.uid\"\nKUBERNETES_DAEMON_SET_NAME = \"k8s.daemonset.name\"\nKUBERNETES_JOB_UID = \"k8s.job.uid\"\nKUBERNETES_JOB_NAME = \"k8s.job.name\"\nKUBERNETES_CRON_JOB_UID = \"k8s.cronjob.uid\"\nKUBERNETES_CRON_JOB_NAME = \"k8s.cronjob.name\"\nOS_TYPE = \"os.type\"\nOS_DESCRIPTION = \"os.description\"\nPROCESS_PID = \"process.pid\"\nPROCESS_EXECUTABLE_NAME = \"process.executable.name\"\nPROCESS_EXECUTABLE_PATH = \"process.executable.path\"\nPROCESS_COMMAND = \"process.command\"\nPROCESS_COMMAND_LINE = \"process.command_line\"\nPROCESS_COMMAND_ARGS = \"process.command_args\"\nPROCESS_OWNER = \"process.owner\"\nPROCESS_RUNTIME_NAME = \"process.runtime.name\"\nPROCESS_RUNTIME_VERSION = \"process.runtime.version\"\nPROCESS_RUNTIME_DESCRIPTION = \"process.runtime.description\"\nSERVICE_NAME = \"service.name\"\nSERVICE_NAMESPACE = \"service.namespace\"\nSERVICE_INSTANCE_ID = \"service.instance.id\"\nSERVICE_VERSION = \"service.version\"\nTELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\nTELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\nTELEMETRY_AUTO_VERSION = \"telemetry.auto.version\"\nTELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n\n\nOPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n).version\n\n\nclass Resource:\n \"\"\"A Resource is an immutable representation of the entity producing telemetry as Attributes.\"\"\"\n\n def __init__(self, attributes: Attributes):\n self._attributes = attributes.copy()\n\n @staticmethod\n def 
create(attributes: typing.Optional[Attributes] = None) -> \"Resource\":\n \"\"\"Creates a new `Resource` from attributes.\n\n Args:\n attributes: Optional zero or more key-value pairs.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n if not attributes:\n attributes = {}\n resource = _DEFAULT_RESOURCE.merge(\n OTELResourceDetector().detect()\n ).merge(Resource(attributes))\n if not resource.attributes.get(SERVICE_NAME, None):\n default_service_name = \"unknown_service\"\n process_executable_name = resource.attributes.get(\n PROCESS_EXECUTABLE_NAME, None\n )\n if process_executable_name:\n default_service_name += \":\" + process_executable_name\n resource = resource.merge(\n Resource({SERVICE_NAME: default_service_name})\n )\n return resource\n\n @staticmethod\n def get_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n\n @property\n def attributes(self) -> Attributes:\n return self._attributes.copy()\n\n def merge(self, other: \"Resource\") -> \"Resource\":\n \"\"\"Merges this resource and an updating resource into a new `Resource`.\n\n If a key exists on both the old and updating resource, the value of the\n updating resource will override the old resource value.\n\n Args:\n other: The other resource to be merged.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n merged_attributes = self.attributes\n merged_attributes.update(other.attributes)\n return Resource(merged_attributes)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Resource):\n return False\n return self._attributes == other._attributes\n\n def __hash__(self):\n return hash(dumps(self._attributes, sort_keys=True))\n\n\n_EMPTY_RESOURCE = Resource({})\n_DEFAULT_RESOURCE = Resource(\n {\n TELEMETRY_SDK_LANGUAGE: \"python\",\n TELEMETRY_SDK_NAME: \"opentelemetry\",\n TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,\n }\n)\n\n\nclass ResourceDetector(abc.ABC):\n def __init__(self, raise_on_error=False):\n self.raise_on_error = raise_on_error\n\n @abc.abstractmethod\n def detect(self) -> \"Resource\":\n raise NotImplementedError()\n\n\nclass OTELResourceDetector(ResourceDetector):\n # pylint: disable=no-self-use\n def detect(self) -> \"Resource\":\n env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)\n env_resource_map = {}\n if env_resources_items:\n env_resource_map = {\n key.strip(): value.strip()\n for key, value in (\n item.split(\"=\") for item in env_resources_items.split(\",\")\n )\n }\n return Resource(env_resource_map)\n\n\ndef get_aggregated_resources(\n detectors: typing.List[\"ResourceDetector\"],\n initial_resource: typing.Optional[Resource] = None,\n timeout=5,\n) -> \"Resource\":\n \"\"\"Retrieves resources from detectors in the order that they were passed\n\n :param detectors: List of resources in order of priority\n :param initial_resource: Static resource. 
This has highest priority\n :param timeout: Number of seconds to wait for each detector to return\n :return:\n \"\"\"\n final_resource = initial_resource or _EMPTY_RESOURCE\n detectors = [OTELResourceDetector()] + detectors\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n futures = [executor.submit(detector.detect) for detector in detectors]\n for detector_ind, future in enumerate(futures):\n detector = detectors[detector_ind]\n try:\n detected_resources = future.result(timeout=timeout)\n # pylint: disable=broad-except\n except Exception as ex:\n if detector.raise_on_error:\n raise ex\n logger.warning(\n \"Exception %s in detector %s, ignoring\", ex, detector\n )\n detected_resources = _EMPTY_RESOURCE\n finally:\n final_resource = final_resource.merge(detected_resources)\n return final_resource\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ddc4e879b4..339d4b86741 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Rename `IdsGenerator` to `IdGenerator` ([#1651])(https://github.com/open-telemetry/opentelemetry-python/pull/1651) +- Rename Resource's `create_empty` to `get_empty` + ([#1653])(https://github.com/open-telemetry/opentelemetry-python/pull/1653) ## [0.18b0](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v0.18b0) - 2021-02-16 diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py index c840e5f298e..a7fa6dd54c3 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py @@ -188,7 +188,7 @@ def create(attributes: typing.Optional[Attributes] = None) -> "Resource": return resource @staticmethod - def create_empty() -> "Resource": + def get_empty() -> "Resource": return _EMPTY_RESOURCE @property diff --git a/opentelemetry-sdk/tests/resources/test_resources.py b/opentelemetry-sdk/tests/resources/test_resources.py index 3effa5d2452..4151c2fbd8d 100644 --- a/opentelemetry-sdk/tests/resources/test_resources.py +++ b/opentelemetry-sdk/tests/resources/test_resources.py @@ -59,7 +59,7 @@ def test_create(self): self.assertEqual(resource.attributes, expected_with_envar) os.environ[resources.OTEL_RESOURCE_ATTRIBUTES] = "" - resource = resources.Resource.create_empty() + resource = resources.Resource.get_empty() self.assertEqual(resource, resources._EMPTY_RESOURCE) resource = resources.Resource.create(None) @@ -140,9 +140,7 @@ def test_service_name_using_process_name(self): def test_aggregated_resources_no_detectors(self): aggregated_resources = resources.get_aggregated_resources([]) - self.assertEqual( - aggregated_resources, resources.Resource.create_empty() - ) + self.assertEqual(aggregated_resources, resources.Resource.get_empty()) def test_aggregated_resources_with_static_resource(self): static_resource = resources.Resource({"static_key": "static_value"}) @@ -208,7 +206,7 @@ def test_resource_detector_ignore_error(self): resource_detector.raise_on_error = False self.assertEqual( resources.get_aggregated_resources([resource_detector]), - resources.Resource.create_empty(), + resources.Resource.get_empty(), ) def test_resource_detector_raise_error(self): @@ -245,7 +243,7 @@ def tearDown(self) -> None: def test_empty(self): detector = resources.OTELResourceDetector() os.environ[resources.OTEL_RESOURCE_ATTRIBUTES] = "" - self.assertEqual(detector.detect(), resources.Resource.create_empty()) + self.assertEqual(detector.detect(), resources.Resource.get_empty()) def test_one(self): detector = resources.OTELResourceDetector()
googleapis__python-spanner-django-109
djangotests: ERROR: test_extra_method_select_argument_with_dashes (basic.tests.ModelTest) ```shell ====================================================================== ERROR: test_extra_method_select_argument_with_dashes (basic.tests.ModelTest) ---------------------------------------------------------------------- Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 79, in next return six.next(self._wrapped) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/grpc/_channel.py", line 364, in __next__ return self._next() File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/grpc/_channel.py", line 358, in _next raise self grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with: status = StatusCode.INVALID_ARGUMENT details = "Syntax error: Unexpected \"-\" [at 1:21]\nSELECT (1) AS dashed-value, (2) AS undashedvalue, basic_article.id, basic_art...\n ^" debug_error_string = "{"created":"@1575261818.820579000","description":"Error received from peer ipv4:172.217.11.170:443","file":"src/core/lib/surface/call.cc","file_line":1046,"grpc_message":"Syntax error: Unexpected \"-\" [at 1:21]\nSELECT (1) AS dashed-value, (2) AS undashedvalue, basic_article.id, basic_art...\n ^","grpc_status":3}" > The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/emmanuelodeke/Desktop/spanner-orm/django_tests/django/tests/basic/tests.py", line 265, in test_extra_method_select_argument_with_dashes self.assertEqual(articles[0].undashedvalue, 2) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/query.py", line 308, in __getitem__ qs._fetch_all() File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/query.py", line 1242, in _fetch_all self._result_cache = list(self._iterable_class(self)) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/query.py", line 55, in __iter__ results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/sql/compiler.py", line 1133, in execute_sql return list(result) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/sql/compiler.py", line 1512, in cursor_iter for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/models/sql/compiler.py", line 1512, in <lambda> for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/Django-2.2.8.dev20191126193909-py3.7.egg/django/db/utils.py", line 96, in inner return func(*args, **kwargs) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/spanner/dbapi/cursor.py", line 218, in fetchmany items.append(tuple(self.__next__())) File 
"/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/spanner/dbapi/cursor.py", line 186, in __next__ return next(self.__itr) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/cloud/spanner_v1/streamed.py", line 143, in __iter__ self._consume_next() File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/cloud/spanner_v1/streamed.py", line 116, in _consume_next response = six.next(self._response_iterator) File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/cloud/spanner_v1/snapshot.py", line 45, in _restart_on_unavailable for item in iterator: File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 81, in next six.raise_from(exceptions.from_grpc_error(exc), exc) File "<string>", line 3, in raise_from google.api_core.exceptions.InvalidArgument: 400 Syntax error: Unexpected \"-\" [at 1:21]\nSELECT (1) AS dashed-value, (2) AS undashedvalue, basic_article.id, basic_art...\n ^ ``` But really Cloud Spanner doesn't support dashed values as per ```SQL SELECT (1) AS dashed-value, blogpost.post_id FROM blogpost ```
[ { "content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's lookup names that require a different name in Spanner's\n # EXTRACT() function.\n # https://cloud.google.com/spanner/docs/functions-and-operators#extract\n extract_names = {\n 'week_day': 'dayofweek',\n 'iso_week': 'isoweek',\n 'iso_year': 'isoyear',\n }\n\n def quote_name(self, name):\n return name\n\n def bulk_insert_sql(self, fields, placeholder_rows):\n placeholder_rows_sql = (\", \".join(row) for row in placeholder_rows)\n values_sql = \", \".join(\"(%s)\" % sql for sql in placeholder_rows_sql)\n return \"VALUES \" + values_sql\n\n def sql_flush(self, style, tables, sequences, allow_cascade=False):\n # Cloud Spanner doesn't support TRUNCATE so DELETE instead.\n # A dummy WHERE clause is required.\n if tables:\n delete_sql = '%s %s %%s %s 1=1;' % (\n style.SQL_KEYWORD('DELETE'),\n style.SQL_KEYWORD('FROM'),\n style.SQL_KEYWORD('WHERE'),\n )\n return [\n delete_sql % style.SQL_FIELD(self.quote_name(table))\n for table in tables\n ]\n else:\n return []\n\n def adapt_datetimefield_value(self, value):\n if value is None:\n return None\n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # Cloud Spanner doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value = timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\"Cloud Spanner does not support timezone-aware datetimes when USE_TZ is False.\")\n return TimestampStr(value.isoformat(timespec='microseconds') + 'Z')\n\n def get_db_converters(self, expression):\n converters = super().get_db_converters(expression)\n internal_type = expression.output_field.get_internal_type()\n if internal_type == 'DateTimeField':\n converters.append(self.convert_datetimefield_value)\n return converters\n\n def convert_datetimefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Cloud Spanner returns the\n # google.api_core.datetime_helpers.DatetimeWithNanoseconds subclass\n # of datetime with tzinfo=UTC (which should be replaced with the\n # connection's timezone). Django doesn't support nanoseconds so that\n # part is ignored.\n return datetime(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second, value.microsecond,\n self.connection.timezone,\n )\n\n def date_extract_sql(self, lookup_type, field_name):\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)\n\n def datetime_extract_sql(self, lookup_type, field_name, tzname):\n tzname = self.connection.timezone if settings.USE_TZ else 'UTC'\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s AT TIME ZONE \"%s\")' % (lookup_type, field_name, tzname)\n", "path": "spanner/django/operations.py" } ]
[ { "content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's lookup names that require a different name in Spanner's\n # EXTRACT() function.\n # https://cloud.google.com/spanner/docs/functions-and-operators#extract\n extract_names = {\n 'week_day': 'dayofweek',\n 'iso_week': 'isoweek',\n 'iso_year': 'isoyear',\n }\n\n def quote_name(self, name):\n if '-' in name:\n return '`' + name + '`'\n return name\n\n def bulk_insert_sql(self, fields, placeholder_rows):\n placeholder_rows_sql = (\", \".join(row) for row in placeholder_rows)\n values_sql = \", \".join(\"(%s)\" % sql for sql in placeholder_rows_sql)\n return \"VALUES \" + values_sql\n\n def sql_flush(self, style, tables, sequences, allow_cascade=False):\n # Cloud Spanner doesn't support TRUNCATE so DELETE instead.\n # A dummy WHERE clause is required.\n if tables:\n delete_sql = '%s %s %%s %s 1=1;' % (\n style.SQL_KEYWORD('DELETE'),\n style.SQL_KEYWORD('FROM'),\n style.SQL_KEYWORD('WHERE'),\n )\n return [\n delete_sql % style.SQL_FIELD(self.quote_name(table))\n for table in tables\n ]\n else:\n return []\n\n def adapt_datetimefield_value(self, value):\n if value is None:\n return None\n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # Cloud Spanner doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value = timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\"Cloud Spanner does not support timezone-aware datetimes when USE_TZ is False.\")\n return TimestampStr(value.isoformat(timespec='microseconds') + 'Z')\n\n def get_db_converters(self, expression):\n converters = super().get_db_converters(expression)\n internal_type = expression.output_field.get_internal_type()\n if internal_type == 'DateTimeField':\n converters.append(self.convert_datetimefield_value)\n return converters\n\n def convert_datetimefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Cloud Spanner returns the\n # google.api_core.datetime_helpers.DatetimeWithNanoseconds subclass\n # of datetime with tzinfo=UTC (which should be replaced with the\n # connection's timezone). Django doesn't support nanoseconds so that\n # part is ignored.\n return datetime(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second, value.microsecond,\n self.connection.timezone,\n )\n\n def date_extract_sql(self, lookup_type, field_name):\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)\n\n def datetime_extract_sql(self, lookup_type, field_name, tzname):\n tzname = self.connection.timezone if settings.USE_TZ else 'UTC'\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s AT TIME ZONE \"%s\")' % (lookup_type, field_name, tzname)\n", "path": "spanner/django/operations.py" } ]
diff --git a/spanner/django/operations.py b/spanner/django/operations.py index afd8f1b89c..72edac74d9 100644 --- a/spanner/django/operations.py +++ b/spanner/django/operations.py @@ -17,6 +17,8 @@ class DatabaseOperations(BaseDatabaseOperations): } def quote_name(self, name): + if '-' in name: + return '`' + name + '`' return name def bulk_insert_sql(self, fields, placeholder_rows):
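An illustrative sketch (not taken from the repository) of the `quote_name()` behaviour the patch above introduces. The standalone function below simply mirrors the patched `DatabaseOperations.quote_name` so it can run without a Django or Spanner setup.

```python
def quote_name(name):
    # Mirrors spanner/django/operations.py after the patch: identifiers that
    # contain a dash (e.g. "dashed-value") are wrapped in backticks so Cloud
    # Spanner accepts them; everything else passes through unchanged.
    if '-' in name:
        return '`' + name + '`'
    return name

assert quote_name('dashed-value') == '`dashed-value`'
assert quote_name('undashedvalue') == 'undashedvalue'
```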
tobymao__sqlglot-2800
ParseError when using LIKE/ILIKE on an element in an object in Snowflake I'm getting `ParseError: Invalid expression / Unexpected token` when using `LIKE` or `ILIKE` on an element within an object in Snowflake. Example: ``` import sqlglot sqlglot.parse(""" select parse_json('{"x": "hello"}'):x like 'hello' """, read="snowflake") sqlglot.parse(""" select data:x like 'hello' from some_table """, read="snowflake") ``` Both of these cause the parsing error, but both are valid Snowflake statements.
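A hedged round-trip sketch of how the parser change in the files below can be exercised. `parse_one` and `.sql()` are standard sqlglot entry points, but the exact SQL the Snowflake generator emits for the extracted path is an assumption, so the sketch only demonstrates that parsing succeeds and round-trips.

```python
import sqlglot

# With the fix below, LIKE/ILIKE following a ":" path extraction should parse
# instead of raising ParseError: Invalid expression / Unexpected token.
expr = sqlglot.parse_one(
    "select data:x ilike 'hello' from some_table",
    read="snowflake",
)
print(expr.sql(dialect="snowflake"))  # generate Snowflake SQL back from the AST
```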
[ { "content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot._typing import E\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n binary_from_function,\n date_delta_sql,\n date_trunc_to_time,\n datestrtodate_sql,\n format_time_lambda,\n if_sql,\n inline_array_sql,\n json_keyvalue_comma_sql,\n max_or_greatest,\n min_or_least,\n rename_func,\n timestamptrunc_sql,\n timestrtotime_sql,\n var_map_sql,\n)\nfrom sqlglot.expressions import Literal\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _check_int(s: str) -> bool:\n if s[0] in (\"-\", \"+\"):\n return s[1:].isdigit()\n return s.isdigit()\n\n\n# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html\ndef _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:\n if len(args) == 2:\n first_arg, second_arg = args\n if second_arg.is_string:\n # case: <string_expr> [ , <format> ]\n return format_time_lambda(exp.StrToTime, \"snowflake\")(args)\n\n # case: <numeric_expr> [ , <scale> ]\n if second_arg.name not in [\"0\", \"3\", \"9\"]:\n raise ValueError(\n f\"Scale for snowflake numeric timestamp is {second_arg}, but should be 0, 3, or 9\"\n )\n\n if second_arg.name == \"0\":\n timescale = exp.UnixToTime.SECONDS\n elif second_arg.name == \"3\":\n timescale = exp.UnixToTime.MILLIS\n elif second_arg.name == \"9\":\n timescale = exp.UnixToTime.NANOS\n\n return exp.UnixToTime(this=first_arg, scale=timescale)\n\n from sqlglot.optimizer.simplify import simplify_literals\n\n # The first argument might be an expression like 40 * 365 * 86400, so we try to\n # reduce it using `simplify_literals` first and then check if it's a Literal.\n first_arg = seq_get(args, 0)\n if not isinstance(simplify_literals(first_arg, root=True), Literal):\n # case: <variant_expr> or other expressions such as columns\n return exp.TimeStrToTime.from_arg_list(args)\n\n if first_arg.is_string:\n if _check_int(first_arg.this):\n # case: <integer>\n return exp.UnixToTime.from_arg_list(args)\n\n # case: <date_expr>\n return format_time_lambda(exp.StrToTime, \"snowflake\", default=True)(args)\n\n # case: <numeric_expr>\n return exp.UnixToTime.from_arg_list(args)\n\n\ndef _parse_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:\n expression = parser.parse_var_map(args)\n\n if isinstance(expression, exp.StarMap):\n return expression\n\n return exp.Struct(\n expressions=[\n t.cast(exp.Condition, k).eq(v) for k, v in zip(expression.keys, expression.values)\n ]\n )\n\n\ndef _parse_datediff(args: t.List) -> exp.DateDiff:\n return exp.DateDiff(\n this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))\n )\n\n\ndef _unix_to_time_sql(self: Snowflake.Generator, expression: exp.UnixToTime) -> str:\n scale = expression.args.get(\"scale\")\n timestamp = self.sql(expression, \"this\")\n if scale in (None, exp.UnixToTime.SECONDS):\n return f\"TO_TIMESTAMP({timestamp})\"\n if scale == exp.UnixToTime.MILLIS:\n return f\"TO_TIMESTAMP({timestamp}, 3)\"\n if scale == exp.UnixToTime.MICROS:\n return f\"TO_TIMESTAMP({timestamp} / 1000, 3)\"\n if scale == exp.UnixToTime.NANOS:\n return f\"TO_TIMESTAMP({timestamp}, 9)\"\n\n self.unsupported(f\"Unsupported scale for timestamp: {scale}.\")\n return \"\"\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/date_part.html\n# 
https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts\ndef _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:\n this = self._parse_var() or self._parse_type()\n\n if not this:\n return None\n\n self._match(TokenType.COMMA)\n expression = self._parse_bitwise()\n this = _map_date_part(this)\n name = this.name.upper()\n\n if name.startswith(\"EPOCH\"):\n if name == \"EPOCH_MILLISECOND\":\n scale = 10**3\n elif name == \"EPOCH_MICROSECOND\":\n scale = 10**6\n elif name == \"EPOCH_NANOSECOND\":\n scale = 10**9\n else:\n scale = None\n\n ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build(\"TIMESTAMP\"))\n to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)\n\n if scale:\n to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))\n\n return to_unix\n\n return self.expression(exp.Extract, this=this, expression=expression)\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/div0\ndef _div0_to_if(args: t.List) -> exp.If:\n cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))\n true = exp.Literal.number(0)\n false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))\n return exp.If(this=cond, true=true, false=false)\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull\ndef _zeroifnull_to_if(args: t.List) -> exp.If:\n cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())\n return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull\ndef _nullifzero_to_if(args: t.List) -> exp.If:\n cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))\n return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))\n\n\ndef _datatype_sql(self: Snowflake.Generator, expression: exp.DataType) -> str:\n if expression.is_type(\"array\"):\n return \"ARRAY\"\n elif expression.is_type(\"map\"):\n return \"OBJECT\"\n return self.datatype_sql(expression)\n\n\ndef _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:\n flag = expression.text(\"flag\")\n\n if \"i\" not in flag:\n flag += \"i\"\n\n return self.func(\n \"REGEXP_LIKE\", expression.this, expression.expression, exp.Literal.string(flag)\n )\n\n\ndef _parse_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:\n if len(args) == 3:\n return exp.Anonymous(this=\"CONVERT_TIMEZONE\", expressions=args)\n return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))\n\n\ndef _parse_regexp_replace(args: t.List) -> exp.RegexpReplace:\n regexp_replace = exp.RegexpReplace.from_arg_list(args)\n\n if not regexp_replace.args.get(\"replacement\"):\n regexp_replace.set(\"replacement\", exp.Literal.string(\"\"))\n\n return regexp_replace\n\n\ndef _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:\n def _parse(self: Snowflake.Parser) -> exp.Show:\n return self._parse_show_snowflake(*args, **kwargs)\n\n return _parse\n\n\nDATE_PART_MAPPING = {\n \"Y\": \"YEAR\",\n \"YY\": \"YEAR\",\n \"YYY\": \"YEAR\",\n \"YYYY\": \"YEAR\",\n \"YR\": \"YEAR\",\n \"YEARS\": \"YEAR\",\n \"YRS\": \"YEAR\",\n \"MM\": \"MONTH\",\n \"MON\": \"MONTH\",\n \"MONS\": \"MONTH\",\n \"MONTHS\": \"MONTH\",\n \"D\": \"DAY\",\n \"DD\": \"DAY\",\n \"DAYS\": \"DAY\",\n \"DAYOFMONTH\": \"DAY\",\n \"WEEKDAY\": \"DAYOFWEEK\",\n \"DOW\": \"DAYOFWEEK\",\n \"DW\": \"DAYOFWEEK\",\n \"WEEKDAY_ISO\": \"DAYOFWEEKISO\",\n \"DOW_ISO\": \"DAYOFWEEKISO\",\n \"DW_ISO\": 
\"DAYOFWEEKISO\",\n \"YEARDAY\": \"DAYOFYEAR\",\n \"DOY\": \"DAYOFYEAR\",\n \"DY\": \"DAYOFYEAR\",\n \"W\": \"WEEK\",\n \"WK\": \"WEEK\",\n \"WEEKOFYEAR\": \"WEEK\",\n \"WOY\": \"WEEK\",\n \"WY\": \"WEEK\",\n \"WEEK_ISO\": \"WEEKISO\",\n \"WEEKOFYEARISO\": \"WEEKISO\",\n \"WEEKOFYEAR_ISO\": \"WEEKISO\",\n \"Q\": \"QUARTER\",\n \"QTR\": \"QUARTER\",\n \"QTRS\": \"QUARTER\",\n \"QUARTERS\": \"QUARTER\",\n \"H\": \"HOUR\",\n \"HH\": \"HOUR\",\n \"HR\": \"HOUR\",\n \"HOURS\": \"HOUR\",\n \"HRS\": \"HOUR\",\n \"M\": \"MINUTE\",\n \"MI\": \"MINUTE\",\n \"MIN\": \"MINUTE\",\n \"MINUTES\": \"MINUTE\",\n \"MINS\": \"MINUTE\",\n \"S\": \"SECOND\",\n \"SEC\": \"SECOND\",\n \"SECONDS\": \"SECOND\",\n \"SECS\": \"SECOND\",\n \"MS\": \"MILLISECOND\",\n \"MSEC\": \"MILLISECOND\",\n \"MILLISECONDS\": \"MILLISECOND\",\n \"US\": \"MICROSECOND\",\n \"USEC\": \"MICROSECOND\",\n \"MICROSECONDS\": \"MICROSECOND\",\n \"NS\": \"NANOSECOND\",\n \"NSEC\": \"NANOSECOND\",\n \"NANOSEC\": \"NANOSECOND\",\n \"NSECOND\": \"NANOSECOND\",\n \"NSECONDS\": \"NANOSECOND\",\n \"NANOSECS\": \"NANOSECOND\",\n \"NSECONDS\": \"NANOSECOND\",\n \"EPOCH\": \"EPOCH_SECOND\",\n \"EPOCH_SECONDS\": \"EPOCH_SECOND\",\n \"EPOCH_MILLISECONDS\": \"EPOCH_MILLISECOND\",\n \"EPOCH_MICROSECONDS\": \"EPOCH_MICROSECOND\",\n \"EPOCH_NANOSECONDS\": \"EPOCH_NANOSECOND\",\n \"TZH\": \"TIMEZONE_HOUR\",\n \"TZM\": \"TIMEZONE_MINUTE\",\n}\n\n\[email protected]\ndef _map_date_part(part: exp.Expression) -> exp.Var:\n pass\n\n\[email protected]\ndef _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:\n pass\n\n\ndef _map_date_part(part):\n mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None\n return exp.var(mapped) if mapped else part\n\n\ndef _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:\n trunc = date_trunc_to_time(args)\n trunc.set(\"unit\", _map_date_part(trunc.args[\"unit\"]))\n return trunc\n\n\ndef _parse_colon_get_path(\n self: parser.Parser, this: t.Optional[exp.Expression]\n) -> t.Optional[exp.Expression]:\n while True:\n path = self._parse_bitwise()\n\n # The cast :: operator has a lower precedence than the extraction operator :, so\n # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH\n if isinstance(path, exp.Cast):\n target_type = path.to\n path = path.this\n else:\n target_type = None\n\n if isinstance(path, exp.Expression):\n path = exp.Literal.string(path.sql(dialect=\"snowflake\"))\n\n # The extraction operator : is left-associative\n this = self.expression(exp.GetPath, this=this, expression=path)\n\n if target_type:\n this = exp.cast(this, target_type)\n\n if not self._match(TokenType.COLON):\n break\n\n return this\n\n\ndef _parse_timestamp_from_parts(args: t.List) -> exp.Func:\n if len(args) == 2:\n # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,\n # so we parse this into Anonymous for now instead of introducing complexity\n return exp.Anonymous(this=\"TIMESTAMP_FROM_PARTS\", expressions=args)\n\n return exp.TimestampFromParts.from_arg_list(args)\n\n\ndef _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,\n so we need to unqualify them.\n\n Example:\n >>> from sqlglot import parse_one\n >>> expr = parse_one(\"SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))\")\n >>> print(_unqualify_unpivot_columns(expr).sql(dialect=\"snowflake\"))\n SELECT * FROM m_sales UNPIVOT(sales FOR 
month IN (jan, feb, mar, april))\n \"\"\"\n if isinstance(expression, exp.Pivot) and expression.unpivot:\n expression = transforms.unqualify_columns(expression)\n\n return expression\n\n\nclass Snowflake(Dialect):\n # https://docs.snowflake.com/en/sql-reference/identifiers-syntax\n NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE\n NULL_ORDERING = \"nulls_are_large\"\n TIME_FORMAT = \"'YYYY-MM-DD HH24:MI:SS'\"\n SUPPORTS_USER_DEFINED_TYPES = False\n SUPPORTS_SEMI_ANTI_JOIN = False\n PREFER_CTE_ALIAS_COLUMN = True\n TABLESAMPLE_SIZE_IS_PERCENT = True\n\n TIME_MAPPING = {\n \"YYYY\": \"%Y\",\n \"yyyy\": \"%Y\",\n \"YY\": \"%y\",\n \"yy\": \"%y\",\n \"MMMM\": \"%B\",\n \"mmmm\": \"%B\",\n \"MON\": \"%b\",\n \"mon\": \"%b\",\n \"MM\": \"%m\",\n \"mm\": \"%m\",\n \"DD\": \"%d\",\n \"dd\": \"%-d\",\n \"DY\": \"%a\",\n \"dy\": \"%w\",\n \"HH24\": \"%H\",\n \"hh24\": \"%H\",\n \"HH12\": \"%I\",\n \"hh12\": \"%I\",\n \"MI\": \"%M\",\n \"mi\": \"%M\",\n \"SS\": \"%S\",\n \"ss\": \"%S\",\n \"FF\": \"%f\",\n \"ff\": \"%f\",\n \"FF6\": \"%f\",\n \"ff6\": \"%f\",\n }\n\n def quote_identifier(self, expression: E, identify: bool = True) -> E:\n # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an\n # unquoted DUAL keyword in a special way and does not map it to a user-defined table\n if (\n isinstance(expression, exp.Identifier)\n and isinstance(expression.parent, exp.Table)\n and expression.name.lower() == \"dual\"\n ):\n return t.cast(E, expression)\n\n return super().quote_identifier(expression, identify=identify)\n\n class Parser(parser.Parser):\n IDENTIFY_PIVOT_STRINGS = True\n\n TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"ARRAYAGG\": exp.ArrayAgg.from_arg_list,\n \"ARRAY_CONSTRUCT\": exp.Array.from_arg_list,\n \"ARRAY_CONTAINS\": lambda args: exp.ArrayContains(\n this=seq_get(args, 1), expression=seq_get(args, 0)\n ),\n \"ARRAY_GENERATE_RANGE\": lambda args: exp.GenerateSeries(\n # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive\n start=seq_get(args, 0),\n end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),\n step=seq_get(args, 2),\n ),\n \"ARRAY_TO_STRING\": exp.ArrayJoin.from_arg_list,\n \"BITXOR\": binary_from_function(exp.BitwiseXor),\n \"BIT_XOR\": binary_from_function(exp.BitwiseXor),\n \"BOOLXOR\": binary_from_function(exp.Xor),\n \"CONVERT_TIMEZONE\": _parse_convert_timezone,\n \"DATE_TRUNC\": _date_trunc_to_time,\n \"DATEADD\": lambda args: exp.DateAdd(\n this=seq_get(args, 2),\n expression=seq_get(args, 1),\n unit=_map_date_part(seq_get(args, 0)),\n ),\n \"DATEDIFF\": _parse_datediff,\n \"DIV0\": _div0_to_if,\n \"FLATTEN\": exp.Explode.from_arg_list,\n \"IFF\": exp.If.from_arg_list,\n \"LAST_DAY\": lambda args: exp.LastDay(\n this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))\n ),\n \"LISTAGG\": exp.GroupConcat.from_arg_list,\n \"NULLIFZERO\": _nullifzero_to_if,\n \"OBJECT_CONSTRUCT\": _parse_object_construct,\n \"REGEXP_REPLACE\": _parse_regexp_replace,\n \"REGEXP_SUBSTR\": exp.RegexpExtract.from_arg_list,\n \"RLIKE\": exp.RegexpLike.from_arg_list,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n \"TIMEDIFF\": _parse_datediff,\n \"TIMESTAMPDIFF\": _parse_datediff,\n \"TIMESTAMPFROMPARTS\": _parse_timestamp_from_parts,\n \"TIMESTAMP_FROM_PARTS\": _parse_timestamp_from_parts,\n \"TO_TIMESTAMP\": _parse_to_timestamp,\n \"TO_VARCHAR\": exp.ToChar.from_arg_list,\n 
\"ZEROIFNULL\": _zeroifnull_to_if,\n }\n\n FUNCTION_PARSERS = {\n **parser.Parser.FUNCTION_PARSERS,\n \"DATE_PART\": _parse_date_part,\n \"OBJECT_CONSTRUCT_KEEP_NULL\": lambda self: self._parse_json_object(),\n }\n FUNCTION_PARSERS.pop(\"TRIM\")\n\n TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}\n\n RANGE_PARSERS = {\n **parser.Parser.RANGE_PARSERS,\n TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),\n TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),\n TokenType.COLON: _parse_colon_get_path,\n }\n\n ALTER_PARSERS = {\n **parser.Parser.ALTER_PARSERS,\n \"SET\": lambda self: self._parse_set(tag=self._match_text_seq(\"TAG\")),\n \"UNSET\": lambda self: self.expression(\n exp.Set,\n tag=self._match_text_seq(\"TAG\"),\n expressions=self._parse_csv(self._parse_id_var),\n unset=True,\n ),\n \"SWAP\": lambda self: self._parse_alter_table_swap(),\n }\n\n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n TokenType.SHOW: lambda self: self._parse_show(),\n }\n\n PROPERTY_PARSERS = {\n **parser.Parser.PROPERTY_PARSERS,\n \"LOCATION\": lambda self: self._parse_location(),\n }\n\n SHOW_PARSERS = {\n \"PRIMARY KEYS\": _show_parser(\"PRIMARY KEYS\"),\n \"TERSE PRIMARY KEYS\": _show_parser(\"PRIMARY KEYS\"),\n \"COLUMNS\": _show_parser(\"COLUMNS\"),\n }\n\n STAGED_FILE_SINGLE_TOKENS = {\n TokenType.DOT,\n TokenType.MOD,\n TokenType.SLASH,\n }\n\n FLATTEN_COLUMNS = [\"SEQ\", \"KEY\", \"PATH\", \"INDEX\", \"VALUE\", \"THIS\"]\n\n def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:\n if is_map:\n # Keys are strings in Snowflake's objects, see also:\n # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured\n # - https://docs.snowflake.com/en/sql-reference/functions/object_construct\n return self._parse_slice(self._parse_string())\n\n return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))\n\n def _parse_lateral(self) -> t.Optional[exp.Lateral]:\n lateral = super()._parse_lateral()\n if not lateral:\n return lateral\n\n if isinstance(lateral.this, exp.Explode):\n table_alias = lateral.args.get(\"alias\")\n columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]\n if table_alias and not table_alias.args.get(\"columns\"):\n table_alias.set(\"columns\", columns)\n elif not table_alias:\n exp.alias_(lateral, \"_flattened\", table=columns, copy=False)\n\n return lateral\n\n def _parse_at_before(self, table: exp.Table) -> exp.Table:\n # https://docs.snowflake.com/en/sql-reference/constructs/at-before\n index = self._index\n if self._match_texts((\"AT\", \"BEFORE\")):\n this = self._prev.text.upper()\n kind = (\n self._match(TokenType.L_PAREN)\n and self._match_texts(self.HISTORICAL_DATA_KIND)\n and self._prev.text.upper()\n )\n expression = self._match(TokenType.FARROW) and self._parse_bitwise()\n\n if expression:\n self._match_r_paren()\n when = self.expression(\n exp.HistoricalData, this=this, kind=kind, expression=expression\n )\n table.set(\"when\", when)\n else:\n self._retreat(index)\n\n return table\n\n def _parse_table_parts(self, schema: bool = False) -> exp.Table:\n # https://docs.snowflake.com/en/user-guide/querying-stage\n if self._match(TokenType.STRING, advance=False):\n table = self._parse_string()\n elif self._match_text_seq(\"@\", advance=False):\n table = self._parse_location_path()\n else:\n table = None\n\n if table:\n file_format = None\n pattern = None\n\n self._match(TokenType.L_PAREN)\n while self._curr and not self._match(TokenType.R_PAREN):\n if 
self._match_text_seq(\"FILE_FORMAT\", \"=>\"):\n file_format = self._parse_string() or super()._parse_table_parts()\n elif self._match_text_seq(\"PATTERN\", \"=>\"):\n pattern = self._parse_string()\n else:\n break\n\n self._match(TokenType.COMMA)\n\n table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)\n else:\n table = super()._parse_table_parts(schema=schema)\n\n return self._parse_at_before(table)\n\n def _parse_id_var(\n self,\n any_token: bool = True,\n tokens: t.Optional[t.Collection[TokenType]] = None,\n ) -> t.Optional[exp.Expression]:\n if self._match_text_seq(\"IDENTIFIER\", \"(\"):\n identifier = (\n super()._parse_id_var(any_token=any_token, tokens=tokens)\n or self._parse_string()\n )\n self._match_r_paren()\n return self.expression(exp.Anonymous, this=\"IDENTIFIER\", expressions=[identifier])\n\n return super()._parse_id_var(any_token=any_token, tokens=tokens)\n\n def _parse_show_snowflake(self, this: str) -> exp.Show:\n scope = None\n scope_kind = None\n\n like = self._parse_string() if self._match(TokenType.LIKE) else None\n\n if self._match(TokenType.IN):\n if self._match_text_seq(\"ACCOUNT\"):\n scope_kind = \"ACCOUNT\"\n elif self._match_set(self.DB_CREATABLES):\n scope_kind = self._prev.text\n if self._curr:\n scope = self._parse_table()\n elif self._curr:\n scope_kind = \"TABLE\"\n scope = self._parse_table()\n\n return self.expression(\n exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind\n )\n\n def _parse_alter_table_swap(self) -> exp.SwapTable:\n self._match_text_seq(\"WITH\")\n return self.expression(exp.SwapTable, this=self._parse_table(schema=True))\n\n def _parse_location(self) -> exp.LocationProperty:\n self._match(TokenType.EQ)\n return self.expression(exp.LocationProperty, this=self._parse_location_path())\n\n def _parse_location_path(self) -> exp.Var:\n parts = [self._advance_any(ignore_reserved=True)]\n\n # We avoid consuming a comma token because external tables like @foo and @bar\n # can be joined in a query with a comma separator.\n while self._is_connected() and not self._match(TokenType.COMMA, advance=False):\n parts.append(self._advance_any(ignore_reserved=True))\n\n return exp.var(\"\".join(part.text for part in parts if part))\n\n class Tokenizer(tokens.Tokenizer):\n STRING_ESCAPES = [\"\\\\\", \"'\"]\n HEX_STRINGS = [(\"x'\", \"'\"), (\"X'\", \"'\")]\n RAW_STRINGS = [\"$$\"]\n COMMENTS = [\"--\", \"//\", (\"/*\", \"*/\")]\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"BYTEINT\": TokenType.INT,\n \"CHAR VARYING\": TokenType.VARCHAR,\n \"CHARACTER VARYING\": TokenType.VARCHAR,\n \"EXCLUDE\": TokenType.EXCEPT,\n \"ILIKE ANY\": TokenType.ILIKE_ANY,\n \"LIKE ANY\": TokenType.LIKE_ANY,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NCHAR VARYING\": TokenType.VARCHAR,\n \"PUT\": TokenType.COMMAND,\n \"RENAME\": TokenType.REPLACE,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"SQL_DOUBLE\": TokenType.DOUBLE,\n \"SQL_VARCHAR\": TokenType.VARCHAR,\n \"TIMESTAMP_LTZ\": TokenType.TIMESTAMPLTZ,\n \"TIMESTAMP_NTZ\": TokenType.TIMESTAMP,\n \"TIMESTAMP_TZ\": TokenType.TIMESTAMPTZ,\n \"TIMESTAMPNTZ\": TokenType.TIMESTAMP,\n \"TOP\": TokenType.TOP,\n }\n\n SINGLE_TOKENS = {\n **tokens.Tokenizer.SINGLE_TOKENS,\n \"$\": TokenType.PARAMETER,\n }\n\n VAR_SINGLE_TOKENS = {\"$\"}\n\n COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}\n\n class Generator(generator.Generator):\n PARAMETER_TOKEN = \"$\"\n MATCHED_BY_SOURCE = False\n SINGLE_STRING_INTERVAL = True\n JOIN_HINTS = False\n 
TABLE_HINTS = False\n QUERY_HINTS = False\n AGGREGATE_FILTER_SUPPORTED = False\n SUPPORTS_TABLE_COPY = False\n COLLATE_IS_FUNC = True\n LIMIT_ONLY_LITERALS = True\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.ArgMax: rename_func(\"MAX_BY\"),\n exp.ArgMin: rename_func(\"MIN_BY\"),\n exp.Array: inline_array_sql,\n exp.ArrayConcat: rename_func(\"ARRAY_CAT\"),\n exp.ArrayContains: lambda self, e: self.func(\"ARRAY_CONTAINS\", e.expression, e.this),\n exp.ArrayJoin: rename_func(\"ARRAY_TO_STRING\"),\n exp.AtTimeZone: lambda self, e: self.func(\n \"CONVERT_TIMEZONE\", e.args.get(\"zone\"), e.this\n ),\n exp.BitwiseXor: rename_func(\"BITXOR\"),\n exp.DateAdd: date_delta_sql(\"DATEADD\"),\n exp.DateDiff: date_delta_sql(\"DATEDIFF\"),\n exp.DateStrToDate: datestrtodate_sql,\n exp.DataType: _datatype_sql,\n exp.DayOfMonth: rename_func(\"DAYOFMONTH\"),\n exp.DayOfWeek: rename_func(\"DAYOFWEEK\"),\n exp.DayOfYear: rename_func(\"DAYOFYEAR\"),\n exp.Explode: rename_func(\"FLATTEN\"),\n exp.Extract: rename_func(\"DATE_PART\"),\n exp.GenerateSeries: lambda self, e: self.func(\n \"ARRAY_GENERATE_RANGE\", e.args[\"start\"], e.args[\"end\"] + 1, e.args.get(\"step\")\n ),\n exp.GroupConcat: rename_func(\"LISTAGG\"),\n exp.If: if_sql(name=\"IFF\", false_value=\"NULL\"),\n exp.JSONExtract: lambda self, e: f\"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]\",\n exp.JSONKeyValue: json_keyvalue_comma_sql,\n exp.JSONObject: lambda self, e: self.func(\"OBJECT_CONSTRUCT_KEEP_NULL\", *e.expressions),\n exp.LogicalAnd: rename_func(\"BOOLAND_AGG\"),\n exp.LogicalOr: rename_func(\"BOOLOR_AGG\"),\n exp.Map: lambda self, e: var_map_sql(self, e, \"OBJECT_CONSTRUCT\"),\n exp.Max: max_or_greatest,\n exp.Min: min_or_least,\n exp.PartitionedByProperty: lambda self, e: f\"PARTITION BY {self.sql(e, 'this')}\",\n exp.PercentileCont: transforms.preprocess(\n [transforms.add_within_group_for_percentiles]\n ),\n exp.PercentileDisc: transforms.preprocess(\n [transforms.add_within_group_for_percentiles]\n ),\n exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),\n exp.RegexpILike: _regexpilike_sql,\n exp.Rand: rename_func(\"RANDOM\"),\n exp.Select: transforms.preprocess(\n [\n transforms.eliminate_distinct_on,\n transforms.explode_to_unnest(),\n transforms.eliminate_semi_and_anti_joins,\n ]\n ),\n exp.SHA: rename_func(\"SHA1\"),\n exp.StarMap: rename_func(\"OBJECT_CONSTRUCT\"),\n exp.StartsWith: rename_func(\"STARTSWITH\"),\n exp.StrPosition: lambda self, e: self.func(\n \"POSITION\", e.args.get(\"substr\"), e.this, e.args.get(\"position\")\n ),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Struct: lambda self, e: self.func(\n \"OBJECT_CONSTRUCT\",\n *(arg for expression in e.expressions for arg in expression.flatten()),\n ),\n exp.Stuff: rename_func(\"INSERT\"),\n exp.TimestampDiff: lambda self, e: self.func(\n \"TIMESTAMPDIFF\", e.unit, e.expression, e.this\n ),\n exp.TimestampTrunc: timestamptrunc_sql,\n exp.TimeStrToTime: timestrtotime_sql,\n exp.TimeToStr: lambda self, e: self.func(\n \"TO_CHAR\", exp.cast(e.this, \"timestamp\"), self.format_time(e)\n ),\n exp.TimeToUnix: lambda self, e: f\"EXTRACT(epoch_second FROM {self.sql(e, 'this')})\",\n exp.ToArray: rename_func(\"TO_ARRAY\"),\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: lambda self, e: self.func(\"TRIM\", e.this, e.expression),\n exp.TsOrDsAdd: date_delta_sql(\"DATEADD\", cast=True),\n exp.TsOrDsDiff: date_delta_sql(\"DATEDIFF\"),\n exp.UnixToTime: 
_unix_to_time_sql,\n exp.VarMap: lambda self, e: var_map_sql(self, e, \"OBJECT_CONSTRUCT\"),\n exp.WeekOfYear: rename_func(\"WEEKOFYEAR\"),\n exp.Xor: rename_func(\"BOOLXOR\"),\n }\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TIMESTAMP: \"TIMESTAMPNTZ\",\n }\n\n STAR_MAPPING = {\n \"except\": \"EXCLUDE\",\n \"replace\": \"RENAME\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.SetProperty: exp.Properties.Location.UNSUPPORTED,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:\n milli = expression.args.get(\"milli\")\n if milli is not None:\n milli_to_nano = milli.pop() * exp.Literal.number(1000000)\n expression.set(\"nano\", milli_to_nano)\n\n return rename_func(\"TIMESTAMP_FROM_PARTS\")(self, expression)\n\n def trycast_sql(self, expression: exp.TryCast) -> str:\n value = expression.this\n\n if value.type is None:\n from sqlglot.optimizer.annotate_types import annotate_types\n\n value = annotate_types(value)\n\n if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):\n return super().trycast_sql(expression)\n\n # TRY_CAST only works for string values in Snowflake\n return self.cast_sql(expression)\n\n def log_sql(self, expression: exp.Log) -> str:\n if not expression.expression:\n return self.func(\"LN\", expression.this)\n\n return super().log_sql(expression)\n\n def unnest_sql(self, expression: exp.Unnest) -> str:\n unnest_alias = expression.args.get(\"alias\")\n offset = expression.args.get(\"offset\")\n\n columns = [\n exp.to_identifier(\"seq\"),\n exp.to_identifier(\"key\"),\n exp.to_identifier(\"path\"),\n offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier(\"index\"),\n seq_get(unnest_alias.columns if unnest_alias else [], 0)\n or exp.to_identifier(\"value\"),\n exp.to_identifier(\"this\"),\n ]\n\n if unnest_alias:\n unnest_alias.set(\"columns\", columns)\n else:\n unnest_alias = exp.TableAlias(this=\"_u\", columns=columns)\n\n explode = f\"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))\"\n alias = self.sql(unnest_alias)\n alias = f\" AS {alias}\" if alias else \"\"\n return f\"{explode}{alias}\"\n\n def show_sql(self, expression: exp.Show) -> str:\n like = self.sql(expression, \"like\")\n like = f\" LIKE {like}\" if like else \"\"\n\n scope = self.sql(expression, \"scope\")\n scope = f\" {scope}\" if scope else \"\"\n\n scope_kind = self.sql(expression, \"scope_kind\")\n if scope_kind:\n scope_kind = f\" IN {scope_kind}\"\n\n return f\"SHOW {expression.name}{like}{scope_kind}{scope}\"\n\n def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:\n # Other dialects don't support all of the following parameters, so we need to\n # generate default values as necessary to ensure the transpilation is correct\n group = expression.args.get(\"group\")\n parameters = expression.args.get(\"parameters\") or (group and exp.Literal.string(\"c\"))\n occurrence = expression.args.get(\"occurrence\") or (parameters and exp.Literal.number(1))\n position = expression.args.get(\"position\") or (occurrence and exp.Literal.number(1))\n\n return self.func(\n \"REGEXP_SUBSTR\",\n expression.this,\n expression.expression,\n position,\n occurrence,\n parameters,\n group,\n )\n\n def except_op(self, expression: exp.Except) -> str:\n if not expression.args.get(\"distinct\", False):\n self.unsupported(\"EXCEPT with All is not supported in Snowflake\")\n return 
super().except_op(expression)\n\n def intersect_op(self, expression: exp.Intersect) -> str:\n if not expression.args.get(\"distinct\", False):\n self.unsupported(\"INTERSECT with All is not supported in Snowflake\")\n return super().intersect_op(expression)\n\n def describe_sql(self, expression: exp.Describe) -> str:\n # Default to table if kind is unknown\n kind_value = expression.args.get(\"kind\") or \"TABLE\"\n kind = f\" {kind_value}\" if kind_value else \"\"\n this = f\" {self.sql(expression, 'this')}\"\n expressions = self.expressions(expression, flat=True)\n expressions = f\" {expressions}\" if expressions else \"\"\n return f\"DESCRIBE{kind}{this}{expressions}\"\n\n def generatedasidentitycolumnconstraint_sql(\n self, expression: exp.GeneratedAsIdentityColumnConstraint\n ) -> str:\n start = expression.args.get(\"start\")\n start = f\" START {start}\" if start else \"\"\n increment = expression.args.get(\"increment\")\n increment = f\" INCREMENT {increment}\" if increment else \"\"\n return f\"AUTOINCREMENT{start}{increment}\"\n\n def swaptable_sql(self, expression: exp.SwapTable) -> str:\n this = self.sql(expression, \"this\")\n return f\"SWAP WITH {this}\"\n\n def with_properties(self, properties: exp.Properties) -> str:\n return self.properties(properties, wrapped=False, prefix=self.seg(\"\"), sep=\" \")\n", "path": "sqlglot/dialects/snowflake.py" } ]
[ { "content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot._typing import E\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n binary_from_function,\n date_delta_sql,\n date_trunc_to_time,\n datestrtodate_sql,\n format_time_lambda,\n if_sql,\n inline_array_sql,\n json_keyvalue_comma_sql,\n max_or_greatest,\n min_or_least,\n rename_func,\n timestamptrunc_sql,\n timestrtotime_sql,\n var_map_sql,\n)\nfrom sqlglot.expressions import Literal\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _check_int(s: str) -> bool:\n if s[0] in (\"-\", \"+\"):\n return s[1:].isdigit()\n return s.isdigit()\n\n\n# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html\ndef _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:\n if len(args) == 2:\n first_arg, second_arg = args\n if second_arg.is_string:\n # case: <string_expr> [ , <format> ]\n return format_time_lambda(exp.StrToTime, \"snowflake\")(args)\n\n # case: <numeric_expr> [ , <scale> ]\n if second_arg.name not in [\"0\", \"3\", \"9\"]:\n raise ValueError(\n f\"Scale for snowflake numeric timestamp is {second_arg}, but should be 0, 3, or 9\"\n )\n\n if second_arg.name == \"0\":\n timescale = exp.UnixToTime.SECONDS\n elif second_arg.name == \"3\":\n timescale = exp.UnixToTime.MILLIS\n elif second_arg.name == \"9\":\n timescale = exp.UnixToTime.NANOS\n\n return exp.UnixToTime(this=first_arg, scale=timescale)\n\n from sqlglot.optimizer.simplify import simplify_literals\n\n # The first argument might be an expression like 40 * 365 * 86400, so we try to\n # reduce it using `simplify_literals` first and then check if it's a Literal.\n first_arg = seq_get(args, 0)\n if not isinstance(simplify_literals(first_arg, root=True), Literal):\n # case: <variant_expr> or other expressions such as columns\n return exp.TimeStrToTime.from_arg_list(args)\n\n if first_arg.is_string:\n if _check_int(first_arg.this):\n # case: <integer>\n return exp.UnixToTime.from_arg_list(args)\n\n # case: <date_expr>\n return format_time_lambda(exp.StrToTime, \"snowflake\", default=True)(args)\n\n # case: <numeric_expr>\n return exp.UnixToTime.from_arg_list(args)\n\n\ndef _parse_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:\n expression = parser.parse_var_map(args)\n\n if isinstance(expression, exp.StarMap):\n return expression\n\n return exp.Struct(\n expressions=[\n t.cast(exp.Condition, k).eq(v) for k, v in zip(expression.keys, expression.values)\n ]\n )\n\n\ndef _parse_datediff(args: t.List) -> exp.DateDiff:\n return exp.DateDiff(\n this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))\n )\n\n\ndef _unix_to_time_sql(self: Snowflake.Generator, expression: exp.UnixToTime) -> str:\n scale = expression.args.get(\"scale\")\n timestamp = self.sql(expression, \"this\")\n if scale in (None, exp.UnixToTime.SECONDS):\n return f\"TO_TIMESTAMP({timestamp})\"\n if scale == exp.UnixToTime.MILLIS:\n return f\"TO_TIMESTAMP({timestamp}, 3)\"\n if scale == exp.UnixToTime.MICROS:\n return f\"TO_TIMESTAMP({timestamp} / 1000, 3)\"\n if scale == exp.UnixToTime.NANOS:\n return f\"TO_TIMESTAMP({timestamp}, 9)\"\n\n self.unsupported(f\"Unsupported scale for timestamp: {scale}.\")\n return \"\"\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/date_part.html\n# 
https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts\ndef _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:\n this = self._parse_var() or self._parse_type()\n\n if not this:\n return None\n\n self._match(TokenType.COMMA)\n expression = self._parse_bitwise()\n this = _map_date_part(this)\n name = this.name.upper()\n\n if name.startswith(\"EPOCH\"):\n if name == \"EPOCH_MILLISECOND\":\n scale = 10**3\n elif name == \"EPOCH_MICROSECOND\":\n scale = 10**6\n elif name == \"EPOCH_NANOSECOND\":\n scale = 10**9\n else:\n scale = None\n\n ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build(\"TIMESTAMP\"))\n to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)\n\n if scale:\n to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))\n\n return to_unix\n\n return self.expression(exp.Extract, this=this, expression=expression)\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/div0\ndef _div0_to_if(args: t.List) -> exp.If:\n cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))\n true = exp.Literal.number(0)\n false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))\n return exp.If(this=cond, true=true, false=false)\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull\ndef _zeroifnull_to_if(args: t.List) -> exp.If:\n cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())\n return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))\n\n\n# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull\ndef _nullifzero_to_if(args: t.List) -> exp.If:\n cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))\n return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))\n\n\ndef _datatype_sql(self: Snowflake.Generator, expression: exp.DataType) -> str:\n if expression.is_type(\"array\"):\n return \"ARRAY\"\n elif expression.is_type(\"map\"):\n return \"OBJECT\"\n return self.datatype_sql(expression)\n\n\ndef _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:\n flag = expression.text(\"flag\")\n\n if \"i\" not in flag:\n flag += \"i\"\n\n return self.func(\n \"REGEXP_LIKE\", expression.this, expression.expression, exp.Literal.string(flag)\n )\n\n\ndef _parse_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:\n if len(args) == 3:\n return exp.Anonymous(this=\"CONVERT_TIMEZONE\", expressions=args)\n return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))\n\n\ndef _parse_regexp_replace(args: t.List) -> exp.RegexpReplace:\n regexp_replace = exp.RegexpReplace.from_arg_list(args)\n\n if not regexp_replace.args.get(\"replacement\"):\n regexp_replace.set(\"replacement\", exp.Literal.string(\"\"))\n\n return regexp_replace\n\n\ndef _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:\n def _parse(self: Snowflake.Parser) -> exp.Show:\n return self._parse_show_snowflake(*args, **kwargs)\n\n return _parse\n\n\nDATE_PART_MAPPING = {\n \"Y\": \"YEAR\",\n \"YY\": \"YEAR\",\n \"YYY\": \"YEAR\",\n \"YYYY\": \"YEAR\",\n \"YR\": \"YEAR\",\n \"YEARS\": \"YEAR\",\n \"YRS\": \"YEAR\",\n \"MM\": \"MONTH\",\n \"MON\": \"MONTH\",\n \"MONS\": \"MONTH\",\n \"MONTHS\": \"MONTH\",\n \"D\": \"DAY\",\n \"DD\": \"DAY\",\n \"DAYS\": \"DAY\",\n \"DAYOFMONTH\": \"DAY\",\n \"WEEKDAY\": \"DAYOFWEEK\",\n \"DOW\": \"DAYOFWEEK\",\n \"DW\": \"DAYOFWEEK\",\n \"WEEKDAY_ISO\": \"DAYOFWEEKISO\",\n \"DOW_ISO\": \"DAYOFWEEKISO\",\n \"DW_ISO\": 
\"DAYOFWEEKISO\",\n \"YEARDAY\": \"DAYOFYEAR\",\n \"DOY\": \"DAYOFYEAR\",\n \"DY\": \"DAYOFYEAR\",\n \"W\": \"WEEK\",\n \"WK\": \"WEEK\",\n \"WEEKOFYEAR\": \"WEEK\",\n \"WOY\": \"WEEK\",\n \"WY\": \"WEEK\",\n \"WEEK_ISO\": \"WEEKISO\",\n \"WEEKOFYEARISO\": \"WEEKISO\",\n \"WEEKOFYEAR_ISO\": \"WEEKISO\",\n \"Q\": \"QUARTER\",\n \"QTR\": \"QUARTER\",\n \"QTRS\": \"QUARTER\",\n \"QUARTERS\": \"QUARTER\",\n \"H\": \"HOUR\",\n \"HH\": \"HOUR\",\n \"HR\": \"HOUR\",\n \"HOURS\": \"HOUR\",\n \"HRS\": \"HOUR\",\n \"M\": \"MINUTE\",\n \"MI\": \"MINUTE\",\n \"MIN\": \"MINUTE\",\n \"MINUTES\": \"MINUTE\",\n \"MINS\": \"MINUTE\",\n \"S\": \"SECOND\",\n \"SEC\": \"SECOND\",\n \"SECONDS\": \"SECOND\",\n \"SECS\": \"SECOND\",\n \"MS\": \"MILLISECOND\",\n \"MSEC\": \"MILLISECOND\",\n \"MILLISECONDS\": \"MILLISECOND\",\n \"US\": \"MICROSECOND\",\n \"USEC\": \"MICROSECOND\",\n \"MICROSECONDS\": \"MICROSECOND\",\n \"NS\": \"NANOSECOND\",\n \"NSEC\": \"NANOSECOND\",\n \"NANOSEC\": \"NANOSECOND\",\n \"NSECOND\": \"NANOSECOND\",\n \"NSECONDS\": \"NANOSECOND\",\n \"NANOSECS\": \"NANOSECOND\",\n \"NSECONDS\": \"NANOSECOND\",\n \"EPOCH\": \"EPOCH_SECOND\",\n \"EPOCH_SECONDS\": \"EPOCH_SECOND\",\n \"EPOCH_MILLISECONDS\": \"EPOCH_MILLISECOND\",\n \"EPOCH_MICROSECONDS\": \"EPOCH_MICROSECOND\",\n \"EPOCH_NANOSECONDS\": \"EPOCH_NANOSECOND\",\n \"TZH\": \"TIMEZONE_HOUR\",\n \"TZM\": \"TIMEZONE_MINUTE\",\n}\n\n\[email protected]\ndef _map_date_part(part: exp.Expression) -> exp.Var:\n pass\n\n\[email protected]\ndef _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:\n pass\n\n\ndef _map_date_part(part):\n mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None\n return exp.var(mapped) if mapped else part\n\n\ndef _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:\n trunc = date_trunc_to_time(args)\n trunc.set(\"unit\", _map_date_part(trunc.args[\"unit\"]))\n return trunc\n\n\ndef _parse_colon_get_path(\n self: parser.Parser, this: t.Optional[exp.Expression]\n) -> t.Optional[exp.Expression]:\n while True:\n path = self._parse_bitwise()\n\n # The cast :: operator has a lower precedence than the extraction operator :, so\n # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH\n if isinstance(path, exp.Cast):\n target_type = path.to\n path = path.this\n else:\n target_type = None\n\n if isinstance(path, exp.Expression):\n path = exp.Literal.string(path.sql(dialect=\"snowflake\"))\n\n # The extraction operator : is left-associative\n this = self.expression(exp.GetPath, this=this, expression=path)\n\n if target_type:\n this = exp.cast(this, target_type)\n\n if not self._match(TokenType.COLON):\n break\n\n if self._match_set(self.RANGE_PARSERS):\n this = self.RANGE_PARSERS[self._prev.token_type](self, this) or this\n\n return this\n\n\ndef _parse_timestamp_from_parts(args: t.List) -> exp.Func:\n if len(args) == 2:\n # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,\n # so we parse this into Anonymous for now instead of introducing complexity\n return exp.Anonymous(this=\"TIMESTAMP_FROM_PARTS\", expressions=args)\n\n return exp.TimestampFromParts.from_arg_list(args)\n\n\ndef _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:\n \"\"\"\n Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,\n so we need to unqualify them.\n\n Example:\n >>> from sqlglot import parse_one\n >>> expr = parse_one(\"SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))\")\n 
>>> print(_unqualify_unpivot_columns(expr).sql(dialect=\"snowflake\"))\n SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))\n \"\"\"\n if isinstance(expression, exp.Pivot) and expression.unpivot:\n expression = transforms.unqualify_columns(expression)\n\n return expression\n\n\nclass Snowflake(Dialect):\n # https://docs.snowflake.com/en/sql-reference/identifiers-syntax\n NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE\n NULL_ORDERING = \"nulls_are_large\"\n TIME_FORMAT = \"'YYYY-MM-DD HH24:MI:SS'\"\n SUPPORTS_USER_DEFINED_TYPES = False\n SUPPORTS_SEMI_ANTI_JOIN = False\n PREFER_CTE_ALIAS_COLUMN = True\n TABLESAMPLE_SIZE_IS_PERCENT = True\n\n TIME_MAPPING = {\n \"YYYY\": \"%Y\",\n \"yyyy\": \"%Y\",\n \"YY\": \"%y\",\n \"yy\": \"%y\",\n \"MMMM\": \"%B\",\n \"mmmm\": \"%B\",\n \"MON\": \"%b\",\n \"mon\": \"%b\",\n \"MM\": \"%m\",\n \"mm\": \"%m\",\n \"DD\": \"%d\",\n \"dd\": \"%-d\",\n \"DY\": \"%a\",\n \"dy\": \"%w\",\n \"HH24\": \"%H\",\n \"hh24\": \"%H\",\n \"HH12\": \"%I\",\n \"hh12\": \"%I\",\n \"MI\": \"%M\",\n \"mi\": \"%M\",\n \"SS\": \"%S\",\n \"ss\": \"%S\",\n \"FF\": \"%f\",\n \"ff\": \"%f\",\n \"FF6\": \"%f\",\n \"ff6\": \"%f\",\n }\n\n def quote_identifier(self, expression: E, identify: bool = True) -> E:\n # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an\n # unquoted DUAL keyword in a special way and does not map it to a user-defined table\n if (\n isinstance(expression, exp.Identifier)\n and isinstance(expression.parent, exp.Table)\n and expression.name.lower() == \"dual\"\n ):\n return t.cast(E, expression)\n\n return super().quote_identifier(expression, identify=identify)\n\n class Parser(parser.Parser):\n IDENTIFY_PIVOT_STRINGS = True\n\n TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"ARRAYAGG\": exp.ArrayAgg.from_arg_list,\n \"ARRAY_CONSTRUCT\": exp.Array.from_arg_list,\n \"ARRAY_CONTAINS\": lambda args: exp.ArrayContains(\n this=seq_get(args, 1), expression=seq_get(args, 0)\n ),\n \"ARRAY_GENERATE_RANGE\": lambda args: exp.GenerateSeries(\n # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive\n start=seq_get(args, 0),\n end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),\n step=seq_get(args, 2),\n ),\n \"ARRAY_TO_STRING\": exp.ArrayJoin.from_arg_list,\n \"BITXOR\": binary_from_function(exp.BitwiseXor),\n \"BIT_XOR\": binary_from_function(exp.BitwiseXor),\n \"BOOLXOR\": binary_from_function(exp.Xor),\n \"CONVERT_TIMEZONE\": _parse_convert_timezone,\n \"DATE_TRUNC\": _date_trunc_to_time,\n \"DATEADD\": lambda args: exp.DateAdd(\n this=seq_get(args, 2),\n expression=seq_get(args, 1),\n unit=_map_date_part(seq_get(args, 0)),\n ),\n \"DATEDIFF\": _parse_datediff,\n \"DIV0\": _div0_to_if,\n \"FLATTEN\": exp.Explode.from_arg_list,\n \"IFF\": exp.If.from_arg_list,\n \"LAST_DAY\": lambda args: exp.LastDay(\n this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))\n ),\n \"LISTAGG\": exp.GroupConcat.from_arg_list,\n \"NULLIFZERO\": _nullifzero_to_if,\n \"OBJECT_CONSTRUCT\": _parse_object_construct,\n \"REGEXP_REPLACE\": _parse_regexp_replace,\n \"REGEXP_SUBSTR\": exp.RegexpExtract.from_arg_list,\n \"RLIKE\": exp.RegexpLike.from_arg_list,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n \"TIMEDIFF\": _parse_datediff,\n \"TIMESTAMPDIFF\": _parse_datediff,\n \"TIMESTAMPFROMPARTS\": _parse_timestamp_from_parts,\n \"TIMESTAMP_FROM_PARTS\": 
_parse_timestamp_from_parts,\n \"TO_TIMESTAMP\": _parse_to_timestamp,\n \"TO_VARCHAR\": exp.ToChar.from_arg_list,\n \"ZEROIFNULL\": _zeroifnull_to_if,\n }\n\n FUNCTION_PARSERS = {\n **parser.Parser.FUNCTION_PARSERS,\n \"DATE_PART\": _parse_date_part,\n \"OBJECT_CONSTRUCT_KEEP_NULL\": lambda self: self._parse_json_object(),\n }\n FUNCTION_PARSERS.pop(\"TRIM\")\n\n TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}\n\n RANGE_PARSERS = {\n **parser.Parser.RANGE_PARSERS,\n TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),\n TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),\n TokenType.COLON: _parse_colon_get_path,\n }\n\n ALTER_PARSERS = {\n **parser.Parser.ALTER_PARSERS,\n \"SET\": lambda self: self._parse_set(tag=self._match_text_seq(\"TAG\")),\n \"UNSET\": lambda self: self.expression(\n exp.Set,\n tag=self._match_text_seq(\"TAG\"),\n expressions=self._parse_csv(self._parse_id_var),\n unset=True,\n ),\n \"SWAP\": lambda self: self._parse_alter_table_swap(),\n }\n\n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n TokenType.SHOW: lambda self: self._parse_show(),\n }\n\n PROPERTY_PARSERS = {\n **parser.Parser.PROPERTY_PARSERS,\n \"LOCATION\": lambda self: self._parse_location(),\n }\n\n SHOW_PARSERS = {\n \"PRIMARY KEYS\": _show_parser(\"PRIMARY KEYS\"),\n \"TERSE PRIMARY KEYS\": _show_parser(\"PRIMARY KEYS\"),\n \"COLUMNS\": _show_parser(\"COLUMNS\"),\n }\n\n STAGED_FILE_SINGLE_TOKENS = {\n TokenType.DOT,\n TokenType.MOD,\n TokenType.SLASH,\n }\n\n FLATTEN_COLUMNS = [\"SEQ\", \"KEY\", \"PATH\", \"INDEX\", \"VALUE\", \"THIS\"]\n\n def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:\n if is_map:\n # Keys are strings in Snowflake's objects, see also:\n # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured\n # - https://docs.snowflake.com/en/sql-reference/functions/object_construct\n return self._parse_slice(self._parse_string())\n\n return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))\n\n def _parse_lateral(self) -> t.Optional[exp.Lateral]:\n lateral = super()._parse_lateral()\n if not lateral:\n return lateral\n\n if isinstance(lateral.this, exp.Explode):\n table_alias = lateral.args.get(\"alias\")\n columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]\n if table_alias and not table_alias.args.get(\"columns\"):\n table_alias.set(\"columns\", columns)\n elif not table_alias:\n exp.alias_(lateral, \"_flattened\", table=columns, copy=False)\n\n return lateral\n\n def _parse_at_before(self, table: exp.Table) -> exp.Table:\n # https://docs.snowflake.com/en/sql-reference/constructs/at-before\n index = self._index\n if self._match_texts((\"AT\", \"BEFORE\")):\n this = self._prev.text.upper()\n kind = (\n self._match(TokenType.L_PAREN)\n and self._match_texts(self.HISTORICAL_DATA_KIND)\n and self._prev.text.upper()\n )\n expression = self._match(TokenType.FARROW) and self._parse_bitwise()\n\n if expression:\n self._match_r_paren()\n when = self.expression(\n exp.HistoricalData, this=this, kind=kind, expression=expression\n )\n table.set(\"when\", when)\n else:\n self._retreat(index)\n\n return table\n\n def _parse_table_parts(self, schema: bool = False) -> exp.Table:\n # https://docs.snowflake.com/en/user-guide/querying-stage\n if self._match(TokenType.STRING, advance=False):\n table = self._parse_string()\n elif self._match_text_seq(\"@\", advance=False):\n table = self._parse_location_path()\n else:\n table = None\n\n if table:\n file_format = None\n 
pattern = None\n\n self._match(TokenType.L_PAREN)\n while self._curr and not self._match(TokenType.R_PAREN):\n if self._match_text_seq(\"FILE_FORMAT\", \"=>\"):\n file_format = self._parse_string() or super()._parse_table_parts()\n elif self._match_text_seq(\"PATTERN\", \"=>\"):\n pattern = self._parse_string()\n else:\n break\n\n self._match(TokenType.COMMA)\n\n table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)\n else:\n table = super()._parse_table_parts(schema=schema)\n\n return self._parse_at_before(table)\n\n def _parse_id_var(\n self,\n any_token: bool = True,\n tokens: t.Optional[t.Collection[TokenType]] = None,\n ) -> t.Optional[exp.Expression]:\n if self._match_text_seq(\"IDENTIFIER\", \"(\"):\n identifier = (\n super()._parse_id_var(any_token=any_token, tokens=tokens)\n or self._parse_string()\n )\n self._match_r_paren()\n return self.expression(exp.Anonymous, this=\"IDENTIFIER\", expressions=[identifier])\n\n return super()._parse_id_var(any_token=any_token, tokens=tokens)\n\n def _parse_show_snowflake(self, this: str) -> exp.Show:\n scope = None\n scope_kind = None\n\n like = self._parse_string() if self._match(TokenType.LIKE) else None\n\n if self._match(TokenType.IN):\n if self._match_text_seq(\"ACCOUNT\"):\n scope_kind = \"ACCOUNT\"\n elif self._match_set(self.DB_CREATABLES):\n scope_kind = self._prev.text\n if self._curr:\n scope = self._parse_table()\n elif self._curr:\n scope_kind = \"TABLE\"\n scope = self._parse_table()\n\n return self.expression(\n exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind\n )\n\n def _parse_alter_table_swap(self) -> exp.SwapTable:\n self._match_text_seq(\"WITH\")\n return self.expression(exp.SwapTable, this=self._parse_table(schema=True))\n\n def _parse_location(self) -> exp.LocationProperty:\n self._match(TokenType.EQ)\n return self.expression(exp.LocationProperty, this=self._parse_location_path())\n\n def _parse_location_path(self) -> exp.Var:\n parts = [self._advance_any(ignore_reserved=True)]\n\n # We avoid consuming a comma token because external tables like @foo and @bar\n # can be joined in a query with a comma separator.\n while self._is_connected() and not self._match(TokenType.COMMA, advance=False):\n parts.append(self._advance_any(ignore_reserved=True))\n\n return exp.var(\"\".join(part.text for part in parts if part))\n\n class Tokenizer(tokens.Tokenizer):\n STRING_ESCAPES = [\"\\\\\", \"'\"]\n HEX_STRINGS = [(\"x'\", \"'\"), (\"X'\", \"'\")]\n RAW_STRINGS = [\"$$\"]\n COMMENTS = [\"--\", \"//\", (\"/*\", \"*/\")]\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"BYTEINT\": TokenType.INT,\n \"CHAR VARYING\": TokenType.VARCHAR,\n \"CHARACTER VARYING\": TokenType.VARCHAR,\n \"EXCLUDE\": TokenType.EXCEPT,\n \"ILIKE ANY\": TokenType.ILIKE_ANY,\n \"LIKE ANY\": TokenType.LIKE_ANY,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NCHAR VARYING\": TokenType.VARCHAR,\n \"PUT\": TokenType.COMMAND,\n \"RENAME\": TokenType.REPLACE,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"SQL_DOUBLE\": TokenType.DOUBLE,\n \"SQL_VARCHAR\": TokenType.VARCHAR,\n \"TIMESTAMP_LTZ\": TokenType.TIMESTAMPLTZ,\n \"TIMESTAMP_NTZ\": TokenType.TIMESTAMP,\n \"TIMESTAMP_TZ\": TokenType.TIMESTAMPTZ,\n \"TIMESTAMPNTZ\": TokenType.TIMESTAMP,\n \"TOP\": TokenType.TOP,\n }\n\n SINGLE_TOKENS = {\n **tokens.Tokenizer.SINGLE_TOKENS,\n \"$\": TokenType.PARAMETER,\n }\n\n VAR_SINGLE_TOKENS = {\"$\"}\n\n COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}\n\n class 
Generator(generator.Generator):\n PARAMETER_TOKEN = \"$\"\n MATCHED_BY_SOURCE = False\n SINGLE_STRING_INTERVAL = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n QUERY_HINTS = False\n AGGREGATE_FILTER_SUPPORTED = False\n SUPPORTS_TABLE_COPY = False\n COLLATE_IS_FUNC = True\n LIMIT_ONLY_LITERALS = True\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.ArgMax: rename_func(\"MAX_BY\"),\n exp.ArgMin: rename_func(\"MIN_BY\"),\n exp.Array: inline_array_sql,\n exp.ArrayConcat: rename_func(\"ARRAY_CAT\"),\n exp.ArrayContains: lambda self, e: self.func(\"ARRAY_CONTAINS\", e.expression, e.this),\n exp.ArrayJoin: rename_func(\"ARRAY_TO_STRING\"),\n exp.AtTimeZone: lambda self, e: self.func(\n \"CONVERT_TIMEZONE\", e.args.get(\"zone\"), e.this\n ),\n exp.BitwiseXor: rename_func(\"BITXOR\"),\n exp.DateAdd: date_delta_sql(\"DATEADD\"),\n exp.DateDiff: date_delta_sql(\"DATEDIFF\"),\n exp.DateStrToDate: datestrtodate_sql,\n exp.DataType: _datatype_sql,\n exp.DayOfMonth: rename_func(\"DAYOFMONTH\"),\n exp.DayOfWeek: rename_func(\"DAYOFWEEK\"),\n exp.DayOfYear: rename_func(\"DAYOFYEAR\"),\n exp.Explode: rename_func(\"FLATTEN\"),\n exp.Extract: rename_func(\"DATE_PART\"),\n exp.GenerateSeries: lambda self, e: self.func(\n \"ARRAY_GENERATE_RANGE\", e.args[\"start\"], e.args[\"end\"] + 1, e.args.get(\"step\")\n ),\n exp.GroupConcat: rename_func(\"LISTAGG\"),\n exp.If: if_sql(name=\"IFF\", false_value=\"NULL\"),\n exp.JSONExtract: lambda self, e: f\"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]\",\n exp.JSONKeyValue: json_keyvalue_comma_sql,\n exp.JSONObject: lambda self, e: self.func(\"OBJECT_CONSTRUCT_KEEP_NULL\", *e.expressions),\n exp.LogicalAnd: rename_func(\"BOOLAND_AGG\"),\n exp.LogicalOr: rename_func(\"BOOLOR_AGG\"),\n exp.Map: lambda self, e: var_map_sql(self, e, \"OBJECT_CONSTRUCT\"),\n exp.Max: max_or_greatest,\n exp.Min: min_or_least,\n exp.PartitionedByProperty: lambda self, e: f\"PARTITION BY {self.sql(e, 'this')}\",\n exp.PercentileCont: transforms.preprocess(\n [transforms.add_within_group_for_percentiles]\n ),\n exp.PercentileDisc: transforms.preprocess(\n [transforms.add_within_group_for_percentiles]\n ),\n exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),\n exp.RegexpILike: _regexpilike_sql,\n exp.Rand: rename_func(\"RANDOM\"),\n exp.Select: transforms.preprocess(\n [\n transforms.eliminate_distinct_on,\n transforms.explode_to_unnest(),\n transforms.eliminate_semi_and_anti_joins,\n ]\n ),\n exp.SHA: rename_func(\"SHA1\"),\n exp.StarMap: rename_func(\"OBJECT_CONSTRUCT\"),\n exp.StartsWith: rename_func(\"STARTSWITH\"),\n exp.StrPosition: lambda self, e: self.func(\n \"POSITION\", e.args.get(\"substr\"), e.this, e.args.get(\"position\")\n ),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Struct: lambda self, e: self.func(\n \"OBJECT_CONSTRUCT\",\n *(arg for expression in e.expressions for arg in expression.flatten()),\n ),\n exp.Stuff: rename_func(\"INSERT\"),\n exp.TimestampDiff: lambda self, e: self.func(\n \"TIMESTAMPDIFF\", e.unit, e.expression, e.this\n ),\n exp.TimestampTrunc: timestamptrunc_sql,\n exp.TimeStrToTime: timestrtotime_sql,\n exp.TimeToStr: lambda self, e: self.func(\n \"TO_CHAR\", exp.cast(e.this, \"timestamp\"), self.format_time(e)\n ),\n exp.TimeToUnix: lambda self, e: f\"EXTRACT(epoch_second FROM {self.sql(e, 'this')})\",\n exp.ToArray: rename_func(\"TO_ARRAY\"),\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: lambda self, e: self.func(\"TRIM\", e.this, 
e.expression),\n exp.TsOrDsAdd: date_delta_sql(\"DATEADD\", cast=True),\n exp.TsOrDsDiff: date_delta_sql(\"DATEDIFF\"),\n exp.UnixToTime: _unix_to_time_sql,\n exp.VarMap: lambda self, e: var_map_sql(self, e, \"OBJECT_CONSTRUCT\"),\n exp.WeekOfYear: rename_func(\"WEEKOFYEAR\"),\n exp.Xor: rename_func(\"BOOLXOR\"),\n }\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TIMESTAMP: \"TIMESTAMPNTZ\",\n }\n\n STAR_MAPPING = {\n \"except\": \"EXCLUDE\",\n \"replace\": \"RENAME\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.SetProperty: exp.Properties.Location.UNSUPPORTED,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:\n milli = expression.args.get(\"milli\")\n if milli is not None:\n milli_to_nano = milli.pop() * exp.Literal.number(1000000)\n expression.set(\"nano\", milli_to_nano)\n\n return rename_func(\"TIMESTAMP_FROM_PARTS\")(self, expression)\n\n def trycast_sql(self, expression: exp.TryCast) -> str:\n value = expression.this\n\n if value.type is None:\n from sqlglot.optimizer.annotate_types import annotate_types\n\n value = annotate_types(value)\n\n if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):\n return super().trycast_sql(expression)\n\n # TRY_CAST only works for string values in Snowflake\n return self.cast_sql(expression)\n\n def log_sql(self, expression: exp.Log) -> str:\n if not expression.expression:\n return self.func(\"LN\", expression.this)\n\n return super().log_sql(expression)\n\n def unnest_sql(self, expression: exp.Unnest) -> str:\n unnest_alias = expression.args.get(\"alias\")\n offset = expression.args.get(\"offset\")\n\n columns = [\n exp.to_identifier(\"seq\"),\n exp.to_identifier(\"key\"),\n exp.to_identifier(\"path\"),\n offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier(\"index\"),\n seq_get(unnest_alias.columns if unnest_alias else [], 0)\n or exp.to_identifier(\"value\"),\n exp.to_identifier(\"this\"),\n ]\n\n if unnest_alias:\n unnest_alias.set(\"columns\", columns)\n else:\n unnest_alias = exp.TableAlias(this=\"_u\", columns=columns)\n\n explode = f\"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))\"\n alias = self.sql(unnest_alias)\n alias = f\" AS {alias}\" if alias else \"\"\n return f\"{explode}{alias}\"\n\n def show_sql(self, expression: exp.Show) -> str:\n like = self.sql(expression, \"like\")\n like = f\" LIKE {like}\" if like else \"\"\n\n scope = self.sql(expression, \"scope\")\n scope = f\" {scope}\" if scope else \"\"\n\n scope_kind = self.sql(expression, \"scope_kind\")\n if scope_kind:\n scope_kind = f\" IN {scope_kind}\"\n\n return f\"SHOW {expression.name}{like}{scope_kind}{scope}\"\n\n def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:\n # Other dialects don't support all of the following parameters, so we need to\n # generate default values as necessary to ensure the transpilation is correct\n group = expression.args.get(\"group\")\n parameters = expression.args.get(\"parameters\") or (group and exp.Literal.string(\"c\"))\n occurrence = expression.args.get(\"occurrence\") or (parameters and exp.Literal.number(1))\n position = expression.args.get(\"position\") or (occurrence and exp.Literal.number(1))\n\n return self.func(\n \"REGEXP_SUBSTR\",\n expression.this,\n expression.expression,\n position,\n occurrence,\n parameters,\n group,\n )\n\n def except_op(self, expression: exp.Except) -> str:\n if not 
expression.args.get(\"distinct\", False):\n self.unsupported(\"EXCEPT with All is not supported in Snowflake\")\n return super().except_op(expression)\n\n def intersect_op(self, expression: exp.Intersect) -> str:\n if not expression.args.get(\"distinct\", False):\n self.unsupported(\"INTERSECT with All is not supported in Snowflake\")\n return super().intersect_op(expression)\n\n def describe_sql(self, expression: exp.Describe) -> str:\n # Default to table if kind is unknown\n kind_value = expression.args.get(\"kind\") or \"TABLE\"\n kind = f\" {kind_value}\" if kind_value else \"\"\n this = f\" {self.sql(expression, 'this')}\"\n expressions = self.expressions(expression, flat=True)\n expressions = f\" {expressions}\" if expressions else \"\"\n return f\"DESCRIBE{kind}{this}{expressions}\"\n\n def generatedasidentitycolumnconstraint_sql(\n self, expression: exp.GeneratedAsIdentityColumnConstraint\n ) -> str:\n start = expression.args.get(\"start\")\n start = f\" START {start}\" if start else \"\"\n increment = expression.args.get(\"increment\")\n increment = f\" INCREMENT {increment}\" if increment else \"\"\n return f\"AUTOINCREMENT{start}{increment}\"\n\n def swaptable_sql(self, expression: exp.SwapTable) -> str:\n this = self.sql(expression, \"this\")\n return f\"SWAP WITH {this}\"\n\n def with_properties(self, properties: exp.Properties) -> str:\n return self.properties(properties, wrapped=False, prefix=self.seg(\"\"), sep=\" \")\n", "path": "sqlglot/dialects/snowflake.py" } ]
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index ad14e6ee74..454df94c9e 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -328,6 +328,9 @@ def _parse_colon_get_path( if not self._match(TokenType.COLON): break + if self._match_set(self.RANGE_PARSERS): + this = self.RANGE_PARSERS[self._prev.token_type](self, this) or this + return this diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index 602bc63027..39963b2817 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -78,6 +78,14 @@ def test_snowflake(self): self.validate_identity( "SELECT a FROM test PIVOT(SUM(x) FOR y IN ('z', 'q')) AS x TABLESAMPLE (0.1)" ) + self.validate_identity( + """SELECT PARSE_JSON('{"x": "hello"}'):x LIKE 'hello'""", + """SELECT GET_PATH(PARSE_JSON('{"x": "hello"}'), 'x') LIKE 'hello'""", + ) + self.validate_identity( + """SELECT data:x LIKE 'hello' FROM some_table""", + """SELECT GET_PATH(data, 'x') LIKE 'hello' FROM some_table""", + ) self.validate_identity( "SELECT SUM({ fn CONVERT(123, SQL_DOUBLE) })", "SELECT SUM(CAST(123 AS DOUBLE))",
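For context, a minimal usage sketch of the behaviour the diff above adds (handling a range operator such as `LIKE` after a Snowflake colon-path). This assumes a sqlglot build that includes the patch; the expected output string is taken from the new test cases, not independently verified.

```python
# Quick check of colon-path followed by a range operator in the Snowflake dialect.
# Assumes the patched sqlglot from the diff above is installed.
import sqlglot

sql = "SELECT data:x LIKE 'hello' FROM some_table"
out = sqlglot.transpile(sql, read="snowflake", write="snowflake")[0]
print(out)
# Per the added tests: SELECT GET_PATH(data, 'x') LIKE 'hello' FROM some_table
```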
urllib3__urllib3-2655
Problem with urllib3.HTTPResponse.geturl() type hint ### Subject Why is the return type hint for `urllib3.response.BaseHTTPResponse.geturl()` `Optional[Union[str, "Literal[False]"]]` rather than `Optional[str]`?
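To illustrate why the narrower hint is accurate: `geturl()` just forwards the `url` property, which is `Optional[str]`, whereas only `get_redirect_location()` has a genuine three-way result that needs `Literal[False]`. The sketch below is a hypothetical stand-in class (`ResponseSketch` is not urllib3's implementation) written for Python 3.8+ where `typing.Literal` is available.

```python
from typing import Literal, Optional, Union


class ResponseSketch:
    """Illustrative stand-in, not the real urllib3 response class."""

    REDIRECT_STATUSES = (301, 302, 303, 307, 308)

    def __init__(self, status: int, location: Optional[str], url: Optional[str]) -> None:
        self.status = status
        self._location = location
        self._url = url

    def get_redirect_location(self) -> Union[Optional[str], Literal[False]]:
        # Three-way result: a location string, None (redirect without a
        # Location header), or False (not a redirect at all), so the
        # Literal[False] arm of the union is meaningful here.
        if self.status in self.REDIRECT_STATUSES:
            return self._location
        return False

    def geturl(self) -> Optional[str]:
        # geturl() only returns the url, which is Optional[str]; it can never
        # return False, so Optional[str] is the precise hint.
        return self._url


if __name__ == "__main__":
    resp = ResponseSketch(status=200, location=None, url="https://example.com")
    print(resp.geturl())                  # https://example.com
    print(resp.get_redirect_location())   # False
```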
[ { "content": "import io\nimport json as _json\nimport logging\nimport re\nimport zlib\nfrom contextlib import contextmanager\nfrom http.client import HTTPMessage as _HttplibHTTPMessage\nfrom http.client import HTTPResponse as _HttplibHTTPResponse\nfrom socket import timeout as SocketTimeout\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Generator,\n Iterator,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n Union,\n)\n\ntry:\n try:\n import brotlicffi as brotli # type: ignore[import]\n except ImportError:\n import brotli # type: ignore[import]\nexcept ImportError:\n brotli = None\n\ntry:\n import zstandard as zstd # type: ignore[import]\n\n # The package 'zstandard' added the 'eof' property starting\n # in v0.18.0 which we require to ensure a complete and\n # valid zstd stream was fed into the ZstdDecoder.\n # See: https://github.com/urllib3/urllib3/pull/2624\n _zstd_version = _zstd_version = tuple(\n map(int, re.search(r\"^([0-9]+)\\.([0-9]+)\", zstd.__version__).groups()) # type: ignore[union-attr]\n )\n if _zstd_version < (0, 18): # Defensive:\n zstd = None\n\nexcept (AttributeError, ImportError, ValueError): # Defensive:\n zstd = None\n\nfrom ._collections import HTTPHeaderDict\nfrom .connection import _TYPE_BODY, BaseSSLError, HTTPConnection, HTTPException\nfrom .exceptions import (\n BodyNotHttplibCompatible,\n DecodeError,\n HTTPError,\n IncompleteRead,\n InvalidChunkLength,\n InvalidHeader,\n ProtocolError,\n ReadTimeoutError,\n ResponseNotChunked,\n SSLError,\n)\nfrom .util.response import is_fp_closed, is_response_to_head\nfrom .util.retry import Retry\n\nif TYPE_CHECKING:\n from typing_extensions import Literal\n\n from .connectionpool import HTTPConnectionPool\n\nlog = logging.getLogger(__name__)\n\n\nclass ContentDecoder:\n def decompress(self, data: bytes) -> bytes:\n raise NotImplementedError()\n\n def flush(self) -> bytes:\n raise NotImplementedError()\n\n\nclass DeflateDecoder(ContentDecoder):\n def __init__(self) -> None:\n self._first_try = True\n self._data = b\"\"\n self._obj = zlib.decompressobj()\n\n def decompress(self, data: bytes) -> bytes:\n if not data:\n return data\n\n if not self._first_try:\n return self._obj.decompress(data)\n\n self._data += data\n try:\n decompressed = self._obj.decompress(data)\n if decompressed:\n self._first_try = False\n self._data = None # type: ignore[assignment]\n return decompressed\n except zlib.error:\n self._first_try = False\n self._obj = zlib.decompressobj(-zlib.MAX_WBITS)\n try:\n return self.decompress(self._data)\n finally:\n self._data = None # type: ignore[assignment]\n\n def flush(self) -> bytes:\n return self._obj.flush()\n\n\nclass GzipDecoderState:\n\n FIRST_MEMBER = 0\n OTHER_MEMBERS = 1\n SWALLOW_DATA = 2\n\n\nclass GzipDecoder(ContentDecoder):\n def __init__(self) -> None:\n self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)\n self._state = GzipDecoderState.FIRST_MEMBER\n\n def decompress(self, data: bytes) -> bytes:\n ret = bytearray()\n if self._state == GzipDecoderState.SWALLOW_DATA or not data:\n return bytes(ret)\n while True:\n try:\n ret += self._obj.decompress(data)\n except zlib.error:\n previous_state = self._state\n # Ignore data after the first error\n self._state = GzipDecoderState.SWALLOW_DATA\n if previous_state == GzipDecoderState.OTHER_MEMBERS:\n # Allow trailing garbage acceptable in other gzip clients\n return bytes(ret)\n raise\n data = self._obj.unused_data\n if not data:\n return bytes(ret)\n self._state = GzipDecoderState.OTHER_MEMBERS\n self._obj = zlib.decompressobj(16 + 
zlib.MAX_WBITS)\n\n def flush(self) -> bytes:\n return self._obj.flush()\n\n\nif brotli is not None:\n\n class BrotliDecoder(ContentDecoder):\n # Supports both 'brotlipy' and 'Brotli' packages\n # since they share an import name. The top branches\n # are for 'brotlipy' and bottom branches for 'Brotli'\n def __init__(self) -> None:\n self._obj = brotli.Decompressor()\n if hasattr(self._obj, \"decompress\"):\n setattr(self, \"decompress\", self._obj.decompress)\n else:\n setattr(self, \"decompress\", self._obj.process)\n\n def flush(self) -> bytes:\n if hasattr(self._obj, \"flush\"):\n return self._obj.flush() # type: ignore[no-any-return]\n return b\"\"\n\n\nif zstd is not None:\n\n class ZstdDecoder(ContentDecoder):\n def __init__(self) -> None:\n self._obj = zstd.ZstdDecompressor().decompressobj()\n\n def decompress(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n return self._obj.decompress(data) # type: ignore[no-any-return]\n\n def flush(self) -> bytes:\n ret = self._obj.flush()\n if not self._obj.eof:\n raise DecodeError(\"Zstandard data is incomplete\")\n return ret # type: ignore[no-any-return]\n\n\nclass MultiDecoder(ContentDecoder):\n \"\"\"\n From RFC7231:\n If one or more encodings have been applied to a representation, the\n sender that applied the encodings MUST generate a Content-Encoding\n header field that lists the content codings in the order in which\n they were applied.\n \"\"\"\n\n def __init__(self, modes: str) -> None:\n self._decoders = [_get_decoder(m.strip()) for m in modes.split(\",\")]\n\n def flush(self) -> bytes:\n return self._decoders[0].flush()\n\n def decompress(self, data: bytes) -> bytes:\n for d in reversed(self._decoders):\n data = d.decompress(data)\n return data\n\n\ndef _get_decoder(mode: str) -> ContentDecoder:\n if \",\" in mode:\n return MultiDecoder(mode)\n\n if mode == \"gzip\":\n return GzipDecoder()\n\n if brotli is not None and mode == \"br\":\n return BrotliDecoder()\n\n if zstd is not None and mode == \"zstd\":\n return ZstdDecoder()\n\n return DeflateDecoder()\n\n\nclass BaseHTTPResponse(io.IOBase):\n CONTENT_DECODERS = [\"gzip\", \"deflate\"]\n if brotli is not None:\n CONTENT_DECODERS += [\"br\"]\n if zstd is not None:\n CONTENT_DECODERS += [\"zstd\"]\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n DECODER_ERROR_CLASSES: Tuple[Type[Exception], ...] 
= (IOError, zlib.error)\n if brotli is not None:\n DECODER_ERROR_CLASSES += (brotli.error,)\n\n if zstd is not None:\n DECODER_ERROR_CLASSES += (zstd.ZstdError,)\n\n def __init__(\n self,\n *,\n headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,\n status: int,\n version: int,\n reason: Optional[str],\n decode_content: bool,\n request_url: Optional[str],\n retries: Optional[Retry] = None,\n ) -> None:\n if isinstance(headers, HTTPHeaderDict):\n self.headers = headers\n else:\n self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]\n self.status = status\n self.version = version\n self.reason = reason\n self.decode_content = decode_content\n self.request_url: Optional[str]\n self.retries = retries\n\n self.chunked = False\n tr_enc = self.headers.get(\"transfer-encoding\", \"\").lower()\n # Don't incur the penalty of creating a list and then discarding it\n encodings = (enc.strip() for enc in tr_enc.split(\",\"))\n if \"chunked\" in encodings:\n self.chunked = True\n\n self._decoder: Optional[ContentDecoder] = None\n\n def get_redirect_location(self) -> Union[Optional[str], \"Literal[False]\"]:\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. ``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get(\"location\")\n return False\n\n @property\n def data(self) -> bytes:\n raise NotImplementedError()\n\n def json(self) -> Any:\n \"\"\"\n Parses the body of the HTTP response as JSON.\n\n To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.\n\n This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.\n\n Read more :ref:`here <json>`.\n \"\"\"\n data = self.data.decode(\"utf-8\")\n return _json.loads(data)\n\n @property\n def url(self) -> Optional[str]:\n raise NotImplementedError()\n\n @property\n def closed(self) -> bool:\n raise NotImplementedError()\n\n @property\n def connection(self) -> Optional[HTTPConnection]:\n raise NotImplementedError()\n\n def stream(\n self, amt: Optional[int] = 2**16, decode_content: Optional[bool] = None\n ) -> Iterator[bytes]:\n raise NotImplementedError()\n\n def read(\n self,\n amt: Optional[int] = None,\n decode_content: Optional[bool] = None,\n cache_content: bool = False,\n ) -> bytes:\n raise NotImplementedError()\n\n def read_chunked(\n self,\n amt: Optional[int] = None,\n decode_content: Optional[bool] = None,\n ) -> Iterator[bytes]:\n raise NotImplementedError()\n\n def release_conn(self) -> None:\n raise NotImplementedError()\n\n def drain_conn(self) -> None:\n raise NotImplementedError()\n\n def close(self) -> None:\n raise NotImplementedError()\n\n def _init_decoder(self) -> None:\n \"\"\"\n Set-up the _decoder attribute if necessary.\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = _get_decoder(content_encoding)\n elif \",\" in content_encoding:\n encodings = [\n e.strip()\n for e in content_encoding.split(\",\")\n if e.strip() in self.CONTENT_DECODERS\n ]\n if encodings:\n self._decoder = _get_decoder(content_encoding)\n\n def _decode(\n self, data: bytes, decode_content: Optional[bool], flush_decoder: bool\n ) -> bytes:\n \"\"\"\n Decode the data 
passed in and potentially flush the decoder.\n \"\"\"\n if not decode_content:\n return data\n\n try:\n if self._decoder:\n data = self._decoder.decompress(data)\n except self.DECODER_ERROR_CLASSES as e:\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e,\n ) from e\n if flush_decoder:\n data += self._flush_decoder()\n\n return data\n\n def _flush_decoder(self) -> bytes:\n \"\"\"\n Flushes the decoder. Should only be called if the decoder is actually\n being used.\n \"\"\"\n if self._decoder:\n return self._decoder.decompress(b\"\") + self._decoder.flush()\n return b\"\"\n\n # Compatibility methods for `io` module\n def readable(self) -> bool:\n return True\n\n def readinto(self, b: bytearray) -> int:\n temp = self.read(len(b))\n if len(temp) == 0:\n return 0\n else:\n b[: len(temp)] = temp\n return len(temp)\n\n # Compatibility methods for http.client.HTTPResponse\n def getheaders(self) -> List[Tuple[str, str]]:\n return list(self.headers.items())\n\n def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]:\n return self.headers.get(name, default)\n\n # Compatibility method for http.cookiejar\n def info(self) -> HTTPHeaderDict:\n return self.headers\n\n def geturl(self) -> Optional[Union[str, \"Literal[False]\"]]:\n return self.url\n\n\nclass HTTPResponse(BaseHTTPResponse):\n \"\"\"\n HTTP Response container.\n\n Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is\n loaded and decoded on-demand when the ``data`` property is accessed. This\n class is also compatible with the Python standard library's :mod:`io`\n module, and can hence be treated as a readable object in the context of that\n framework.\n\n Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param original_response:\n When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`\n object, it's convenient to include the original for debug purposes. It's\n otherwise unused.\n\n :param retries:\n The retries contains the last :class:`~urllib3.util.retry.Retry` that\n was used during the request.\n\n :param enforce_content_length:\n Enforce content length checking. Body returned by server must match\n value of Content-Length header, if present. 
Otherwise, raise error.\n \"\"\"\n\n def __init__(\n self,\n body: _TYPE_BODY = \"\",\n headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,\n status: int = 0,\n version: int = 0,\n reason: Optional[str] = None,\n preload_content: bool = True,\n decode_content: bool = True,\n original_response: Optional[_HttplibHTTPResponse] = None,\n pool: Optional[\"HTTPConnectionPool\"] = None,\n connection: Optional[HTTPConnection] = None,\n msg: Optional[_HttplibHTTPMessage] = None,\n retries: Optional[Retry] = None,\n enforce_content_length: bool = True,\n request_method: Optional[str] = None,\n request_url: Optional[str] = None,\n auto_close: bool = True,\n ) -> None:\n super().__init__(\n headers=headers,\n status=status,\n version=version,\n reason=reason,\n decode_content=decode_content,\n request_url=request_url,\n retries=retries,\n )\n\n self.enforce_content_length = enforce_content_length\n self.auto_close = auto_close\n\n self._body = None\n self._fp: Optional[_HttplibHTTPResponse] = None\n self._original_response = original_response\n self._fp_bytes_read = 0\n self.msg = msg\n if self.retries is not None and self.retries.history:\n self._request_url = self.retries.history[-1].redirect_location\n else:\n self._request_url = request_url\n\n if body and isinstance(body, (str, bytes)):\n self._body = body\n\n self._pool = pool\n self._connection = connection\n\n if hasattr(body, \"read\"):\n self._fp = body # type: ignore[assignment]\n\n # Are we using the chunked-style of transfer encoding?\n self.chunk_left: Optional[int] = None\n\n # Determine length of response\n self.length_remaining = self._init_length(request_method)\n\n # If requested, preload the body.\n if preload_content and not self._body:\n self._body = self.read(decode_content=decode_content)\n\n def release_conn(self) -> None:\n if not self._pool or not self._connection:\n return None\n\n self._pool._put_conn(self._connection)\n self._connection = None\n\n def drain_conn(self) -> None:\n \"\"\"\n Read and discard any remaining HTTP response data in the response connection.\n\n Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.\n \"\"\"\n try:\n self.read()\n except (HTTPError, OSError, BaseSSLError, HTTPException):\n pass\n\n @property\n def data(self) -> bytes:\n # For backwards-compat with earlier urllib3 0.4 and earlier.\n if self._body:\n return self._body # type: ignore[return-value]\n\n if self._fp:\n return self.read(cache_content=True)\n\n return None # type: ignore[return-value]\n\n @property\n def connection(self) -> Optional[HTTPConnection]:\n return self._connection\n\n def isclosed(self) -> bool:\n return is_fp_closed(self._fp)\n\n def tell(self) -> int:\n \"\"\"\n Obtain the number of bytes pulled over the wire so far. May differ from\n the amount of content returned by :meth:``urllib3.response.HTTPResponse.read``\n if bytes are encoded on the wire (e.g, compressed).\n \"\"\"\n return self._fp_bytes_read\n\n def _init_length(self, request_method: Optional[str]) -> Optional[int]:\n \"\"\"\n Set initial length value for Response content if available.\n \"\"\"\n length: Optional[int]\n content_length: Optional[str] = self.headers.get(\"content-length\")\n\n if content_length is not None:\n if self.chunked:\n # This Response will fail with an IncompleteRead if it can't be\n # received as chunked. 
This method falls back to attempt reading\n # the response before raising an exception.\n log.warning(\n \"Received response with both Content-Length and \"\n \"Transfer-Encoding set. This is expressly forbidden \"\n \"by RFC 7230 sec 3.3.2. Ignoring Content-Length and \"\n \"attempting to process response as Transfer-Encoding: \"\n \"chunked.\"\n )\n return None\n\n try:\n # RFC 7230 section 3.3.2 specifies multiple content lengths can\n # be sent in a single Content-Length header\n # (e.g. Content-Length: 42, 42). This line ensures the values\n # are all valid ints and that as long as the `set` length is 1,\n # all values are the same. Otherwise, the header is invalid.\n lengths = {int(val) for val in content_length.split(\",\")}\n if len(lengths) > 1:\n raise InvalidHeader(\n \"Content-Length contained multiple \"\n \"unmatching values (%s)\" % content_length\n )\n length = lengths.pop()\n except ValueError:\n length = None\n else:\n if length < 0:\n length = None\n\n else: # if content_length is None\n length = None\n\n # Convert status to int for comparison\n # In some cases, httplib returns a status of \"_UNKNOWN\"\n try:\n status = int(self.status)\n except ValueError:\n status = 0\n\n # Check for responses that shouldn't include a body\n if status in (204, 304) or 100 <= status < 200 or request_method == \"HEAD\":\n length = 0\n\n return length\n\n @contextmanager\n def _error_catcher(self) -> Generator[None, None, None]:\n \"\"\"\n Catch low-level python exceptions, instead re-raising urllib3\n variants, so that low-level exceptions are not leaked in the\n high-level api.\n\n On exit, release the connection back to the pool.\n \"\"\"\n clean_exit = False\n\n try:\n try:\n yield\n\n except SocketTimeout as e:\n # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but\n # there is yet no clean way to get at it from this context.\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\") from e # type: ignore[arg-type]\n\n except BaseSSLError as e:\n # FIXME: Is there a better way to differentiate between SSLErrors?\n if \"read operation timed out\" not in str(e):\n # SSL errors related to framing/MAC get wrapped and reraised here\n raise SSLError(e) from e\n\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\") from e # type: ignore[arg-type]\n\n except (HTTPException, OSError) as e:\n # This includes IncompleteRead.\n raise ProtocolError(f\"Connection broken: {e!r}\", e) from e\n\n # If no exception is thrown, we should avoid cleaning up\n # unnecessarily.\n clean_exit = True\n finally:\n # If we didn't terminate cleanly, we need to throw away our\n # connection.\n if not clean_exit:\n # The response may not be closed but we're not going to use it\n # anymore so close it now to ensure that the connection is\n # released back to the pool.\n if self._original_response:\n self._original_response.close()\n\n # Closing the response may not actually be sufficient to close\n # everything, so if we have a hold of the connection close that\n # too.\n if self._connection:\n self._connection.close()\n\n # If we hold the original response but it's closed now, we should\n # return the connection back to the pool.\n if self._original_response and self._original_response.isclosed():\n self.release_conn()\n\n def read(\n self,\n amt: Optional[int] = None,\n decode_content: Optional[bool] = None,\n cache_content: bool = False,\n ) -> bytes:\n \"\"\"\n Similar to :meth:`http.client.HTTPResponse.read`, but with two additional\n parameters: ``decode_content`` and 
``cache_content``.\n\n :param amt:\n How much of the content to read. If specified, caching is skipped\n because it doesn't make sense to cache partial content as the full\n response.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param cache_content:\n If True, will save the returned data such that the same result is\n returned despite of the state of the underlying file object. This\n is useful if you want the ``.data`` property to continue working\n after having ``.read()`` the file object. (Overridden if ``amt`` is\n set.)\n \"\"\"\n self._init_decoder()\n if decode_content is None:\n decode_content = self.decode_content\n\n if self._fp is None:\n return None # type: ignore[return-value]\n\n flush_decoder = False\n fp_closed = getattr(self._fp, \"closed\", False)\n\n with self._error_catcher():\n if amt is None:\n # cStringIO doesn't like amt=None\n data = self._fp.read() if not fp_closed else b\"\"\n flush_decoder = True\n else:\n cache_content = False\n data = self._fp.read(amt) if not fp_closed else b\"\"\n if (\n amt != 0 and not data\n ): # Platform-specific: Buggy versions of Python.\n # Close the connection when no data is returned\n #\n # This is redundant to what httplib/http.client _should_\n # already do. However, versions of python released before\n # December 15, 2012 (http://bugs.python.org/issue16298) do\n # not properly close the connection in all cases. There is\n # no harm in redundantly calling close.\n self._fp.close()\n flush_decoder = True\n if (\n self.enforce_content_length\n and self.length_remaining is not None\n and self.length_remaining != 0\n ):\n # This is an edge case that httplib failed to cover due\n # to concerns of backward compatibility. We're\n # addressing it here to make sure IncompleteRead is\n # raised during streaming, so all calls with incorrect\n # Content-Length are caught.\n raise IncompleteRead(self._fp_bytes_read, self.length_remaining)\n\n if data:\n self._fp_bytes_read += len(data)\n if self.length_remaining is not None:\n self.length_remaining -= len(data)\n\n data = self._decode(data, decode_content, flush_decoder)\n\n if cache_content:\n self._body = data\n\n return data\n\n def stream(\n self, amt: Optional[int] = 2**16, decode_content: Optional[bool] = None\n ) -> Generator[bytes, None, None]:\n \"\"\"\n A generator wrapper for the read() method. A call will block until\n ``amt`` bytes have been read from the connection or until the\n connection is closed.\n\n :param amt:\n How much of the content to read. The generator will return up to\n much data per iteration, but may return less. This is particularly\n likely when using compressed data. 
However, the empty string will\n never be returned.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n \"\"\"\n if self.chunked and self.supports_chunked_reads():\n yield from self.read_chunked(amt, decode_content=decode_content)\n else:\n while not is_fp_closed(self._fp):\n data = self.read(amt=amt, decode_content=decode_content)\n\n if data:\n yield data\n\n @classmethod\n def from_httplib(\n ResponseCls: Type[\"HTTPResponse\"], r: _HttplibHTTPResponse, **response_kw: Any\n ) -> \"HTTPResponse\":\n \"\"\"\n Given an :class:`http.client.HTTPResponse` instance ``r``, return a\n corresponding :class:`urllib3.response.HTTPResponse` object.\n\n Remaining parameters are passed to the HTTPResponse constructor, along\n with ``original_response=r``.\n \"\"\"\n headers = r.msg\n\n if not isinstance(headers, HTTPHeaderDict):\n headers = HTTPHeaderDict(headers.items()) # type: ignore[assignment]\n\n resp = ResponseCls(\n body=r,\n headers=headers, # type: ignore[arg-type]\n status=r.status,\n version=r.version,\n reason=r.reason,\n original_response=r,\n **response_kw,\n )\n return resp\n\n # Overrides from io.IOBase\n def close(self) -> None:\n if not self.closed and self._fp:\n self._fp.close()\n\n if self._connection:\n self._connection.close()\n\n if not self.auto_close:\n io.IOBase.close(self)\n\n @property\n def closed(self) -> bool:\n if not self.auto_close:\n return io.IOBase.closed.__get__(self) # type: ignore[no-any-return]\n elif self._fp is None:\n return True\n elif hasattr(self._fp, \"isclosed\"):\n return self._fp.isclosed()\n elif hasattr(self._fp, \"closed\"):\n return self._fp.closed\n else:\n return True\n\n def fileno(self) -> int:\n if self._fp is None:\n raise OSError(\"HTTPResponse has no file to get a fileno from\")\n elif hasattr(self._fp, \"fileno\"):\n return self._fp.fileno()\n else:\n raise OSError(\n \"The file-like object this HTTPResponse is wrapped \"\n \"around has no file descriptor\"\n )\n\n def flush(self) -> None:\n if (\n self._fp is not None\n and hasattr(self._fp, \"flush\")\n and not getattr(self._fp, \"closed\", False)\n ):\n return self._fp.flush()\n\n def supports_chunked_reads(self) -> bool:\n \"\"\"\n Checks if the underlying file-like object looks like a\n :class:`http.client.HTTPResponse` object. We do this by testing for\n the fp attribute. 
If it is present we assume it returns raw chunks as\n processed by read_chunked().\n \"\"\"\n return hasattr(self._fp, \"fp\")\n\n def _update_chunk_length(self) -> None:\n # First, we'll figure out length of a chunk and then\n # we'll try to read it from socket.\n if self.chunk_left is not None:\n return None\n line = self._fp.fp.readline() # type: ignore[union-attr]\n line = line.split(b\";\", 1)[0]\n try:\n self.chunk_left = int(line, 16)\n except ValueError:\n # Invalid chunked protocol response, abort.\n self.close()\n raise InvalidChunkLength(self, line) from None\n\n def _handle_chunk(self, amt: Optional[int]) -> bytes:\n returned_chunk = None\n if amt is None:\n chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]\n returned_chunk = chunk\n self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.\n self.chunk_left = None\n elif self.chunk_left is not None and amt < self.chunk_left:\n value = self._fp._safe_read(amt) # type: ignore[union-attr]\n self.chunk_left = self.chunk_left - amt\n returned_chunk = value\n elif amt == self.chunk_left:\n value = self._fp._safe_read(amt) # type: ignore[union-attr]\n self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.\n self.chunk_left = None\n returned_chunk = value\n else: # amt > self.chunk_left\n returned_chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]\n self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.\n self.chunk_left = None\n return returned_chunk # type: ignore[no-any-return]\n\n def read_chunked(\n self, amt: Optional[int] = None, decode_content: Optional[bool] = None\n ) -> Generator[bytes, None, None]:\n \"\"\"\n Similar to :meth:`HTTPResponse.read`, but with an additional\n parameter: ``decode_content``.\n\n :param amt:\n How much of the content to read. If specified, caching is skipped\n because it doesn't make sense to cache partial content as the full\n response.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n \"\"\"\n self._init_decoder()\n # FIXME: Rewrite this method and make it a class with a better structured logic.\n if not self.chunked:\n raise ResponseNotChunked(\n \"Response is not chunked. \"\n \"Header 'transfer-encoding: chunked' is missing.\"\n )\n if not self.supports_chunked_reads():\n raise BodyNotHttplibCompatible(\n \"Body should be http.client.HTTPResponse like. \"\n \"It should have have an fp attribute which returns raw chunks.\"\n )\n\n with self._error_catcher():\n # Don't bother reading the body of a HEAD request.\n if self._original_response and is_response_to_head(self._original_response):\n self._original_response.close()\n return None\n\n # If a response is already read and closed\n # then return immediately.\n if self._fp.fp is None: # type: ignore[union-attr]\n return None\n\n while True:\n self._update_chunk_length()\n if self.chunk_left == 0:\n break\n chunk = self._handle_chunk(amt)\n decoded = self._decode(\n chunk, decode_content=decode_content, flush_decoder=False\n )\n if decoded:\n yield decoded\n\n if decode_content:\n # On CPython and PyPy, we should never need to flush the\n # decoder. 
However, on Jython we *might* need to, so\n # lets defensively do it anyway.\n decoded = self._flush_decoder()\n if decoded: # Platform-specific: Jython.\n yield decoded\n\n # Chunk content ends with \\r\\n: discard it.\n while self._fp is not None:\n line = self._fp.fp.readline()\n if not line:\n # Some sites may not end with '\\r\\n'.\n break\n if line == b\"\\r\\n\":\n break\n\n # We read everything; close the \"file\".\n if self._original_response:\n self._original_response.close()\n\n @property\n def url(self) -> Optional[str]:\n \"\"\"\n Returns the URL that was the source of this response.\n If the request that generated this response redirected, this method\n will return the final redirect location.\n \"\"\"\n return self._request_url\n\n @url.setter\n def url(self, url: str) -> None:\n self._request_url = url\n\n def __iter__(self) -> Iterator[bytes]:\n buffer: List[bytes] = []\n for chunk in self.stream(decode_content=True):\n if b\"\\n\" in chunk:\n chunks = chunk.split(b\"\\n\")\n yield b\"\".join(buffer) + chunks[0] + b\"\\n\"\n for x in chunks[1:-1]:\n yield x + b\"\\n\"\n if chunks[-1]:\n buffer = [chunks[-1]]\n else:\n buffer = []\n else:\n buffer.append(chunk)\n if buffer:\n yield b\"\".join(buffer)\n", "path": "src/urllib3/response.py" } ]
[ { "content": "import io\nimport json as _json\nimport logging\nimport re\nimport zlib\nfrom contextlib import contextmanager\nfrom http.client import HTTPMessage as _HttplibHTTPMessage\nfrom http.client import HTTPResponse as _HttplibHTTPResponse\nfrom socket import timeout as SocketTimeout\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Generator,\n Iterator,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n Union,\n)\n\ntry:\n try:\n import brotlicffi as brotli # type: ignore[import]\n except ImportError:\n import brotli # type: ignore[import]\nexcept ImportError:\n brotli = None\n\ntry:\n import zstandard as zstd # type: ignore[import]\n\n # The package 'zstandard' added the 'eof' property starting\n # in v0.18.0 which we require to ensure a complete and\n # valid zstd stream was fed into the ZstdDecoder.\n # See: https://github.com/urllib3/urllib3/pull/2624\n _zstd_version = _zstd_version = tuple(\n map(int, re.search(r\"^([0-9]+)\\.([0-9]+)\", zstd.__version__).groups()) # type: ignore[union-attr]\n )\n if _zstd_version < (0, 18): # Defensive:\n zstd = None\n\nexcept (AttributeError, ImportError, ValueError): # Defensive:\n zstd = None\n\nfrom ._collections import HTTPHeaderDict\nfrom .connection import _TYPE_BODY, BaseSSLError, HTTPConnection, HTTPException\nfrom .exceptions import (\n BodyNotHttplibCompatible,\n DecodeError,\n HTTPError,\n IncompleteRead,\n InvalidChunkLength,\n InvalidHeader,\n ProtocolError,\n ReadTimeoutError,\n ResponseNotChunked,\n SSLError,\n)\nfrom .util.response import is_fp_closed, is_response_to_head\nfrom .util.retry import Retry\n\nif TYPE_CHECKING:\n from typing_extensions import Literal\n\n from .connectionpool import HTTPConnectionPool\n\nlog = logging.getLogger(__name__)\n\n\nclass ContentDecoder:\n def decompress(self, data: bytes) -> bytes:\n raise NotImplementedError()\n\n def flush(self) -> bytes:\n raise NotImplementedError()\n\n\nclass DeflateDecoder(ContentDecoder):\n def __init__(self) -> None:\n self._first_try = True\n self._data = b\"\"\n self._obj = zlib.decompressobj()\n\n def decompress(self, data: bytes) -> bytes:\n if not data:\n return data\n\n if not self._first_try:\n return self._obj.decompress(data)\n\n self._data += data\n try:\n decompressed = self._obj.decompress(data)\n if decompressed:\n self._first_try = False\n self._data = None # type: ignore[assignment]\n return decompressed\n except zlib.error:\n self._first_try = False\n self._obj = zlib.decompressobj(-zlib.MAX_WBITS)\n try:\n return self.decompress(self._data)\n finally:\n self._data = None # type: ignore[assignment]\n\n def flush(self) -> bytes:\n return self._obj.flush()\n\n\nclass GzipDecoderState:\n\n FIRST_MEMBER = 0\n OTHER_MEMBERS = 1\n SWALLOW_DATA = 2\n\n\nclass GzipDecoder(ContentDecoder):\n def __init__(self) -> None:\n self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)\n self._state = GzipDecoderState.FIRST_MEMBER\n\n def decompress(self, data: bytes) -> bytes:\n ret = bytearray()\n if self._state == GzipDecoderState.SWALLOW_DATA or not data:\n return bytes(ret)\n while True:\n try:\n ret += self._obj.decompress(data)\n except zlib.error:\n previous_state = self._state\n # Ignore data after the first error\n self._state = GzipDecoderState.SWALLOW_DATA\n if previous_state == GzipDecoderState.OTHER_MEMBERS:\n # Allow trailing garbage acceptable in other gzip clients\n return bytes(ret)\n raise\n data = self._obj.unused_data\n if not data:\n return bytes(ret)\n self._state = GzipDecoderState.OTHER_MEMBERS\n self._obj = zlib.decompressobj(16 + 
zlib.MAX_WBITS)\n\n def flush(self) -> bytes:\n return self._obj.flush()\n\n\nif brotli is not None:\n\n class BrotliDecoder(ContentDecoder):\n # Supports both 'brotlipy' and 'Brotli' packages\n # since they share an import name. The top branches\n # are for 'brotlipy' and bottom branches for 'Brotli'\n def __init__(self) -> None:\n self._obj = brotli.Decompressor()\n if hasattr(self._obj, \"decompress\"):\n setattr(self, \"decompress\", self._obj.decompress)\n else:\n setattr(self, \"decompress\", self._obj.process)\n\n def flush(self) -> bytes:\n if hasattr(self._obj, \"flush\"):\n return self._obj.flush() # type: ignore[no-any-return]\n return b\"\"\n\n\nif zstd is not None:\n\n class ZstdDecoder(ContentDecoder):\n def __init__(self) -> None:\n self._obj = zstd.ZstdDecompressor().decompressobj()\n\n def decompress(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n return self._obj.decompress(data) # type: ignore[no-any-return]\n\n def flush(self) -> bytes:\n ret = self._obj.flush()\n if not self._obj.eof:\n raise DecodeError(\"Zstandard data is incomplete\")\n return ret # type: ignore[no-any-return]\n\n\nclass MultiDecoder(ContentDecoder):\n \"\"\"\n From RFC7231:\n If one or more encodings have been applied to a representation, the\n sender that applied the encodings MUST generate a Content-Encoding\n header field that lists the content codings in the order in which\n they were applied.\n \"\"\"\n\n def __init__(self, modes: str) -> None:\n self._decoders = [_get_decoder(m.strip()) for m in modes.split(\",\")]\n\n def flush(self) -> bytes:\n return self._decoders[0].flush()\n\n def decompress(self, data: bytes) -> bytes:\n for d in reversed(self._decoders):\n data = d.decompress(data)\n return data\n\n\ndef _get_decoder(mode: str) -> ContentDecoder:\n if \",\" in mode:\n return MultiDecoder(mode)\n\n if mode == \"gzip\":\n return GzipDecoder()\n\n if brotli is not None and mode == \"br\":\n return BrotliDecoder()\n\n if zstd is not None and mode == \"zstd\":\n return ZstdDecoder()\n\n return DeflateDecoder()\n\n\nclass BaseHTTPResponse(io.IOBase):\n CONTENT_DECODERS = [\"gzip\", \"deflate\"]\n if brotli is not None:\n CONTENT_DECODERS += [\"br\"]\n if zstd is not None:\n CONTENT_DECODERS += [\"zstd\"]\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n DECODER_ERROR_CLASSES: Tuple[Type[Exception], ...] 
= (IOError, zlib.error)\n if brotli is not None:\n DECODER_ERROR_CLASSES += (brotli.error,)\n\n if zstd is not None:\n DECODER_ERROR_CLASSES += (zstd.ZstdError,)\n\n def __init__(\n self,\n *,\n headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,\n status: int,\n version: int,\n reason: Optional[str],\n decode_content: bool,\n request_url: Optional[str],\n retries: Optional[Retry] = None,\n ) -> None:\n if isinstance(headers, HTTPHeaderDict):\n self.headers = headers\n else:\n self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]\n self.status = status\n self.version = version\n self.reason = reason\n self.decode_content = decode_content\n self.request_url: Optional[str]\n self.retries = retries\n\n self.chunked = False\n tr_enc = self.headers.get(\"transfer-encoding\", \"\").lower()\n # Don't incur the penalty of creating a list and then discarding it\n encodings = (enc.strip() for enc in tr_enc.split(\",\"))\n if \"chunked\" in encodings:\n self.chunked = True\n\n self._decoder: Optional[ContentDecoder] = None\n\n def get_redirect_location(self) -> Union[Optional[str], \"Literal[False]\"]:\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. ``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get(\"location\")\n return False\n\n @property\n def data(self) -> bytes:\n raise NotImplementedError()\n\n def json(self) -> Any:\n \"\"\"\n Parses the body of the HTTP response as JSON.\n\n To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.\n\n This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.\n\n Read more :ref:`here <json>`.\n \"\"\"\n data = self.data.decode(\"utf-8\")\n return _json.loads(data)\n\n @property\n def url(self) -> Optional[str]:\n raise NotImplementedError()\n\n @property\n def closed(self) -> bool:\n raise NotImplementedError()\n\n @property\n def connection(self) -> Optional[HTTPConnection]:\n raise NotImplementedError()\n\n def stream(\n self, amt: Optional[int] = 2**16, decode_content: Optional[bool] = None\n ) -> Iterator[bytes]:\n raise NotImplementedError()\n\n def read(\n self,\n amt: Optional[int] = None,\n decode_content: Optional[bool] = None,\n cache_content: bool = False,\n ) -> bytes:\n raise NotImplementedError()\n\n def read_chunked(\n self,\n amt: Optional[int] = None,\n decode_content: Optional[bool] = None,\n ) -> Iterator[bytes]:\n raise NotImplementedError()\n\n def release_conn(self) -> None:\n raise NotImplementedError()\n\n def drain_conn(self) -> None:\n raise NotImplementedError()\n\n def close(self) -> None:\n raise NotImplementedError()\n\n def _init_decoder(self) -> None:\n \"\"\"\n Set-up the _decoder attribute if necessary.\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = _get_decoder(content_encoding)\n elif \",\" in content_encoding:\n encodings = [\n e.strip()\n for e in content_encoding.split(\",\")\n if e.strip() in self.CONTENT_DECODERS\n ]\n if encodings:\n self._decoder = _get_decoder(content_encoding)\n\n def _decode(\n self, data: bytes, decode_content: Optional[bool], flush_decoder: bool\n ) -> bytes:\n \"\"\"\n Decode the data 
passed in and potentially flush the decoder.\n \"\"\"\n if not decode_content:\n return data\n\n try:\n if self._decoder:\n data = self._decoder.decompress(data)\n except self.DECODER_ERROR_CLASSES as e:\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e,\n ) from e\n if flush_decoder:\n data += self._flush_decoder()\n\n return data\n\n def _flush_decoder(self) -> bytes:\n \"\"\"\n Flushes the decoder. Should only be called if the decoder is actually\n being used.\n \"\"\"\n if self._decoder:\n return self._decoder.decompress(b\"\") + self._decoder.flush()\n return b\"\"\n\n # Compatibility methods for `io` module\n def readable(self) -> bool:\n return True\n\n def readinto(self, b: bytearray) -> int:\n temp = self.read(len(b))\n if len(temp) == 0:\n return 0\n else:\n b[: len(temp)] = temp\n return len(temp)\n\n # Compatibility methods for http.client.HTTPResponse\n def getheaders(self) -> List[Tuple[str, str]]:\n return list(self.headers.items())\n\n def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]:\n return self.headers.get(name, default)\n\n # Compatibility method for http.cookiejar\n def info(self) -> HTTPHeaderDict:\n return self.headers\n\n def geturl(self) -> Optional[str]:\n return self.url\n\n\nclass HTTPResponse(BaseHTTPResponse):\n \"\"\"\n HTTP Response container.\n\n Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is\n loaded and decoded on-demand when the ``data`` property is accessed. This\n class is also compatible with the Python standard library's :mod:`io`\n module, and can hence be treated as a readable object in the context of that\n framework.\n\n Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param original_response:\n When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`\n object, it's convenient to include the original for debug purposes. It's\n otherwise unused.\n\n :param retries:\n The retries contains the last :class:`~urllib3.util.retry.Retry` that\n was used during the request.\n\n :param enforce_content_length:\n Enforce content length checking. Body returned by server must match\n value of Content-Length header, if present. 
Otherwise, raise error.\n \"\"\"\n\n def __init__(\n self,\n body: _TYPE_BODY = \"\",\n headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,\n status: int = 0,\n version: int = 0,\n reason: Optional[str] = None,\n preload_content: bool = True,\n decode_content: bool = True,\n original_response: Optional[_HttplibHTTPResponse] = None,\n pool: Optional[\"HTTPConnectionPool\"] = None,\n connection: Optional[HTTPConnection] = None,\n msg: Optional[_HttplibHTTPMessage] = None,\n retries: Optional[Retry] = None,\n enforce_content_length: bool = True,\n request_method: Optional[str] = None,\n request_url: Optional[str] = None,\n auto_close: bool = True,\n ) -> None:\n super().__init__(\n headers=headers,\n status=status,\n version=version,\n reason=reason,\n decode_content=decode_content,\n request_url=request_url,\n retries=retries,\n )\n\n self.enforce_content_length = enforce_content_length\n self.auto_close = auto_close\n\n self._body = None\n self._fp: Optional[_HttplibHTTPResponse] = None\n self._original_response = original_response\n self._fp_bytes_read = 0\n self.msg = msg\n if self.retries is not None and self.retries.history:\n self._request_url = self.retries.history[-1].redirect_location\n else:\n self._request_url = request_url\n\n if body and isinstance(body, (str, bytes)):\n self._body = body\n\n self._pool = pool\n self._connection = connection\n\n if hasattr(body, \"read\"):\n self._fp = body # type: ignore[assignment]\n\n # Are we using the chunked-style of transfer encoding?\n self.chunk_left: Optional[int] = None\n\n # Determine length of response\n self.length_remaining = self._init_length(request_method)\n\n # If requested, preload the body.\n if preload_content and not self._body:\n self._body = self.read(decode_content=decode_content)\n\n def release_conn(self) -> None:\n if not self._pool or not self._connection:\n return None\n\n self._pool._put_conn(self._connection)\n self._connection = None\n\n def drain_conn(self) -> None:\n \"\"\"\n Read and discard any remaining HTTP response data in the response connection.\n\n Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.\n \"\"\"\n try:\n self.read()\n except (HTTPError, OSError, BaseSSLError, HTTPException):\n pass\n\n @property\n def data(self) -> bytes:\n # For backwards-compat with earlier urllib3 0.4 and earlier.\n if self._body:\n return self._body # type: ignore[return-value]\n\n if self._fp:\n return self.read(cache_content=True)\n\n return None # type: ignore[return-value]\n\n @property\n def connection(self) -> Optional[HTTPConnection]:\n return self._connection\n\n def isclosed(self) -> bool:\n return is_fp_closed(self._fp)\n\n def tell(self) -> int:\n \"\"\"\n Obtain the number of bytes pulled over the wire so far. May differ from\n the amount of content returned by :meth:``urllib3.response.HTTPResponse.read``\n if bytes are encoded on the wire (e.g, compressed).\n \"\"\"\n return self._fp_bytes_read\n\n def _init_length(self, request_method: Optional[str]) -> Optional[int]:\n \"\"\"\n Set initial length value for Response content if available.\n \"\"\"\n length: Optional[int]\n content_length: Optional[str] = self.headers.get(\"content-length\")\n\n if content_length is not None:\n if self.chunked:\n # This Response will fail with an IncompleteRead if it can't be\n # received as chunked. 
This method falls back to attempt reading\n # the response before raising an exception.\n log.warning(\n \"Received response with both Content-Length and \"\n \"Transfer-Encoding set. This is expressly forbidden \"\n \"by RFC 7230 sec 3.3.2. Ignoring Content-Length and \"\n \"attempting to process response as Transfer-Encoding: \"\n \"chunked.\"\n )\n return None\n\n try:\n # RFC 7230 section 3.3.2 specifies multiple content lengths can\n # be sent in a single Content-Length header\n # (e.g. Content-Length: 42, 42). This line ensures the values\n # are all valid ints and that as long as the `set` length is 1,\n # all values are the same. Otherwise, the header is invalid.\n lengths = {int(val) for val in content_length.split(\",\")}\n if len(lengths) > 1:\n raise InvalidHeader(\n \"Content-Length contained multiple \"\n \"unmatching values (%s)\" % content_length\n )\n length = lengths.pop()\n except ValueError:\n length = None\n else:\n if length < 0:\n length = None\n\n else: # if content_length is None\n length = None\n\n # Convert status to int for comparison\n # In some cases, httplib returns a status of \"_UNKNOWN\"\n try:\n status = int(self.status)\n except ValueError:\n status = 0\n\n # Check for responses that shouldn't include a body\n if status in (204, 304) or 100 <= status < 200 or request_method == \"HEAD\":\n length = 0\n\n return length\n\n @contextmanager\n def _error_catcher(self) -> Generator[None, None, None]:\n \"\"\"\n Catch low-level python exceptions, instead re-raising urllib3\n variants, so that low-level exceptions are not leaked in the\n high-level api.\n\n On exit, release the connection back to the pool.\n \"\"\"\n clean_exit = False\n\n try:\n try:\n yield\n\n except SocketTimeout as e:\n # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but\n # there is yet no clean way to get at it from this context.\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\") from e # type: ignore[arg-type]\n\n except BaseSSLError as e:\n # FIXME: Is there a better way to differentiate between SSLErrors?\n if \"read operation timed out\" not in str(e):\n # SSL errors related to framing/MAC get wrapped and reraised here\n raise SSLError(e) from e\n\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\") from e # type: ignore[arg-type]\n\n except (HTTPException, OSError) as e:\n # This includes IncompleteRead.\n raise ProtocolError(f\"Connection broken: {e!r}\", e) from e\n\n # If no exception is thrown, we should avoid cleaning up\n # unnecessarily.\n clean_exit = True\n finally:\n # If we didn't terminate cleanly, we need to throw away our\n # connection.\n if not clean_exit:\n # The response may not be closed but we're not going to use it\n # anymore so close it now to ensure that the connection is\n # released back to the pool.\n if self._original_response:\n self._original_response.close()\n\n # Closing the response may not actually be sufficient to close\n # everything, so if we have a hold of the connection close that\n # too.\n if self._connection:\n self._connection.close()\n\n # If we hold the original response but it's closed now, we should\n # return the connection back to the pool.\n if self._original_response and self._original_response.isclosed():\n self.release_conn()\n\n def read(\n self,\n amt: Optional[int] = None,\n decode_content: Optional[bool] = None,\n cache_content: bool = False,\n ) -> bytes:\n \"\"\"\n Similar to :meth:`http.client.HTTPResponse.read`, but with two additional\n parameters: ``decode_content`` and 
``cache_content``.\n\n :param amt:\n How much of the content to read. If specified, caching is skipped\n because it doesn't make sense to cache partial content as the full\n response.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param cache_content:\n If True, will save the returned data such that the same result is\n returned despite of the state of the underlying file object. This\n is useful if you want the ``.data`` property to continue working\n after having ``.read()`` the file object. (Overridden if ``amt`` is\n set.)\n \"\"\"\n self._init_decoder()\n if decode_content is None:\n decode_content = self.decode_content\n\n if self._fp is None:\n return None # type: ignore[return-value]\n\n flush_decoder = False\n fp_closed = getattr(self._fp, \"closed\", False)\n\n with self._error_catcher():\n if amt is None:\n # cStringIO doesn't like amt=None\n data = self._fp.read() if not fp_closed else b\"\"\n flush_decoder = True\n else:\n cache_content = False\n data = self._fp.read(amt) if not fp_closed else b\"\"\n if (\n amt != 0 and not data\n ): # Platform-specific: Buggy versions of Python.\n # Close the connection when no data is returned\n #\n # This is redundant to what httplib/http.client _should_\n # already do. However, versions of python released before\n # December 15, 2012 (http://bugs.python.org/issue16298) do\n # not properly close the connection in all cases. There is\n # no harm in redundantly calling close.\n self._fp.close()\n flush_decoder = True\n if (\n self.enforce_content_length\n and self.length_remaining is not None\n and self.length_remaining != 0\n ):\n # This is an edge case that httplib failed to cover due\n # to concerns of backward compatibility. We're\n # addressing it here to make sure IncompleteRead is\n # raised during streaming, so all calls with incorrect\n # Content-Length are caught.\n raise IncompleteRead(self._fp_bytes_read, self.length_remaining)\n\n if data:\n self._fp_bytes_read += len(data)\n if self.length_remaining is not None:\n self.length_remaining -= len(data)\n\n data = self._decode(data, decode_content, flush_decoder)\n\n if cache_content:\n self._body = data\n\n return data\n\n def stream(\n self, amt: Optional[int] = 2**16, decode_content: Optional[bool] = None\n ) -> Generator[bytes, None, None]:\n \"\"\"\n A generator wrapper for the read() method. A call will block until\n ``amt`` bytes have been read from the connection or until the\n connection is closed.\n\n :param amt:\n How much of the content to read. The generator will return up to\n much data per iteration, but may return less. This is particularly\n likely when using compressed data. 
However, the empty string will\n never be returned.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n \"\"\"\n if self.chunked and self.supports_chunked_reads():\n yield from self.read_chunked(amt, decode_content=decode_content)\n else:\n while not is_fp_closed(self._fp):\n data = self.read(amt=amt, decode_content=decode_content)\n\n if data:\n yield data\n\n @classmethod\n def from_httplib(\n ResponseCls: Type[\"HTTPResponse\"], r: _HttplibHTTPResponse, **response_kw: Any\n ) -> \"HTTPResponse\":\n \"\"\"\n Given an :class:`http.client.HTTPResponse` instance ``r``, return a\n corresponding :class:`urllib3.response.HTTPResponse` object.\n\n Remaining parameters are passed to the HTTPResponse constructor, along\n with ``original_response=r``.\n \"\"\"\n headers = r.msg\n\n if not isinstance(headers, HTTPHeaderDict):\n headers = HTTPHeaderDict(headers.items()) # type: ignore[assignment]\n\n resp = ResponseCls(\n body=r,\n headers=headers, # type: ignore[arg-type]\n status=r.status,\n version=r.version,\n reason=r.reason,\n original_response=r,\n **response_kw,\n )\n return resp\n\n # Overrides from io.IOBase\n def close(self) -> None:\n if not self.closed and self._fp:\n self._fp.close()\n\n if self._connection:\n self._connection.close()\n\n if not self.auto_close:\n io.IOBase.close(self)\n\n @property\n def closed(self) -> bool:\n if not self.auto_close:\n return io.IOBase.closed.__get__(self) # type: ignore[no-any-return]\n elif self._fp is None:\n return True\n elif hasattr(self._fp, \"isclosed\"):\n return self._fp.isclosed()\n elif hasattr(self._fp, \"closed\"):\n return self._fp.closed\n else:\n return True\n\n def fileno(self) -> int:\n if self._fp is None:\n raise OSError(\"HTTPResponse has no file to get a fileno from\")\n elif hasattr(self._fp, \"fileno\"):\n return self._fp.fileno()\n else:\n raise OSError(\n \"The file-like object this HTTPResponse is wrapped \"\n \"around has no file descriptor\"\n )\n\n def flush(self) -> None:\n if (\n self._fp is not None\n and hasattr(self._fp, \"flush\")\n and not getattr(self._fp, \"closed\", False)\n ):\n return self._fp.flush()\n\n def supports_chunked_reads(self) -> bool:\n \"\"\"\n Checks if the underlying file-like object looks like a\n :class:`http.client.HTTPResponse` object. We do this by testing for\n the fp attribute. 
If it is present we assume it returns raw chunks as\n processed by read_chunked().\n \"\"\"\n return hasattr(self._fp, \"fp\")\n\n def _update_chunk_length(self) -> None:\n # First, we'll figure out length of a chunk and then\n # we'll try to read it from socket.\n if self.chunk_left is not None:\n return None\n line = self._fp.fp.readline() # type: ignore[union-attr]\n line = line.split(b\";\", 1)[0]\n try:\n self.chunk_left = int(line, 16)\n except ValueError:\n # Invalid chunked protocol response, abort.\n self.close()\n raise InvalidChunkLength(self, line) from None\n\n def _handle_chunk(self, amt: Optional[int]) -> bytes:\n returned_chunk = None\n if amt is None:\n chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]\n returned_chunk = chunk\n self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.\n self.chunk_left = None\n elif self.chunk_left is not None and amt < self.chunk_left:\n value = self._fp._safe_read(amt) # type: ignore[union-attr]\n self.chunk_left = self.chunk_left - amt\n returned_chunk = value\n elif amt == self.chunk_left:\n value = self._fp._safe_read(amt) # type: ignore[union-attr]\n self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.\n self.chunk_left = None\n returned_chunk = value\n else: # amt > self.chunk_left\n returned_chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]\n self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.\n self.chunk_left = None\n return returned_chunk # type: ignore[no-any-return]\n\n def read_chunked(\n self, amt: Optional[int] = None, decode_content: Optional[bool] = None\n ) -> Generator[bytes, None, None]:\n \"\"\"\n Similar to :meth:`HTTPResponse.read`, but with an additional\n parameter: ``decode_content``.\n\n :param amt:\n How much of the content to read. If specified, caching is skipped\n because it doesn't make sense to cache partial content as the full\n response.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n \"\"\"\n self._init_decoder()\n # FIXME: Rewrite this method and make it a class with a better structured logic.\n if not self.chunked:\n raise ResponseNotChunked(\n \"Response is not chunked. \"\n \"Header 'transfer-encoding: chunked' is missing.\"\n )\n if not self.supports_chunked_reads():\n raise BodyNotHttplibCompatible(\n \"Body should be http.client.HTTPResponse like. \"\n \"It should have have an fp attribute which returns raw chunks.\"\n )\n\n with self._error_catcher():\n # Don't bother reading the body of a HEAD request.\n if self._original_response and is_response_to_head(self._original_response):\n self._original_response.close()\n return None\n\n # If a response is already read and closed\n # then return immediately.\n if self._fp.fp is None: # type: ignore[union-attr]\n return None\n\n while True:\n self._update_chunk_length()\n if self.chunk_left == 0:\n break\n chunk = self._handle_chunk(amt)\n decoded = self._decode(\n chunk, decode_content=decode_content, flush_decoder=False\n )\n if decoded:\n yield decoded\n\n if decode_content:\n # On CPython and PyPy, we should never need to flush the\n # decoder. 
However, on Jython we *might* need to, so\n # lets defensively do it anyway.\n decoded = self._flush_decoder()\n if decoded: # Platform-specific: Jython.\n yield decoded\n\n # Chunk content ends with \\r\\n: discard it.\n while self._fp is not None:\n line = self._fp.fp.readline()\n if not line:\n # Some sites may not end with '\\r\\n'.\n break\n if line == b\"\\r\\n\":\n break\n\n # We read everything; close the \"file\".\n if self._original_response:\n self._original_response.close()\n\n @property\n def url(self) -> Optional[str]:\n \"\"\"\n Returns the URL that was the source of this response.\n If the request that generated this response redirected, this method\n will return the final redirect location.\n \"\"\"\n return self._request_url\n\n @url.setter\n def url(self, url: str) -> None:\n self._request_url = url\n\n def __iter__(self) -> Iterator[bytes]:\n buffer: List[bytes] = []\n for chunk in self.stream(decode_content=True):\n if b\"\\n\" in chunk:\n chunks = chunk.split(b\"\\n\")\n yield b\"\".join(buffer) + chunks[0] + b\"\\n\"\n for x in chunks[1:-1]:\n yield x + b\"\\n\"\n if chunks[-1]:\n buffer = [chunks[-1]]\n else:\n buffer = []\n else:\n buffer.append(chunk)\n if buffer:\n yield b\"\".join(buffer)\n", "path": "src/urllib3/response.py" } ]
diff --git a/src/urllib3/response.py b/src/urllib3/response.py index 8690d2d7a8..448627c79e 100644 --- a/src/urllib3/response.py +++ b/src/urllib3/response.py @@ -411,7 +411,7 @@ def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]: def info(self) -> HTTPHeaderDict: return self.headers - def geturl(self) -> Optional[Union[str, "Literal[False]"]]: + def geturl(self) -> Optional[str]: return self.url
python-pillow__Pillow-4788
PSD Plugin does not register a MIME type The [`PSDImagePlugin`](https://github.com/python-pillow/Pillow/blob/master/src/PIL/PsdImagePlugin.py) does not register a MIME type as I'd expect it to. The correct MIME for PSD images, according to IANA, is ["image/vnd.adobe.photoshop"](https://www.iana.org/assignments/media-types/image/vnd.adobe.photoshop). Is there a reason this isn't registered?
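Pillow's registry API already supports this; below is a minimal sketch of the registration the report asks for. The call is shown from user code purely for illustration (in the plugin it would sit next to the existing `register_open`/`register_extension` calls), and the example filename is hypothetical.

```python
from PIL import Image, PsdImagePlugin

# Associate the PSD format name with its IANA MIME type, mirroring the
# register_open()/register_extension() calls the plugin already makes.
Image.register_mime(PsdImagePlugin.PsdImageFile.format, "image/vnd.adobe.photoshop")

# Once registered, an opened PSD reports the type:
#   Image.open("layers.psd").get_format_mimetype()  # -> "image/vnd.adobe.photoshop"
```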
[ { "content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secret Labs AB.\n# Copyright (c) 1995-2001 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport io\n\nfrom . import Image, ImageFile, ImagePalette\nfrom ._binary import i8, i16be as i16, i32be as i32\n\nMODES = {\n # (photoshop mode, bits) -> (pil mode, required channels)\n (0, 1): (\"1\", 1),\n (0, 8): (\"L\", 1),\n (1, 8): (\"L\", 1),\n (2, 8): (\"P\", 1),\n (3, 8): (\"RGB\", 3),\n (4, 8): (\"CMYK\", 4),\n (7, 8): (\"L\", 1), # FIXME: multilayer\n (8, 8): (\"L\", 1), # duotone\n (9, 8): (\"LAB\", 3),\n}\n\n\n# --------------------------------------------------------------------.\n# read PSD images\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"8BPS\"\n\n\n##\n# Image plugin for Photoshop images.\n\n\nclass PsdImageFile(ImageFile.ImageFile):\n\n format = \"PSD\"\n format_description = \"Adobe Photoshop\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n\n read = self.fp.read\n\n #\n # header\n\n s = read(26)\n if not _accept(s) or i16(s[4:]) != 1:\n raise SyntaxError(\"not a PSD file\")\n\n psd_bits = i16(s[22:])\n psd_channels = i16(s[12:])\n psd_mode = i16(s[24:])\n\n mode, channels = MODES[(psd_mode, psd_bits)]\n\n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n\n self.mode = mode\n self._size = i32(s[18:]), i32(s[14:])\n\n #\n # color mode data\n\n size = i32(read(4))\n if size:\n data = read(size)\n if mode == \"P\" and size == 768:\n self.palette = ImagePalette.raw(\"RGB;L\", data)\n\n #\n # image resources\n\n self.resources = []\n\n size = i32(read(4))\n if size:\n # load resources\n end = self.fp.tell() + size\n while self.fp.tell() < end:\n read(4) # signature\n id = i16(read(2))\n name = read(i8(read(1)))\n if not (len(name) & 1):\n read(1) # padding\n data = read(i32(read(4)))\n if len(data) & 1:\n read(1) # padding\n self.resources.append((id, name, data))\n if id == 1039: # ICC profile\n self.info[\"icc_profile\"] = data\n\n #\n # layer and mask information\n\n self.layers = []\n\n size = i32(read(4))\n if size:\n end = self.fp.tell() + size\n size = i32(read(4))\n if size:\n self.layers = _layerinfo(self.fp)\n self.fp.seek(end)\n self.n_frames = len(self.layers)\n self.is_animated = self.n_frames > 1\n\n #\n # image descriptor\n\n self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)\n\n # keep the file open\n self.__fp = self.fp\n self.frame = 1\n self._min_frame = 1\n\n def seek(self, layer):\n if not self._seek_check(layer):\n return\n\n # seek to given layer (1..max)\n try:\n name, mode, bbox, tile = self.layers[layer - 1]\n self.mode = mode\n self.tile = tile\n self.frame = layer\n self.fp = self.__fp\n return name, bbox\n except IndexError as e:\n raise EOFError(\"no such layer\") from e\n\n def tell(self):\n # return layer number (0=image, 1..max=layers)\n return self.frame\n\n def load_prepare(self):\n # create image memory if necessary\n if not self.im or self.im.mode != self.mode or self.im.size != self.size:\n self.im = Image.core.fill(self.mode, self.size, 0)\n # create palette (optional)\n if self.mode == \"P\":\n Image.Image.load(self)\n\n def _close__fp(self):\n try:\n if self.__fp != self.fp:\n self.__fp.close()\n except AttributeError:\n pass\n finally:\n 
self.__fp = None\n\n\ndef _layerinfo(file):\n # read layerinfo block\n layers = []\n read = file.read\n for i in range(abs(i16(read(2)))):\n\n # bounding box\n y0 = i32(read(4))\n x0 = i32(read(4))\n y1 = i32(read(4))\n x1 = i32(read(4))\n\n # image info\n info = []\n mode = []\n types = list(range(i16(read(2))))\n if len(types) > 4:\n continue\n\n for i in types:\n type = i16(read(2))\n\n if type == 65535:\n m = \"A\"\n else:\n m = \"RGBA\"[type]\n\n mode.append(m)\n size = i32(read(4))\n info.append((m, size))\n\n # figure out the image mode\n mode.sort()\n if mode == [\"R\"]:\n mode = \"L\"\n elif mode == [\"B\", \"G\", \"R\"]:\n mode = \"RGB\"\n elif mode == [\"A\", \"B\", \"G\", \"R\"]:\n mode = \"RGBA\"\n else:\n mode = None # unknown\n\n # skip over blend flags and extra information\n read(12) # filler\n name = \"\"\n size = i32(read(4)) # length of the extra data field\n combined = 0\n if size:\n data_end = file.tell() + size\n\n length = i32(read(4))\n if length:\n file.seek(length - 16, io.SEEK_CUR)\n combined += length + 4\n\n length = i32(read(4))\n if length:\n file.seek(length, io.SEEK_CUR)\n combined += length + 4\n\n length = i8(read(1))\n if length:\n # Don't know the proper encoding,\n # Latin-1 should be a good guess\n name = read(length).decode(\"latin-1\", \"replace\")\n combined += length + 1\n\n file.seek(data_end)\n layers.append((name, mode, (x0, y0, x1, y1)))\n\n # get tiles\n i = 0\n for name, mode, bbox in layers:\n tile = []\n for m in mode:\n t = _maketile(file, m, bbox, 1)\n if t:\n tile.extend(t)\n layers[i] = name, mode, bbox, tile\n i += 1\n\n return layers\n\n\ndef _maketile(file, mode, bbox, channels):\n\n tile = None\n read = file.read\n\n compression = i16(read(2))\n\n xsize = bbox[2] - bbox[0]\n ysize = bbox[3] - bbox[1]\n\n offset = file.tell()\n\n if compression == 0:\n #\n # raw compression\n tile = []\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"raw\", bbox, offset, layer))\n offset = offset + xsize * ysize\n\n elif compression == 1:\n #\n # packbits compression\n i = 0\n tile = []\n bytecount = read(channels * ysize * 2)\n offset = file.tell()\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"packbits\", bbox, offset, layer))\n for y in range(ysize):\n offset = offset + i16(bytecount[i : i + 2])\n i += 2\n\n file.seek(offset)\n\n if offset & 1:\n read(1) # padding\n\n return tile\n\n\n# --------------------------------------------------------------------\n# registry\n\n\nImage.register_open(PsdImageFile.format, PsdImageFile, _accept)\n\nImage.register_extension(PsdImageFile.format, \".psd\")\n", "path": "src/PIL/PsdImagePlugin.py" } ]
[ { "content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secret Labs AB.\n# Copyright (c) 1995-2001 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport io\n\nfrom . import Image, ImageFile, ImagePalette\nfrom ._binary import i8, i16be as i16, i32be as i32\n\nMODES = {\n # (photoshop mode, bits) -> (pil mode, required channels)\n (0, 1): (\"1\", 1),\n (0, 8): (\"L\", 1),\n (1, 8): (\"L\", 1),\n (2, 8): (\"P\", 1),\n (3, 8): (\"RGB\", 3),\n (4, 8): (\"CMYK\", 4),\n (7, 8): (\"L\", 1), # FIXME: multilayer\n (8, 8): (\"L\", 1), # duotone\n (9, 8): (\"LAB\", 3),\n}\n\n\n# --------------------------------------------------------------------.\n# read PSD images\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"8BPS\"\n\n\n##\n# Image plugin for Photoshop images.\n\n\nclass PsdImageFile(ImageFile.ImageFile):\n\n format = \"PSD\"\n format_description = \"Adobe Photoshop\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n\n read = self.fp.read\n\n #\n # header\n\n s = read(26)\n if not _accept(s) or i16(s[4:]) != 1:\n raise SyntaxError(\"not a PSD file\")\n\n psd_bits = i16(s[22:])\n psd_channels = i16(s[12:])\n psd_mode = i16(s[24:])\n\n mode, channels = MODES[(psd_mode, psd_bits)]\n\n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n\n self.mode = mode\n self._size = i32(s[18:]), i32(s[14:])\n\n #\n # color mode data\n\n size = i32(read(4))\n if size:\n data = read(size)\n if mode == \"P\" and size == 768:\n self.palette = ImagePalette.raw(\"RGB;L\", data)\n\n #\n # image resources\n\n self.resources = []\n\n size = i32(read(4))\n if size:\n # load resources\n end = self.fp.tell() + size\n while self.fp.tell() < end:\n read(4) # signature\n id = i16(read(2))\n name = read(i8(read(1)))\n if not (len(name) & 1):\n read(1) # padding\n data = read(i32(read(4)))\n if len(data) & 1:\n read(1) # padding\n self.resources.append((id, name, data))\n if id == 1039: # ICC profile\n self.info[\"icc_profile\"] = data\n\n #\n # layer and mask information\n\n self.layers = []\n\n size = i32(read(4))\n if size:\n end = self.fp.tell() + size\n size = i32(read(4))\n if size:\n self.layers = _layerinfo(self.fp)\n self.fp.seek(end)\n self.n_frames = len(self.layers)\n self.is_animated = self.n_frames > 1\n\n #\n # image descriptor\n\n self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)\n\n # keep the file open\n self.__fp = self.fp\n self.frame = 1\n self._min_frame = 1\n\n def seek(self, layer):\n if not self._seek_check(layer):\n return\n\n # seek to given layer (1..max)\n try:\n name, mode, bbox, tile = self.layers[layer - 1]\n self.mode = mode\n self.tile = tile\n self.frame = layer\n self.fp = self.__fp\n return name, bbox\n except IndexError as e:\n raise EOFError(\"no such layer\") from e\n\n def tell(self):\n # return layer number (0=image, 1..max=layers)\n return self.frame\n\n def load_prepare(self):\n # create image memory if necessary\n if not self.im or self.im.mode != self.mode or self.im.size != self.size:\n self.im = Image.core.fill(self.mode, self.size, 0)\n # create palette (optional)\n if self.mode == \"P\":\n Image.Image.load(self)\n\n def _close__fp(self):\n try:\n if self.__fp != self.fp:\n self.__fp.close()\n except AttributeError:\n pass\n finally:\n 
self.__fp = None\n\n\ndef _layerinfo(file):\n # read layerinfo block\n layers = []\n read = file.read\n for i in range(abs(i16(read(2)))):\n\n # bounding box\n y0 = i32(read(4))\n x0 = i32(read(4))\n y1 = i32(read(4))\n x1 = i32(read(4))\n\n # image info\n info = []\n mode = []\n types = list(range(i16(read(2))))\n if len(types) > 4:\n continue\n\n for i in types:\n type = i16(read(2))\n\n if type == 65535:\n m = \"A\"\n else:\n m = \"RGBA\"[type]\n\n mode.append(m)\n size = i32(read(4))\n info.append((m, size))\n\n # figure out the image mode\n mode.sort()\n if mode == [\"R\"]:\n mode = \"L\"\n elif mode == [\"B\", \"G\", \"R\"]:\n mode = \"RGB\"\n elif mode == [\"A\", \"B\", \"G\", \"R\"]:\n mode = \"RGBA\"\n else:\n mode = None # unknown\n\n # skip over blend flags and extra information\n read(12) # filler\n name = \"\"\n size = i32(read(4)) # length of the extra data field\n combined = 0\n if size:\n data_end = file.tell() + size\n\n length = i32(read(4))\n if length:\n file.seek(length - 16, io.SEEK_CUR)\n combined += length + 4\n\n length = i32(read(4))\n if length:\n file.seek(length, io.SEEK_CUR)\n combined += length + 4\n\n length = i8(read(1))\n if length:\n # Don't know the proper encoding,\n # Latin-1 should be a good guess\n name = read(length).decode(\"latin-1\", \"replace\")\n combined += length + 1\n\n file.seek(data_end)\n layers.append((name, mode, (x0, y0, x1, y1)))\n\n # get tiles\n i = 0\n for name, mode, bbox in layers:\n tile = []\n for m in mode:\n t = _maketile(file, m, bbox, 1)\n if t:\n tile.extend(t)\n layers[i] = name, mode, bbox, tile\n i += 1\n\n return layers\n\n\ndef _maketile(file, mode, bbox, channels):\n\n tile = None\n read = file.read\n\n compression = i16(read(2))\n\n xsize = bbox[2] - bbox[0]\n ysize = bbox[3] - bbox[1]\n\n offset = file.tell()\n\n if compression == 0:\n #\n # raw compression\n tile = []\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"raw\", bbox, offset, layer))\n offset = offset + xsize * ysize\n\n elif compression == 1:\n #\n # packbits compression\n i = 0\n tile = []\n bytecount = read(channels * ysize * 2)\n offset = file.tell()\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"packbits\", bbox, offset, layer))\n for y in range(ysize):\n offset = offset + i16(bytecount[i : i + 2])\n i += 2\n\n file.seek(offset)\n\n if offset & 1:\n read(1) # padding\n\n return tile\n\n\n# --------------------------------------------------------------------\n# registry\n\n\nImage.register_open(PsdImageFile.format, PsdImageFile, _accept)\n\nImage.register_extension(PsdImageFile.format, \".psd\")\n\nImage.register_mime(PsdImageFile.format, \"image/vnd.adobe.photoshop\")\n", "path": "src/PIL/PsdImagePlugin.py" } ]
diff --git a/Tests/test_file_psd.py b/Tests/test_file_psd.py index 011efc9773e..6b26fe44288 100644 --- a/Tests/test_file_psd.py +++ b/Tests/test_file_psd.py @@ -12,6 +12,7 @@ def test_sanity(): assert im.mode == "RGB" assert im.size == (128, 128) assert im.format == "PSD" + assert im.get_format_mimetype() == "image/vnd.adobe.photoshop" im2 = hopper() assert_image_similar(im, im2, 4.8) diff --git a/src/PIL/PsdImagePlugin.py b/src/PIL/PsdImagePlugin.py index f019bb64eb1..80bc116fc14 100644 --- a/src/PIL/PsdImagePlugin.py +++ b/src/PIL/PsdImagePlugin.py @@ -307,3 +307,5 @@ def _maketile(file, mode, bbox, channels): Image.register_open(PsdImageFile.format, PsdImageFile, _accept) Image.register_extension(PsdImageFile.format, ".psd") + +Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop")
getredash__redash-2501
Non-blocking widget refresh indicator When refreshing a dashboard widget, the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might end up seeing the spinner for long periods of time. To solve this, we can keep showing the old data until the new data is available, while showing some indication that a refresh is in progress. Is the following animation enough? ![](http://g.recordit.co/CyccMD6dFc.gif) After refreshing a dashboard, widgets become draggable even when not in edit mode.
[ { "content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n models.db.session.commit()\n return {'widget': widget.to_dict()}\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n models.db.session.delete(widget)\n models.db.session.commit()\n", "path": "redash/handlers/widgets.py" } ]
[ { "content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n models.db.session.commit()\n return widget.to_dict()\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n models.db.session.delete(widget)\n models.db.session.commit()\n", "path": "redash/handlers/widgets.py" } ]
diff --git a/client/app/components/dashboards/add-widget-dialog.js b/client/app/components/dashboards/add-widget-dialog.js index 12796e938c..055055928a 100644 --- a/client/app/components/dashboards/add-widget-dialog.js +++ b/client/app/components/dashboards/add-widget-dialog.js @@ -94,11 +94,10 @@ const AddWidgetDialog = { widget.options.position.col = position.col; widget.options.position.row = position.row; - widget.$save() - .then((response) => { - // update dashboard layout - this.dashboard.version = response.version; - this.dashboard.widgets.push(new Widget(response.widget)); + widget + .save() + .then(() => { + this.dashboard.widgets.push(widget); this.close(); }) .catch(() => { diff --git a/client/app/components/dashboards/widget.html b/client/app/components/dashboards/widget.html index 8f84c98373..ccfdb36353 100644 --- a/client/app/components/dashboards/widget.html +++ b/client/app/components/dashboards/widget.html @@ -10,7 +10,7 @@ </div> <div class="dropdown pull-right widget-menu-regular" ng-if="!$ctrl.public" uib-dropdown> <div class="actions"> - <a data-toggle="dropdown" uib-dropdown-toggle><i class="zmdi zmdi-more"></i></a> + <a data-toggle="dropdown" uib-dropdown-toggle><i class="zmdi zmdi-more-vert"></i></a> </div> <ul class="dropdown-menu pull-right" uib-dropdown-menu style="z-index:1000000"> @@ -51,8 +51,10 @@ </div> <div class="body-row clearfix tile__bottom-control"> - <a class="small hidden-print" ng-click="$ctrl.reload(true)" ng-if="!$ctrl.public"> - <i class="zmdi zmdi-time-restore"></i> <span am-time-ago="$ctrl.widget.getQueryResult().getUpdatedAt()"></span> + <a class="small hidden-print" ng-click="$ctrl.refresh()" ng-if="!$ctrl.public"> + <i ng-class='{"zmdi-hc-spin": $ctrl.widget.loading}' class="zmdi zmdi-refresh"></i> + <span am-time-ago="$ctrl.widget.getQueryResult().getUpdatedAt()" ng-if="!$ctrl.widget.loading"></span> + <rd-timer timestamp="$ctrl.widget.refreshStartedAt" ng-if="$ctrl.widget.loading"></rd-timer> </a> <span class="small hidden-print" ng-if="$ctrl.public"> <i class="zmdi zmdi-time-restore"></i> <span am-time-ago="$ctrl.widget.getQueryResult().getUpdatedAt()"></span> @@ -61,7 +63,7 @@ <i class="zmdi zmdi-time-restore"></i> {{$ctrl.widget.getQueryResult().getUpdatedAt() | dateTime}} </span> - <button class="btn btn-sm btn-default pull-right hidden-print btn-transparent btn__refresh" ng-click="$ctrl.reload(true)" ng-if="!$ctrl.public"><i class="zmdi zmdi-refresh"></i></button> + <button class="btn btn-sm btn-default pull-right hidden-print btn-transparent btn__refresh" ng-click="$ctrl.refresh()" ng-if="!$ctrl.public"><i class="zmdi zmdi-refresh"></i></button> </div> </div> diff --git a/client/app/components/dashboards/widget.js b/client/app/components/dashboards/widget.js index 5b6fdde0a8..ddefe33f00 100644 --- a/client/app/components/dashboards/widget.js +++ b/client/app/components/dashboards/widget.js @@ -19,7 +19,7 @@ const EditTextBoxComponent = { if (this.widget.new_text !== this.widget.existing_text) { this.widget.text = this.widget.new_text; this.widget - .$save() + .save() .then(() => { this.close(); }) @@ -67,9 +67,7 @@ function DashboardWidgetCtrl($location, $uibModal, $window, Events, currentUser) Events.record('delete', 'widget', this.widget.id); - this.widget.$delete((response) => { - this.dashboard.widgets = this.dashboard.widgets.filter(w => w.id !== undefined && w.id !== this.widget.id); - this.dashboard.version = response.version; + this.widget.delete().then(() => { if (this.deleted) { this.deleted({}); } @@ -78,18 +76,21 @@ 
function DashboardWidgetCtrl($location, $uibModal, $window, Events, currentUser) Events.record('view', 'widget', this.widget.id); - this.reload = (force) => { + this.load = (refresh = false) => { const maxAge = $location.search().maxAge; - this.widget.load(force, maxAge); + this.widget.load(refresh, maxAge); + }; + + this.refresh = () => { + this.load(true); }; if (this.widget.visualization) { Events.record('view', 'query', this.widget.visualization.query.id, { dashboard: true }); Events.record('view', 'visualization', this.widget.visualization.id, { dashboard: true }); - this.reload(false); - this.type = 'visualization'; + this.load(); } else if (this.widget.restricted) { this.type = 'restricted'; } else { diff --git a/client/app/pages/dashboards/dashboard.html b/client/app/pages/dashboards/dashboard.html index 74e04a8e81..bd49593bc4 100644 --- a/client/app/pages/dashboards/dashboard.html +++ b/client/app/pages/dashboards/dashboard.html @@ -31,7 +31,7 @@ <h3> <div class="btn-group" uib-dropdown ng-if="!$ctrl.layoutEditing"> <button id="split-button" type="button" ng-class="{'btn-default btn-sm': $ctrl.refreshRate === null,'btn-primary btn-sm':$ctrl.refreshRate !== null}" - class="btn btn-sm" ng-click="$ctrl.loadDashboard(true)"> + class="btn btn-sm" ng-click="$ctrl.refreshDashboard()"> <i class="zmdi zmdi-refresh"></i> {{$ctrl.refreshRate === null ? 'Refresh' : $ctrl.refreshRate.name}} </button> <button type="button" class="btn" uib-dropdown-toggle @@ -92,7 +92,7 @@ <h3> ng-repeat="widget in $ctrl.dashboard.widgets track by widget.id" gridstack-item="widget.options.position" gridstack-item-id="{{ widget.id }}"> <div class="grid-stack-item-content"> - <dashboard-widget widget="widget" dashboard="$ctrl.dashboard" on-delete="$ctrl.removeWidget()"></dashboard-widget> + <dashboard-widget widget="widget" dashboard="$ctrl.dashboard" on-delete="$ctrl.removeWidget(widget.id)"></dashboard-widget> </div> </div> </div> diff --git a/client/app/pages/dashboards/dashboard.js b/client/app/pages/dashboards/dashboard.js index 45d3dcd9fe..b0ed9fdfc6 100644 --- a/client/app/pages/dashboards/dashboard.js +++ b/client/app/pages/dashboards/dashboard.js @@ -46,7 +46,7 @@ function DashboardCtrl( this.saveInProgress = true; const showMessages = true; return $q - .all(_.map(widgets, widget => widget.$save())) + .all(_.map(widgets, widget => widget.save())) .then(() => { if (showMessages) { toastr.success('Changes saved.'); @@ -83,7 +83,7 @@ function DashboardCtrl( this.refreshRate = rate; if (rate !== null) { if (load) { - this.loadDashboard(true); + this.refreshDashboard(); } this.autoRefresh(); } @@ -118,7 +118,7 @@ function DashboardCtrl( }; const collectFilters = (dashboard, forceRefresh) => { - const queryResultPromises = _.compact(this.dashboard.widgets.map(widget => widget.loadPromise(forceRefresh))); + const queryResultPromises = _.compact(this.dashboard.widgets.map(widget => widget.load(forceRefresh))); $q.all(queryResultPromises).then((queryResults) => { const filters = {}; @@ -206,9 +206,13 @@ function DashboardCtrl( this.loadDashboard(); + this.refreshDashboard = () => { + renderDashboard(this.dashboard, true); + }; + this.autoRefresh = () => { $timeout(() => { - this.loadDashboard(true); + this.refreshDashboard(); }, this.refreshRate.rate * 1000).then(() => this.autoRefresh()); }; @@ -319,12 +323,13 @@ function DashboardCtrl( // Save position of newly added widget (but not entire layout) const widget = _.last(this.dashboard.widgets); if (_.isObject(widget)) { - return widget.$save(); + return 
widget.save(); } }); }; - this.removeWidget = () => { + this.removeWidget = (widgetId) => { + this.dashboard.widgets = this.dashboard.widgets.filter(w => w.id !== undefined && w.id !== widgetId); this.extractGlobalParameters(); if (!this.layoutEditing) { // We need to wait a bit while `angular` updates widgets, and only then save new layout diff --git a/client/app/services/widget.js b/client/app/services/widget.js index 4efef4dfd0..a0dc6f8434 100644 --- a/client/app/services/widget.js +++ b/client/app/services/widget.js @@ -1,142 +1,166 @@ +import moment from 'moment'; import { truncate } from 'underscore.string'; -import { pick, flatten, extend, isObject } from 'underscore'; +import { each, pick, extend, isObject } from 'underscore'; + +function calculatePositionOptions(Visualization, dashboardGridOptions, widget) { + widget.width = 1; // Backward compatibility, user on back-end + + const visualizationOptions = { + autoHeight: false, + sizeX: Math.round(dashboardGridOptions.columns / 2), + sizeY: dashboardGridOptions.defaultSizeY, + minSizeX: dashboardGridOptions.minSizeX, + maxSizeX: dashboardGridOptions.maxSizeX, + minSizeY: dashboardGridOptions.minSizeY, + maxSizeY: dashboardGridOptions.maxSizeY, + }; -function Widget($resource, $http, Query, Visualization, dashboardGridOptions) { - function prepareForSave(data) { - return pick(data, 'options', 'text', 'id', 'width', 'dashboard_id', 'visualization_id'); - } + const visualization = widget.visualization ? Visualization.visualizations[widget.visualization.type] : null; + if (isObject(visualization)) { + const options = extend({}, visualization.defaultOptions); - const WidgetResource = $resource( - 'api/widgets/:id', - { id: '@id' }, - { - get: { method: 'GET' }, - save: { - method: 'POST', - transformRequest: flatten([prepareForSave, $http.defaults.transformRequest]), - }, - query: { method: 'GET', isArray: true }, - remove: { method: 'DELETE' }, - delete: { method: 'DELETE' }, - }, - ); - - WidgetResource.prototype.getQuery = function getQuery() { - if (!this.query && this.visualization) { - this.query = new Query(this.visualization.query); + if (Object.prototype.hasOwnProperty.call(options, 'autoHeight')) { + visualizationOptions.autoHeight = options.autoHeight; } - return this.query; - }; + // Width constraints + const minColumns = parseInt(options.minColumns, 10); + if (isFinite(minColumns) && minColumns >= 0) { + visualizationOptions.minSizeX = minColumns; + } + const maxColumns = parseInt(options.maxColumns, 10); + if (isFinite(maxColumns) && maxColumns >= 0) { + visualizationOptions.maxSizeX = Math.min(maxColumns, dashboardGridOptions.columns); + } - WidgetResource.prototype.getQueryResult = function getQueryResult(force, maxAge) { - return this.load(force, maxAge); - }; + // Height constraints + // `minRows` is preferred, but it should be kept for backward compatibility + const height = parseInt(options.height, 10); + if (isFinite(height)) { + visualizationOptions.minSizeY = Math.ceil(height / dashboardGridOptions.rowHeight); + } + const minRows = parseInt(options.minRows, 10); + if (isFinite(minRows)) { + visualizationOptions.minSizeY = minRows; + } + const maxRows = parseInt(options.maxRows, 10); + if (isFinite(maxRows) && maxRows >= 0) { + visualizationOptions.maxSizeY = maxRows; + } - WidgetResource.prototype.load = function load(force, maxAge) { - if (!this.visualization) { - return undefined; + // Default dimensions + const defaultWidth = parseInt(options.defaultColumns, 10); + if (isFinite(defaultWidth) && defaultWidth > 0) 
{ + visualizationOptions.sizeX = defaultWidth; + } + const defaultHeight = parseInt(options.defaultRows, 10); + if (isFinite(defaultHeight) && defaultHeight > 0) { + visualizationOptions.sizeY = defaultHeight; } + } - if (force || this.queryResult === undefined) { - if (maxAge === undefined || force) { - maxAge = force ? 0 : undefined; + return visualizationOptions; +} + +function WidgetFactory($http, Query, Visualization, dashboardGridOptions) { + class Widget { + constructor(data) { + // Copy properties + each(data, (v, k) => { + this[k] = v; + }); + + const visualizationOptions = calculatePositionOptions(Visualization, dashboardGridOptions, this); + + this.options = this.options || {}; + this.options.position = extend( + {}, + visualizationOptions, + pick(this.options.position, ['col', 'row', 'sizeX', 'sizeY', 'autoHeight']), + ); + + if (this.options.position.sizeY < 0) { + this.options.position.autoHeight = true; } - this.queryResult = this.getQuery().getQueryResult(maxAge); + + // Save original position (create a shallow copy) + this.$originalPosition = extend({}, this.options.position); } - return this.queryResult; - }; + getQuery() { + if (!this.query && this.visualization) { + this.query = new Query(this.visualization.query); + } - WidgetResource.prototype.loadPromise = function loadPromise(force, maxAge) { - return this.load(force, maxAge).toPromise(); - }; + return this.query; + } - WidgetResource.prototype.getName = function getName() { - if (this.visualization) { - return `${this.visualization.query.name} (${this.visualization.name})`; + getQueryResult() { + return this.data; } - return truncate(this.text, 20); - }; - function WidgetConstructor(widget) { - widget.width = 1; // Backward compatibility, user on back-end - - const visualizationOptions = { - autoHeight: false, - sizeX: Math.round(dashboardGridOptions.columns / 2), - sizeY: dashboardGridOptions.defaultSizeY, - minSizeX: dashboardGridOptions.minSizeX, - maxSizeX: dashboardGridOptions.maxSizeX, - minSizeY: dashboardGridOptions.minSizeY, - maxSizeY: dashboardGridOptions.maxSizeY, - }; - const visualization = widget.visualization ? 
Visualization.visualizations[widget.visualization.type] : null; - if (isObject(visualization)) { - const options = extend({}, visualization.defaultOptions); - - if (Object.prototype.hasOwnProperty.call(options, 'autoHeight')) { - visualizationOptions.autoHeight = options.autoHeight; + getName() { + if (this.visualization) { + return `${this.visualization.query.name} (${this.visualization.name})`; } + return truncate(this.text, 20); + } - // Width constraints - const minColumns = parseInt(options.minColumns, 10); - if (isFinite(minColumns) && minColumns >= 0) { - visualizationOptions.minSizeX = minColumns; - } - const maxColumns = parseInt(options.maxColumns, 10); - if (isFinite(maxColumns) && maxColumns >= 0) { - visualizationOptions.maxSizeX = Math.min(maxColumns, dashboardGridOptions.columns); - } + load(force, maxAge) { + this.loading = true; + this.refreshStartedAt = moment(); - // Height constraints - // `minRows` is preferred, but it should be kept for backward compatibility - const height = parseInt(options.height, 10); - if (isFinite(height)) { - visualizationOptions.minSizeY = Math.ceil(height / dashboardGridOptions.rowHeight); - } - const minRows = parseInt(options.minRows, 10); - if (isFinite(minRows)) { - visualizationOptions.minSizeY = minRows; - } - const maxRows = parseInt(options.maxRows, 10); - if (isFinite(maxRows) && maxRows >= 0) { - visualizationOptions.maxSizeY = maxRows; + if (!this.visualization) { + return undefined; } - // Default dimensions - const defaultWidth = parseInt(options.defaultColumns, 10); - if (isFinite(defaultWidth) && defaultWidth > 0) { - visualizationOptions.sizeX = defaultWidth; - } - const defaultHeight = parseInt(options.defaultRows, 10); - if (isFinite(defaultHeight) && defaultHeight > 0) { - visualizationOptions.sizeY = defaultHeight; + if (force || this.queryResult === undefined) { + if (maxAge === undefined || force) { + maxAge = force ? 
0 : undefined; + } + this.queryResult = this.getQuery().getQueryResult(maxAge); + + this.queryResult.toPromise().then( + (queryResult) => { + this.data = queryResult; + this.loading = false; + }, + () => { + this.loading = false; + this.data = null; + }, + ); } + + return this.queryResult.toPromise(); } - widget.options = widget.options || {}; - widget.options.position = extend( - {}, - visualizationOptions, - pick(widget.options.position, ['col', 'row', 'sizeX', 'sizeY', 'autoHeight']), - ); + save() { + const data = pick(this, 'options', 'text', 'id', 'width', 'dashboard_id', 'visualization_id'); - if (widget.options.position.sizeY < 0) { - widget.options.position.autoHeight = true; - } + let url = 'api/widgets'; + if (this.id) { + url = `${url}/${this.id}`; + } - const result = new WidgetResource(widget); + return $http.post(url, data).then((response) => { + each(response.data, (v, k) => { + this[k] = v; + }); - // Save original position (create a shallow copy) - result.$originalPosition = extend({}, result.options.position); + return this; + }); + } - return result; + delete() { + const url = `api/widgets/${this.id}`; + return $http.delete(url); + } } - return WidgetConstructor; + return Widget; } export default function init(ngModule) { - ngModule.factory('Widget', Widget); + ngModule.factory('Widget', WidgetFactory); } diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py index 939f2fbc0f..5376a8074b 100644 --- a/redash/handlers/widgets.py +++ b/redash/handlers/widgets.py @@ -44,7 +44,7 @@ def post(self): models.db.session.commit() models.db.session.commit() - return {'widget': widget.to_dict()} + return widget.to_dict() class WidgetResource(BaseResource): diff --git a/tests/handlers/test_widgets.py b/tests/handlers/test_widgets.py index f928c3d0c1..702ef6f828 100644 --- a/tests/handlers/test_widgets.py +++ b/tests/handlers/test_widgets.py @@ -54,7 +54,7 @@ def test_create_text_widget(self): rv = self.make_request('post', '/api/widgets', data=data) self.assertEquals(rv.status_code, 200) - self.assertEquals(rv.json['widget']['text'], 'Sample text.') + self.assertEquals(rv.json['text'], 'Sample text.') def test_delete_widget(self): widget = self.factory.create_widget() diff --git a/webpack.config.js b/webpack.config.js index c3a3ddf320..2af1d7376e 100644 --- a/webpack.config.js +++ b/webpack.config.js @@ -1,94 +1,87 @@ /* eslint-disable */ -const fs = require('fs'); -const webpack = require('webpack'); -const HtmlWebpackPlugin = require('html-webpack-plugin'); +const fs = require("fs"); +const webpack = require("webpack"); +const HtmlWebpackPlugin = require("html-webpack-plugin"); const ExtractTextPlugin = require("extract-text-webpack-plugin"); -const WebpackBuildNotifierPlugin = require('webpack-build-notifier'); -const ManifestPlugin = require('webpack-manifest-plugin'); -const CopyWebpackPlugin = require('copy-webpack-plugin'); -const LessPluginAutoPrefix = require('less-plugin-autoprefix'); -const BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin; -const path = require('path'); +const WebpackBuildNotifierPlugin = require("webpack-build-notifier"); +const ManifestPlugin = require("webpack-manifest-plugin"); +const CopyWebpackPlugin = require("copy-webpack-plugin"); +const LessPluginAutoPrefix = require("less-plugin-autoprefix"); +const BundleAnalyzerPlugin = require("webpack-bundle-analyzer").BundleAnalyzerPlugin; +const path = require("path"); -const redashBackend = process.env.REDASH_BACKEND || 'http://localhost:5000'; +const 
redashBackend = process.env.REDASH_BACKEND || "http://localhost:5000"; -const basePath = fs.realpathSync(path.join(__dirname, 'client')); -const appPath = fs.realpathSync(path.join(__dirname, 'client', 'app')); +const basePath = fs.realpathSync(path.join(__dirname, "client")); +const appPath = fs.realpathSync(path.join(__dirname, "client", "app")); const config = { entry: { - app: [ - './client/app/index.js', - './client/app/assets/less/main.less', - ], - server: [ - './client/app/assets/less/server.less', - ], + app: ["./client/app/index.js", "./client/app/assets/less/main.less"], + server: ["./client/app/assets/less/server.less"] }, output: { - path: path.join(basePath, './dist'), - filename: '[name].js', - publicPath: '/static/' + path: path.join(basePath, "./dist"), + filename: "[name].js", + publicPath: "/static/" }, resolve: { alias: { - '@': appPath, + "@": appPath, // Currently `lodash` is used only by `gridstack.js`, but it can work // with `underscore` as well, so set an alias to avoid bundling both `lodash` and // `underscore`. When adding new libraries, check if they can work // with `underscore`, otherwise remove this line - 'lodash': 'underscore', + lodash: "underscore" } }, plugins: [ - new WebpackBuildNotifierPlugin({title: 'Redash'}), + new WebpackBuildNotifierPlugin({ title: "Redash" }), new webpack.DefinePlugin({ - ON_TEST: process.env.NODE_ENV === 'test' + ON_TEST: process.env.NODE_ENV === "test" }), // Enforce angular to use jQuery instead of jqLite - new webpack.ProvidePlugin({'window.jQuery': 'jquery'}), + new webpack.ProvidePlugin({ "window.jQuery": "jquery" }), // bundle only default `moment` locale (`en`) new webpack.ContextReplacementPlugin(/moment[\/\\]locale$/, /en/), new webpack.optimize.CommonsChunkPlugin({ - name: 'vendor', - minChunks: function (module, count) { + name: "vendor", + minChunks: function(module, count) { // any required modules inside node_modules are extracted to vendor return ( module.resource && /\.js$/.test(module.resource) && - module.resource.indexOf( - path.join(__dirname, './node_modules') - ) === 0 - ) + module.resource.indexOf(path.join(__dirname, "./node_modules")) === 0 + ); } }), // extract webpack runtime and module manifest to its own file in order to // prevent vendor hash from being updated whenever app bundle is updated new webpack.optimize.CommonsChunkPlugin({ - name: 'manifest', - chunks: ['vendor'] + name: "manifest", + chunks: ["vendor"] }), new HtmlWebpackPlugin({ - template: './client/app/index.html', - filename: 'index.html', - excludeChunks: ['server'], + template: "./client/app/index.html", + filename: "index.html", + excludeChunks: ["server"] }), new HtmlWebpackPlugin({ - template: './client/app/multi_org.html', - filename: 'multi_org.html', - excludeChunks: ['server'], + template: "./client/app/multi_org.html", + filename: "multi_org.html", + excludeChunks: ["server"] }), new ExtractTextPlugin({ - filename: '[name].[chunkhash].css', + filename: "[name].[chunkhash].css" }), new ManifestPlugin({ - fileName: 'asset-manifest.json' + fileName: "asset-manifest.json" }), new CopyWebpackPlugin([ - { from: 'client/app/assets/robots.txt' }, - { from: 'client/app/assets/css/login.css', to: 'styles/login.css' }, - { from: 'node_modules/jquery/dist/jquery.min.js', to: 'js/jquery.min.js' }, + { from: "client/app/assets/robots.txt" }, + { from: "client/app/assets/css/login.css", to: "styles/login.css" }, + { from: "node_modules/jquery/dist/jquery.min.js", to: "js/jquery.min.js" } ]) ], @@ -97,113 +90,122 @@ const config = { { 
test: /\.js$/, exclude: /node_modules/, - use: ['babel-loader', 'eslint-loader'] + use: ["babel-loader", "eslint-loader"] }, { test: /\.html$/, exclude: [/node_modules/, /index\.html/], - use: [{ - loader: 'raw-loader' - }] + use: [ + { + loader: "raw-loader" + } + ] }, { test: /\.css$/, - use: ExtractTextPlugin.extract([{ - loader: 'css-loader', - options: { - minimize: process.env.NODE_ENV === 'production' + use: ExtractTextPlugin.extract([ + { + loader: "css-loader", + options: { + minimize: process.env.NODE_ENV === "production" + } } - }]) + ]) }, { test: /\.less$/, use: ExtractTextPlugin.extract([ { - loader: 'css-loader', + loader: "css-loader", options: { - minimize: process.env.NODE_ENV === 'production' + minimize: process.env.NODE_ENV === "production" } - }, { - loader: 'less-loader', + }, + { + loader: "less-loader", options: { - plugins: [ - new LessPluginAutoPrefix({browsers: ['last 3 versions']}) - ] + plugins: [new LessPluginAutoPrefix({ browsers: ["last 3 versions"] })] } } ]) }, { test: /\.(png|jpe?g|gif|svg)(\?.*)?$/, - use: [{ - loader: 'file-loader', - options: { - context: path.resolve(appPath, './assets/images/'), - outputPath: 'images/', - name: '[path][name].[ext]', + use: [ + { + loader: "file-loader", + options: { + context: path.resolve(appPath, "./assets/images/"), + outputPath: "images/", + name: "[path][name].[ext]" + } } - }] + ] }, { test: /\.geo\.json$/, - use: [{ - loader: 'file-loader', - options: { - outputPath: 'data/', - name: '[hash:7].[name].[ext]', + use: [ + { + loader: "file-loader", + options: { + outputPath: "data/", + name: "[hash:7].[name].[ext]" + } } - }] + ] }, { test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/, - use: [{ - loader: 'url-loader', - options: { - limit: 10000, - name: 'fonts/[name].[hash:7].[ext]' + use: [ + { + loader: "url-loader", + options: { + limit: 10000, + name: "fonts/[name].[hash:7].[ext]" + } } - }] + ] } ] }, - devtool: 'cheap-eval-module-source-map', + devtool: "cheap-eval-module-source-map", stats: { modules: false, - chunkModules: false, + chunkModules: false }, watchOptions: { - ignored: /\.sw.$/, + ignored: /\.sw.$/ }, devServer: { inline: true, - index: '/static/index.html', + index: "/static/index.html", historyApiFallback: { - index: '/static/index.html', - rewrites: [{from: /./, to: '/static/index.html'}], + index: "/static/index.html", + rewrites: [{ from: /./, to: "/static/index.html" }] }, contentBase: false, - publicPath: '/static/', + publicPath: "/static/", proxy: [ { - context: ['/login', '/logout', '/invite', '/setup', '/status.json', '/api', '/oauth'], - target: redashBackend + '/', - changeOrigin: true, - secure: false, + context: ["/login", "/logout", "/invite", "/setup", "/status.json", "/api", "/oauth"], + target: redashBackend + "/", + changeOrigin: false, + secure: false }, { - context: (path) => { + context: path => { // CSS/JS for server-rendered pages should be served from backend return /^\/static\/[a-z]+\.[0-9a-fA-F]+\.(css|js)$/.test(path); }, - target: redashBackend + '/', + target: redashBackend + "/", changeOrigin: true, - secure: false, + secure: false } ], stats: { modules: false, - chunkModules: false, - }, + chunkModules: false + } } }; @@ -211,15 +213,17 @@ if (process.env.DEV_SERVER_HOST) { config.devServer.host = process.env.DEV_SERVER_HOST; } -if (process.env.NODE_ENV === 'production') { - config.output.filename = '[name].[chunkhash].js'; - config.plugins.push(new webpack.optimize.UglifyJsPlugin({ - sourceMap: true, - compress: { - warnings: true - } - })); - config.devtool = 
'source-map'; +if (process.env.NODE_ENV === "production") { + config.output.filename = "[name].[chunkhash].js"; + config.plugins.push( + new webpack.optimize.UglifyJsPlugin({ + sourceMap: true, + compress: { + warnings: true + } + }) + ); + config.devtool = "source-map"; } if (process.env.BUNDLE_ANALYZER) {
pantsbuild__pants-20802
jvm_exclude with Group Only Fails Parsing by Coursier
**Describe the bug**
Running `pants generate-lockfiles` when a `jvm_artifact` contains a `jvm_exclude` that only specifies a group will fail with a "Failed to parse [group-name]" message from Coursier. This is contrary to the documentation for `jvm_exclude`, which states: "`jvm_exclude`: Exclude the given `artifact` and `group`, or all artifacts from the given `group`."

**Pants version**
2.20.0rc2

**OS**
macOS

**Additional info**
Example Repo
https://github.com/NGustafson/pants-examples/blob/main/3rdparty/jvm/BUILD

This repo has a single `jvm_artifact` with nothing else configured. Attempting to run `pants generate-lockfiles` will cause this error:
```
pants generate-lockfiles
[ERROR] 1 Exception encountered:

Engine traceback:
  in `generate-lockfiles` goal

ProcessExecutionFailure: Process 'Running `coursier fetch` against 1 requirement: org.slf4j:slf4j-log4j12:2.0.12' failed with exit code 1.
stdout:

stderr:
+ coursier_exe=__coursier/./cs-aarch64-apple-darwin
+ shift
+ json_output_file=coursier_report.json
+ shift
++ pwd
+ working_dir=/private/var/folders/cm/gmrdwxcn7tv_cct4dzg38w91kjyl1q/T/pants-sandbox-aM4FVB
+ __coursier/./cs-aarch64-apple-darwin fetch -r=https://maven-central.storage-download.googleapis.com/maven2 -r=https://repo1.maven.org/maven2 --no-default --json-output-file=coursier_report.json org.slf4j:slf4j-log4j12:2.0.12 --local-exclude-file PANTS_RESOLVE_EXCLUDES
Failed to parse org.slf4j
Failed to parse org.slf4j
```
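The root cause is visible in `JvmArtifactExclusion.to_coord_str` in `src/python/pants/jvm/target_types.py` (see the before/after files below): a group-only `jvm_exclude` was serialized as the bare group name, while the exclude file handed to `coursier fetch` via `--local-exclude-file` appears to expect `group:artifact` entries, hence the "Failed to parse org.slf4j" error. Below is a minimal, self-contained sketch of the corrected serialization; it mirrors the behavior of the fix in the after file (which appends `":*"` when no artifact is given) but is not the literal patch.

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass(frozen=True)
class JvmArtifactExclusion:
    """Simplified sketch of the `jvm_exclude` BUILD-file object from target_types.py."""

    group: str
    artifact: str | None = None

    def to_coord_str(self) -> str:
        # Coursier's exclude-file entries are `group:artifact` pairs, so a
        # group-only exclusion must be written with a `*` wildcard artifact
        # rather than as a bare group (which Coursier fails to parse).
        return f"{self.group}:{self.artifact if self.artifact else '*'}"


# A group-only exclusion now serializes to something Coursier accepts:
assert JvmArtifactExclusion(group="org.slf4j").to_coord_str() == "org.slf4j:*"
assert (
    JvmArtifactExclusion(group="org.slf4j", artifact="slf4j-log4j12").to_coord_str()
    == "org.slf4j:slf4j-log4j12"
)
```

The actual change in `target_types.py` keeps the original `if self.artifact:` branch and adds an `else` branch that appends `":*"`, which is behaviorally equivalent to the sketch above.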
[ { "content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport re\nimport xml.etree.ElementTree as ET\nfrom abc import ABC, ABCMeta, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Callable, ClassVar, Iterable, Iterator, Optional, Tuple, Type, Union\n\nfrom pants.build_graph.build_file_aliases import BuildFileAliases\nfrom pants.core.goals.generate_lockfiles import UnrecognizedResolveNamesError\nfrom pants.core.goals.package import OutputPathField\nfrom pants.core.goals.run import RestartableField, RunFieldSet, RunInSandboxBehavior, RunRequest\nfrom pants.core.goals.test import TestExtraEnvVarsField, TestTimeoutField\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import Digest, DigestContents\nfrom pants.engine.internals.selectors import Get\nfrom pants.engine.rules import Rule, collect_rules, rule\nfrom pants.engine.target import (\n COMMON_TARGET_FIELDS,\n AsyncFieldMixin,\n BoolField,\n Dependencies,\n DictStringToStringSequenceField,\n FieldDefaultFactoryRequest,\n FieldDefaultFactoryResult,\n GeneratedTargets,\n GenerateTargetsRequest,\n InvalidFieldException,\n InvalidTargetException,\n OptionalSingleSourceField,\n SequenceField,\n SingleSourceField,\n SpecialCasedDependencies,\n StringField,\n StringSequenceField,\n Target,\n TargetGenerator,\n)\nfrom pants.engine.unions import UnionMembership, UnionRule\nfrom pants.jvm.resolve.coordinate import Coordinate\nfrom pants.jvm.subsystems import JvmSubsystem\nfrom pants.util.docutil import git_url\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.memo import memoized\nfrom pants.util.strutil import bullet_list, help_text, pluralize, softwrap\n\n# -----------------------------------------------------------------------------------------------\n# Generic resolve support fields\n# -----------------------------------------------------------------------------------------------\n\n\nclass JvmDependenciesField(Dependencies):\n pass\n\n\nclass JvmResolveField(StringField, AsyncFieldMixin):\n alias = \"resolve\"\n required = False\n help = help_text(\n \"\"\"\n The resolve from `[jvm].resolves` to use when compiling this target.\n\n If not defined, will default to `[jvm].default_resolve`.\n \"\"\"\n # TODO: Document expectations for dependencies once we validate that.\n )\n\n def normalized_value(self, jvm_subsystem: JvmSubsystem) -> str:\n \"\"\"Get the value after applying the default and validating that the key is recognized.\"\"\"\n resolve = self.value or jvm_subsystem.default_resolve\n if resolve not in jvm_subsystem.resolves:\n raise UnrecognizedResolveNamesError(\n [resolve],\n jvm_subsystem.resolves.keys(),\n description_of_origin=f\"the field `{self.alias}` in the target {self.address}\",\n )\n return resolve\n\n\nclass JvmJdkField(StringField):\n alias = \"jdk\"\n required = False\n help = help_text(\n \"\"\"\n The major version of the JDK that this target should be built with. 
If not defined,\n will default to `[jvm].default_source_jdk`.\n \"\"\"\n )\n\n\nclass PrefixedJvmJdkField(JvmJdkField):\n alias = \"jvm_jdk\"\n\n\nclass PrefixedJvmResolveField(JvmResolveField):\n alias = \"jvm_resolve\"\n\n\n# -----------------------------------------------------------------------------------------------\n# Targets that can be called with `./pants run` or `experimental_run_in_sandbox`\n# -----------------------------------------------------------------------------------------------\nNO_MAIN_CLASS = \"org.pantsbuild.meta.no.main.class\"\n\n\nclass JvmMainClassNameField(StringField):\n alias = \"main\"\n required = False\n default = None\n help = help_text(\n \"\"\"\n `.`-separated name of the JVM class containing the `main()` method to be called when\n executing this target. If not supplied, this will be calculated automatically, either by\n inspecting the existing manifest (for 3rd-party JARs), or by inspecting the classes inside\n the JAR, looking for a valid `main` method. If a value cannot be calculated automatically,\n you must supply a value for `run` to succeed.\n \"\"\"\n )\n\n\n@dataclass(frozen=True)\nclass JvmRunnableSourceFieldSet(RunFieldSet):\n run_in_sandbox_behavior = RunInSandboxBehavior.RUN_REQUEST_HERMETIC\n jdk_version: JvmJdkField\n main_class: JvmMainClassNameField\n\n @classmethod\n def jvm_rules(cls) -> Iterable[Union[Rule, UnionRule]]:\n yield from _jvm_source_run_request_rule(cls)\n yield from cls.rules()\n\n\n@dataclass(frozen=True)\nclass GenericJvmRunRequest:\n \"\"\"Allows the use of a generic rule to return a `RunRequest` based on the field set.\"\"\"\n\n field_set: JvmRunnableSourceFieldSet\n\n\n# -----------------------------------------------------------------------------------------------\n# `jvm_artifact` targets\n# -----------------------------------------------------------------------------------------------\n\n_DEFAULT_PACKAGE_MAPPING_URL = git_url(\n \"src/python/pants/jvm/dependency_inference/jvm_artifact_mappings.py\"\n)\n\n\nclass JvmArtifactGroupField(StringField):\n alias = \"group\"\n required = True\n value: str\n help = help_text(\n \"\"\"\n The 'group' part of a Maven-compatible coordinate to a third-party JAR artifact.\n\n For the JAR coordinate `com.google.guava:guava:30.1.1-jre`, the group is `com.google.guava`.\n \"\"\"\n )\n\n\nclass JvmArtifactArtifactField(StringField):\n alias = \"artifact\"\n required = True\n value: str\n help = help_text(\n \"\"\"\n The 'artifact' part of a Maven-compatible coordinate to a third-party JAR artifact.\n\n For the JAR coordinate `com.google.guava:guava:30.1.1-jre`, the artifact is `guava`.\n \"\"\"\n )\n\n\nclass JvmArtifactVersionField(StringField):\n alias = \"version\"\n required = True\n value: str\n help = help_text(\n \"\"\"\n The 'version' part of a Maven-compatible coordinate to a third-party JAR artifact.\n\n For the JAR coordinate `com.google.guava:guava:30.1.1-jre`, the version is `30.1.1-jre`.\n \"\"\"\n )\n\n\nclass JvmArtifactUrlField(StringField):\n alias = \"url\"\n required = False\n help = help_text(\n \"\"\"\n A URL that points to the location of this artifact.\n\n If specified, Pants will not fetch this artifact from default Maven repositories, and\n will instead fetch the artifact from this URL. To use default maven\n repositories, do not set this value.\n\n Note that `file:` URLs are not supported. 
Instead, use the `jar` field for local\n artifacts.\n \"\"\"\n )\n\n\nclass JvmArtifactJarSourceField(OptionalSingleSourceField):\n alias = \"jar\"\n expected_file_extensions = (\".jar\",)\n help = help_text(\n \"\"\"\n A local JAR file that provides this artifact to the lockfile resolver, instead of a\n Maven repository.\n\n Path is relative to the BUILD file.\n\n Use the `url` field for remote artifacts.\n \"\"\"\n )\n\n @classmethod\n def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:\n value_or_default = super().compute_value(raw_value, address)\n if value_or_default and value_or_default.startswith(\"file:\"):\n raise InvalidFieldException(\n softwrap(\n f\"\"\"\n The `{cls.alias}` field does not support `file:` URLS, but the target\n {address} sets the field to `{value_or_default}`.\n\n Instead, use the `jar` field to specify the relative path to the local jar file.\n \"\"\"\n )\n )\n return value_or_default\n\n\nclass JvmArtifactPackagesField(StringSequenceField):\n alias = \"packages\"\n help = help_text(\n f\"\"\"\n The JVM packages this artifact provides for the purposes of dependency inference.\n\n For example, the JVM artifact `junit:junit` might provide `[\"org.junit.**\"]`.\n\n Usually you can leave this field off. If unspecified, Pants will fall back to the\n `[java-infer].third_party_import_mapping`, then to a built in mapping\n ({_DEFAULT_PACKAGE_MAPPING_URL}), and then finally it will default to\n the normalized `group` of the artifact. For example, in the absence of any other mapping\n the artifact `io.confluent:common-config` would default to providing\n `[\"io.confluent.**\"]`.\n\n The package path may be made recursive to match symbols in subpackages\n by adding `.**` to the end of the package path. 
For example, specify `[\"org.junit.**\"]`\n to infer a dependency on the artifact for any file importing a symbol from `org.junit` or\n its subpackages.\n \"\"\"\n )\n\n\nclass JvmArtifactForceVersionField(BoolField):\n alias = \"force_version\"\n default = False\n help = help_text(\n \"\"\"\n Force artifact version during resolution.\n\n If set, pants will pass `--force-version` argument to `coursier fetch` for this artifact.\n \"\"\"\n )\n\n\nclass JvmProvidesTypesField(StringSequenceField):\n alias = \"experimental_provides_types\"\n help = help_text(\n \"\"\"\n Signals that the specified types should be fulfilled by these source files during\n dependency inference.\n\n This allows for specific types within packages that are otherwise inferred as\n belonging to `jvm_artifact` targets to be unambiguously inferred as belonging\n to this first-party source.\n\n If a given type is defined, at least one source file captured by this target must\n actually provide that symbol.\n \"\"\"\n )\n\n\n@dataclass(frozen=True)\nclass JvmArtifactExclusion:\n alias: ClassVar[str] = \"jvm_exclude\"\n help: ClassVar[str | Callable[[], str]] = help_text(\n \"\"\"\n Exclude the given `artifact` and `group`, or all artifacts from the given `group`.\n \"\"\"\n )\n\n group: str\n artifact: str | None = None\n\n def validate(self, _: Address) -> set[str]:\n return set()\n\n def to_coord_str(self) -> str:\n result = self.group\n if self.artifact:\n result += f\":{self.artifact}\"\n return result\n\n\ndef _jvm_artifact_exclusions_field_help(\n supported_exclusions: Callable[[], Iterable[type[JvmArtifactExclusion]]]\n) -> str | Callable[[], str]:\n return help_text(\n lambda: f\"\"\"\n A list of exclusions for unversioned coordinates that should be excluded\n as dependencies when this artifact is resolved.\n\n This does not prevent this artifact from being included in the resolve as a dependency\n of other artifacts that depend on it, and is currently intended as a way to resolve\n version conflicts in complex resolves.\n\n Supported exclusions are:\n {bullet_list(f'`{exclusion.alias}`: {exclusion.help}' for exclusion in supported_exclusions())}\n \"\"\"\n )\n\n\nclass JvmArtifactExclusionsField(SequenceField[JvmArtifactExclusion]):\n alias = \"exclusions\"\n help = _jvm_artifact_exclusions_field_help(\n lambda: JvmArtifactExclusionsField.supported_exclusion_types\n )\n\n supported_exclusion_types: ClassVar[tuple[type[JvmArtifactExclusion], ...]] = (\n JvmArtifactExclusion,\n )\n expected_element_type = JvmArtifactExclusion\n expected_type_description = \"an iterable of JvmArtifactExclusionRule\"\n\n @classmethod\n def compute_value(\n cls, raw_value: Optional[Iterable[JvmArtifactExclusion]], address: Address\n ) -> Optional[Tuple[JvmArtifactExclusion, ...]]:\n computed_value = super().compute_value(raw_value, address)\n\n if computed_value:\n errors: list[str] = []\n for exclusion_rule in computed_value:\n err = exclusion_rule.validate(address)\n if err:\n errors.extend(err)\n\n if errors:\n raise InvalidFieldException(\n softwrap(\n f\"\"\"\n Invalid value for `{JvmArtifactExclusionsField.alias}` field at target\n {address}. 
Found following errors:\n\n {bullet_list(errors)}\n \"\"\"\n )\n )\n return computed_value\n\n\nclass JvmArtifactResolveField(JvmResolveField):\n help = help_text(\n \"\"\"\n The resolve from `[jvm].resolves` that this artifact should be included in.\n\n If not defined, will default to `[jvm].default_resolve`.\n\n When generating a lockfile for a particular resolve via the `coursier-resolve` goal,\n it will include all artifacts that are declared compatible with that resolve. First-party\n targets like `java_source` and `scala_source` also declare which resolve they use\n via the `resolve` field; so, for your first-party code to use\n a particular `jvm_artifact` target, that artifact must be included in the resolve\n used by that code.\n \"\"\"\n )\n\n\n@dataclass(frozen=True)\nclass JvmArtifactFieldSet(JvmRunnableSourceFieldSet):\n group: JvmArtifactGroupField\n artifact: JvmArtifactArtifactField\n version: JvmArtifactVersionField\n packages: JvmArtifactPackagesField\n url: JvmArtifactUrlField\n force_version: JvmArtifactForceVersionField\n\n required_fields = (\n JvmArtifactGroupField,\n JvmArtifactArtifactField,\n JvmArtifactVersionField,\n JvmArtifactPackagesField,\n JvmArtifactForceVersionField,\n )\n\n\nclass JvmArtifactTarget(Target):\n alias = \"jvm_artifact\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n *JvmArtifactFieldSet.required_fields,\n JvmArtifactUrlField, # TODO: should `JvmArtifactFieldSet` have an `all_fields` field?\n JvmArtifactJarSourceField,\n JvmArtifactResolveField,\n JvmArtifactExclusionsField,\n JvmJdkField,\n JvmMainClassNameField,\n )\n help = help_text(\n \"\"\"\n A third-party JVM artifact, as identified by its Maven-compatible coordinate.\n\n That is, an artifact identified by its `group`, `artifact`, and `version` components.\n\n Each artifact is associated with one or more resolves (a logical name you give to a\n lockfile). For this artifact to be used by your first-party code, it must be\n associated with the resolve(s) used by that code. See the `resolve` field.\n \"\"\"\n )\n\n def validate(self) -> None:\n if self[JvmArtifactJarSourceField].value and self[JvmArtifactUrlField].value:\n raise InvalidTargetException(\n f\"You cannot specify both the `url` and `jar` fields, but both were set on the \"\n f\"`{self.alias}` target {self.address}.\"\n )\n\n\n# -----------------------------------------------------------------------------------------------\n# Generate `jvm_artifact` targets from pom.xml\n# -----------------------------------------------------------------------------------------------\n\n\nclass PomXmlSourceField(SingleSourceField):\n default = \"pom.xml\"\n required = False\n\n\nclass JvmArtifactsPackageMappingField(DictStringToStringSequenceField):\n alias = \"package_mapping\"\n help = help_text(\n f\"\"\"\n A mapping of jvm artifacts to a list of the packages they provide.\n\n For example, `{{\"com.google.guava:guava\": [\"com.google.common.**\"]}}`.\n\n Any unspecified jvm artifacts will use a default. 
See the\n `{JvmArtifactPackagesField.alias}` field from the `{JvmArtifactTarget.alias}`\n target for more information.\n \"\"\"\n )\n value: FrozenDict[str, tuple[str, ...]]\n default: ClassVar[Optional[FrozenDict[str, tuple[str, ...]]]] = FrozenDict()\n\n @classmethod\n def compute_value( # type: ignore[override]\n cls, raw_value: dict[str, Iterable[str]], address: Address\n ) -> FrozenDict[tuple[str, str], tuple[str, ...]]:\n value_or_default = super().compute_value(raw_value, address)\n assert value_or_default is not None\n return FrozenDict(\n {\n cls._parse_coord(coord): tuple(packages)\n for coord, packages in value_or_default.items()\n }\n )\n\n @classmethod\n def _parse_coord(cls, coord: str) -> tuple[str, str]:\n group, artifact = coord.split(\":\")\n return group, artifact\n\n\nclass JvmArtifactsTargetGenerator(TargetGenerator):\n alias = \"jvm_artifacts\"\n core_fields = (\n PomXmlSourceField,\n JvmArtifactsPackageMappingField,\n *COMMON_TARGET_FIELDS,\n )\n generated_target_cls = JvmArtifactTarget\n copied_fields = COMMON_TARGET_FIELDS\n moved_fields = (JvmArtifactResolveField,)\n help = help_text(\n \"\"\"\n Generate a `jvm_artifact` target for each dependency in pom.xml file.\n \"\"\"\n )\n\n\nclass GenerateFromPomXmlRequest(GenerateTargetsRequest):\n generate_from = JvmArtifactsTargetGenerator\n\n\n@rule(\n desc=(\"Generate `jvm_artifact` targets from pom.xml\"),\n level=LogLevel.DEBUG,\n)\nasync def generate_from_pom_xml(\n request: GenerateFromPomXmlRequest,\n union_membership: UnionMembership,\n) -> GeneratedTargets:\n generator = request.generator\n pom_xml = await Get(\n SourceFiles,\n SourceFilesRequest([generator[PomXmlSourceField]]),\n )\n files = await Get(DigestContents, Digest, pom_xml.snapshot.digest)\n if not files:\n raise FileNotFoundError(f\"pom.xml not found: {generator[PomXmlSourceField].value}\")\n\n mapping = request.generator[JvmArtifactsPackageMappingField].value\n coordinates = parse_pom_xml(files[0].content, pom_xml_path=pom_xml.snapshot.files[0])\n targets = (\n JvmArtifactTarget(\n unhydrated_values={\n \"group\": coord.group,\n \"artifact\": coord.artifact,\n \"version\": coord.version,\n \"packages\": mapping.get((coord.group, coord.artifact)),\n **request.template,\n },\n address=request.template_address.create_generated(coord.artifact),\n )\n for coord in coordinates\n )\n return GeneratedTargets(request.generator, targets)\n\n\ndef parse_pom_xml(content: bytes, pom_xml_path: str) -> Iterator[Coordinate]:\n root = ET.fromstring(content.decode(\"utf-8\"))\n match = re.match(r\"^(\\{.*\\})project$\", root.tag)\n if not match:\n raise ValueError(\n f\"Unexpected root tag `{root.tag}` in {pom_xml_path}, expected tag `project`\"\n )\n\n namespace = match.group(1)\n for dependency in root.iter(f\"{namespace}dependency\"):\n yield Coordinate(\n group=get_child_text(dependency, f\"{namespace}groupId\"),\n artifact=get_child_text(dependency, f\"{namespace}artifactId\"),\n version=get_child_text(dependency, f\"{namespace}version\"),\n )\n\n\ndef get_child_text(parent: ET.Element, child: str) -> str:\n tag = parent.find(child)\n if tag is None:\n raise ValueError(f\"missing element: {child}\")\n text = tag.text\n if text is None:\n raise ValueError(f\"empty element: {child}\")\n return text\n\n\n# -----------------------------------------------------------------------------------------------\n# JUnit test support field(s)\n# -----------------------------------------------------------------------------------------------\n\n\nclass 
JunitTestSourceField(SingleSourceField, metaclass=ABCMeta):\n \"\"\"A marker that indicates that a source field represents a JUnit test.\"\"\"\n\n\nclass JunitTestTimeoutField(TestTimeoutField):\n pass\n\n\nclass JunitTestExtraEnvVarsField(TestExtraEnvVarsField):\n pass\n\n\n# -----------------------------------------------------------------------------------------------\n# JAR support fields\n# -----------------------------------------------------------------------------------------------\n\n\nclass JvmRequiredMainClassNameField(JvmMainClassNameField):\n required = True\n default = None\n help = help_text(\n \"\"\"\n `.`-separated name of the JVM class containing the `main()` method to be called when\n executing this JAR.\n \"\"\"\n )\n\n\nclass JvmShadingRule(ABC):\n \"\"\"Base class for defining JAR shading rules as valid aliases in BUILD files.\n\n Subclasses need to provide with an `alias` and a `help` message. The `alias` represents\n the name that will be used in BUILD files to instantiate the given subclass.\n\n Set the `help` class property with a description, which will be used in `./pants help`. For the\n best rendering, use soft wrapping (e.g. implicit string concatenation) within paragraphs, but\n hard wrapping (`\\n`) to separate distinct paragraphs and/or lists.\n \"\"\"\n\n alias: ClassVar[str]\n help: ClassVar[str | Callable[[], str]]\n\n @abstractmethod\n def encode(self) -> str:\n pass\n\n @abstractmethod\n def validate(self) -> set[str]:\n pass\n\n @staticmethod\n def _validate_field(value: str, *, name: str, invalid_chars: str) -> set[str]:\n errors = []\n for ch in invalid_chars:\n if ch in value:\n errors.append(f\"`{name}` can not contain the character `{ch}`.\")\n return set(errors)\n\n def __repr__(self) -> str:\n fields = [f\"{fld.name}={repr(getattr(self, fld.name))}\" for fld in dataclasses.fields(self)] # type: ignore[arg-type]\n return f\"{self.alias}({', '.join(fields)})\"\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingRenameRule(JvmShadingRule):\n alias = \"shading_rename\"\n help = \"Renames all occurrences of the given `pattern` by the `replacement`.\"\n\n pattern: str\n replacement: str\n\n def encode(self) -> str:\n return f\"rule {self.pattern} {self.replacement}\"\n\n def validate(self) -> set[str]:\n errors: list[str] = []\n errors.extend(\n JvmShadingRule._validate_field(self.pattern, name=\"pattern\", invalid_chars=\"/\")\n )\n errors.extend(\n JvmShadingRule._validate_field(self.replacement, name=\"replacement\", invalid_chars=\"/\")\n )\n return set(errors)\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingRelocateRule(JvmShadingRule):\n alias = \"shading_relocate\"\n help = help_text(\n \"\"\"\n Relocates the classes under the given `package` into the new package name.\n The default target package is `__shaded_by_pants__` if none provided in\n the `into` parameter.\n \"\"\"\n )\n\n package: str\n into: str | None = None\n\n def encode(self) -> str:\n if not self.into:\n target_suffix = \"__shaded_by_pants__\"\n else:\n target_suffix = self.into\n return f\"rule {self.package}.** {target_suffix}.@1\"\n\n def validate(self) -> set[str]:\n errors: list[str] = []\n errors.extend(\n JvmShadingRule._validate_field(self.package, name=\"package\", invalid_chars=\"/*\")\n )\n if self.into:\n errors.extend(\n JvmShadingRule._validate_field(self.into, name=\"into\", invalid_chars=\"/*\")\n )\n return set(errors)\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingZapRule(JvmShadingRule):\n alias = \"shading_zap\"\n help = \"Removes 
from the final artifact the occurrences of the `pattern`.\"\n\n pattern: str\n\n def encode(self) -> str:\n return f\"zap {self.pattern}\"\n\n def validate(self) -> set[str]:\n return JvmShadingRule._validate_field(self.pattern, name=\"pattern\", invalid_chars=\"/\")\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingKeepRule(JvmShadingRule):\n alias = \"shading_keep\"\n help = help_text(\n \"\"\"\n Keeps in the final artifact the occurrences of the `pattern`\n (and removes anything else).\n \"\"\"\n )\n\n pattern: str\n\n def encode(self) -> str:\n return f\"keep {self.pattern}\"\n\n def validate(self) -> set[str]:\n return JvmShadingRule._validate_field(self.pattern, name=\"pattern\", invalid_chars=\"/\")\n\n\nJVM_SHADING_RULE_TYPES: list[Type[JvmShadingRule]] = [\n JvmShadingRelocateRule,\n JvmShadingRenameRule,\n JvmShadingZapRule,\n JvmShadingKeepRule,\n]\n\n\ndef _shading_rules_field_help(intro: str) -> str:\n return softwrap(\n f\"\"\"\n {intro}\n\n There are {pluralize(len(JVM_SHADING_RULE_TYPES), \"possible shading rule\")} available,\n which are as follows:\n {bullet_list([f'`{rule.alias}`: {rule.help}' for rule in JVM_SHADING_RULE_TYPES])}\n\n When defining shading rules, just add them in this field using the previously listed rule\n alias and passing along the required parameters.\n \"\"\"\n )\n\n\ndef _shading_validate_rules(shading_rules: Iterable[JvmShadingRule]) -> set[str]:\n validation_errors = []\n for shading_rule in shading_rules:\n found_errors = shading_rule.validate()\n if found_errors:\n validation_errors.append(\n \"\\n\".join(\n [\n f\"In rule `{shading_rule.alias}`:\",\n bullet_list(found_errors),\n \"\",\n ]\n )\n )\n return set(validation_errors)\n\n\nclass JvmShadingRulesField(SequenceField[JvmShadingRule], metaclass=ABCMeta):\n alias = \"shading_rules\"\n required = False\n expected_element_type = JvmShadingRule\n expected_type_description = \"an iterable of JvmShadingRule\"\n\n @classmethod\n def compute_value(\n cls, raw_value: Optional[Iterable[JvmShadingRule]], address: Address\n ) -> Optional[Tuple[JvmShadingRule, ...]]:\n computed_value = super().compute_value(raw_value, address)\n\n if computed_value:\n validation_errors = _shading_validate_rules(computed_value)\n if validation_errors:\n raise InvalidFieldException(\n \"\\n\".join(\n [\n f\"Invalid shading rules assigned to `{cls.alias}` field in target {address}:\\n\",\n *validation_errors,\n ]\n )\n )\n\n return computed_value\n\n\n# -----------------------------------------------------------------------------------------------\n# `deploy_jar` target\n# -----------------------------------------------------------------------------------------------\n\n\n@dataclass(frozen=True)\nclass DeployJarDuplicateRule:\n alias: ClassVar[str] = \"duplicate_rule\"\n valid_actions: ClassVar[tuple[str, ...]] = (\"skip\", \"replace\", \"concat\", \"concat_text\", \"throw\")\n\n pattern: str\n action: str\n\n def validate(self) -> str | None:\n if self.action not in DeployJarDuplicateRule.valid_actions:\n return softwrap(\n f\"\"\"\n Value '{self.action}' for `action` associated with pattern\n '{self.pattern}' is not valid.\n\n It must be one of {list(DeployJarDuplicateRule.valid_actions)}.\n \"\"\"\n )\n return None\n\n def __repr__(self) -> str:\n return f\"{self.alias}(pattern='{self.pattern}', action='{self.action}')\"\n\n\nclass DeployJarDuplicatePolicyField(SequenceField[DeployJarDuplicateRule]):\n alias = \"duplicate_policy\"\n help = help_text(\n f\"\"\"\n A list of the rules to apply when duplicate 
file entries are found in the final\n assembled JAR file.\n\n When defining a duplicate policy, just add `duplicate_rule` directives to this\n field as follows:\n\n Example:\n\n duplicate_policy=[\n duplicate_rule(pattern=\"^META-INF/services\", action=\"concat_text\"),\n duplicate_rule(pattern=\"^reference\\\\.conf\", action=\"concat_text\"),\n duplicate_rule(pattern=\"^org/apache/commons\", action=\"throw\"),\n ]\n\n Where:\n\n * The `pattern` field is treated as a regular expression\n * The `action` field must be one of `{list(DeployJarDuplicateRule.valid_actions)}`.\n\n Note that the order in which the rules are listed is relevant.\n \"\"\"\n )\n required = False\n\n expected_element_type = DeployJarDuplicateRule\n expected_type_description = \"a list of JAR duplicate rules\"\n\n default = (\n DeployJarDuplicateRule(pattern=\"^META-INF/services/\", action=\"concat_text\"),\n DeployJarDuplicateRule(pattern=\"^META-INF/LICENSE\", action=\"skip\"),\n )\n\n @classmethod\n def compute_value(\n cls, raw_value: Optional[Iterable[DeployJarDuplicateRule]], address: Address\n ) -> Optional[Tuple[DeployJarDuplicateRule, ...]]:\n value = super().compute_value(raw_value, address)\n if value:\n errors = []\n for duplicate_rule in value:\n err = duplicate_rule.validate()\n if err:\n errors.append(err)\n\n if errors:\n raise InvalidFieldException(\n softwrap(\n f\"\"\"\n Invalid value for `{DeployJarDuplicatePolicyField.alias}` field at target:\n {address}. Found following errors:\n\n {bullet_list(errors)}\n \"\"\"\n )\n )\n return value\n\n def value_or_default(self) -> tuple[DeployJarDuplicateRule, ...]:\n if self.value is not None:\n return self.value\n return self.default\n\n\nclass DeployJarShadingRulesField(JvmShadingRulesField):\n help = _shading_rules_field_help(\"Shading rules to be applied to the final JAR artifact.\")\n\n\nclass DeployJarExcludeFilesField(StringSequenceField):\n alias = \"exclude_files\"\n help = help_text(\n \"\"\"\n A list of patterns to exclude from the final jar.\n \"\"\"\n )\n\n\nclass DeployJarTarget(Target):\n alias = \"deploy_jar\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n RestartableField,\n OutputPathField,\n JvmDependenciesField,\n JvmRequiredMainClassNameField,\n JvmJdkField,\n JvmResolveField,\n DeployJarDuplicatePolicyField,\n DeployJarShadingRulesField,\n DeployJarExcludeFilesField,\n )\n help = help_text(\n \"\"\"\n A `jar` file with first and third-party code bundled for deploys.\n\n The JAR will contain class files for both first-party code and\n third-party dependencies, all in a common directory structure.\n \"\"\"\n )\n\n\n# -----------------------------------------------------------------------------------------------\n# `jvm_war` targets\n# -----------------------------------------------------------------------------------------------\n\n\nclass JvmWarDependenciesField(Dependencies):\n pass\n\n\nclass JvmWarDescriptorAddressField(SingleSourceField):\n alias = \"descriptor\"\n default = \"web.xml\"\n help = \"Path to a file containing the descriptor (i.e., `web.xml`) for this WAR file. 
Defaults to `web.xml`.\"\n\n\nclass JvmWarContentField(SpecialCasedDependencies):\n alias = \"content\"\n help = help_text(\n \"\"\"\n A list of addresses to `resources` and `files` targets with content to place in the\n document root of this WAR file.\n \"\"\"\n )\n\n\nclass JvmWarShadingRulesField(JvmShadingRulesField):\n help = _shading_rules_field_help(\n \"Shading rules to be applied to the individual JAR artifacts embedded in the `WEB-INF/lib` folder.\"\n )\n\n\nclass JvmWarTarget(Target):\n alias = \"jvm_war\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n JvmResolveField,\n JvmWarContentField,\n JvmWarDependenciesField,\n JvmWarDescriptorAddressField,\n JvmWarShadingRulesField,\n OutputPathField,\n )\n help = help_text(\n \"\"\"\n A JSR 154 \"web application archive\" (or \"war\") with first-party and third-party code bundled for\n deploys in Java Servlet containers.\n \"\"\"\n )\n\n\n# -----------------------------------------------------------------------------------------------\n# Dynamic Field defaults\n# -----------------------------------------------------------------------------------------------#\n\n\nclass JvmResolveFieldDefaultFactoryRequest(FieldDefaultFactoryRequest):\n field_type = JvmResolveField\n\n\n@rule\ndef jvm_resolve_field_default_factory(\n request: JvmResolveFieldDefaultFactoryRequest,\n jvm: JvmSubsystem,\n) -> FieldDefaultFactoryResult:\n return FieldDefaultFactoryResult(lambda f: f.normalized_value(jvm))\n\n\n@memoized\ndef _jvm_source_run_request_rule(cls: type[JvmRunnableSourceFieldSet]) -> Iterable[Rule]:\n from pants.jvm.run import rules as run_rules\n\n @rule(\n canonical_name_suffix=cls.__name__,\n _param_type_overrides={\"request\": cls},\n level=LogLevel.DEBUG,\n )\n async def jvm_source_run_request(request: JvmRunnableSourceFieldSet) -> RunRequest:\n return await Get(RunRequest, GenericJvmRunRequest(request))\n\n return [*run_rules(), *collect_rules(locals())]\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(GenerateTargetsRequest, GenerateFromPomXmlRequest),\n UnionRule(FieldDefaultFactoryRequest, JvmResolveFieldDefaultFactoryRequest),\n *JvmArtifactFieldSet.jvm_rules(),\n ]\n\n\ndef build_file_aliases():\n return BuildFileAliases(\n objects={\n JvmArtifactExclusion.alias: JvmArtifactExclusion,\n DeployJarDuplicateRule.alias: DeployJarDuplicateRule,\n **{rule.alias: rule for rule in JVM_SHADING_RULE_TYPES},\n }\n )\n", "path": "src/python/pants/jvm/target_types.py" } ]
[ { "content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport re\nimport xml.etree.ElementTree as ET\nfrom abc import ABC, ABCMeta, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Callable, ClassVar, Iterable, Iterator, Optional, Tuple, Type, Union\n\nfrom pants.build_graph.build_file_aliases import BuildFileAliases\nfrom pants.core.goals.generate_lockfiles import UnrecognizedResolveNamesError\nfrom pants.core.goals.package import OutputPathField\nfrom pants.core.goals.run import RestartableField, RunFieldSet, RunInSandboxBehavior, RunRequest\nfrom pants.core.goals.test import TestExtraEnvVarsField, TestTimeoutField\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import Digest, DigestContents\nfrom pants.engine.internals.selectors import Get\nfrom pants.engine.rules import Rule, collect_rules, rule\nfrom pants.engine.target import (\n COMMON_TARGET_FIELDS,\n AsyncFieldMixin,\n BoolField,\n Dependencies,\n DictStringToStringSequenceField,\n FieldDefaultFactoryRequest,\n FieldDefaultFactoryResult,\n GeneratedTargets,\n GenerateTargetsRequest,\n InvalidFieldException,\n InvalidTargetException,\n OptionalSingleSourceField,\n SequenceField,\n SingleSourceField,\n SpecialCasedDependencies,\n StringField,\n StringSequenceField,\n Target,\n TargetGenerator,\n)\nfrom pants.engine.unions import UnionMembership, UnionRule\nfrom pants.jvm.resolve.coordinate import Coordinate\nfrom pants.jvm.subsystems import JvmSubsystem\nfrom pants.util.docutil import git_url\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.memo import memoized\nfrom pants.util.strutil import bullet_list, help_text, pluralize, softwrap\n\n# -----------------------------------------------------------------------------------------------\n# Generic resolve support fields\n# -----------------------------------------------------------------------------------------------\n\n\nclass JvmDependenciesField(Dependencies):\n pass\n\n\nclass JvmResolveField(StringField, AsyncFieldMixin):\n alias = \"resolve\"\n required = False\n help = help_text(\n \"\"\"\n The resolve from `[jvm].resolves` to use when compiling this target.\n\n If not defined, will default to `[jvm].default_resolve`.\n \"\"\"\n # TODO: Document expectations for dependencies once we validate that.\n )\n\n def normalized_value(self, jvm_subsystem: JvmSubsystem) -> str:\n \"\"\"Get the value after applying the default and validating that the key is recognized.\"\"\"\n resolve = self.value or jvm_subsystem.default_resolve\n if resolve not in jvm_subsystem.resolves:\n raise UnrecognizedResolveNamesError(\n [resolve],\n jvm_subsystem.resolves.keys(),\n description_of_origin=f\"the field `{self.alias}` in the target {self.address}\",\n )\n return resolve\n\n\nclass JvmJdkField(StringField):\n alias = \"jdk\"\n required = False\n help = help_text(\n \"\"\"\n The major version of the JDK that this target should be built with. 
If not defined,\n will default to `[jvm].default_source_jdk`.\n \"\"\"\n )\n\n\nclass PrefixedJvmJdkField(JvmJdkField):\n alias = \"jvm_jdk\"\n\n\nclass PrefixedJvmResolveField(JvmResolveField):\n alias = \"jvm_resolve\"\n\n\n# -----------------------------------------------------------------------------------------------\n# Targets that can be called with `./pants run` or `experimental_run_in_sandbox`\n# -----------------------------------------------------------------------------------------------\nNO_MAIN_CLASS = \"org.pantsbuild.meta.no.main.class\"\n\n\nclass JvmMainClassNameField(StringField):\n alias = \"main\"\n required = False\n default = None\n help = help_text(\n \"\"\"\n `.`-separated name of the JVM class containing the `main()` method to be called when\n executing this target. If not supplied, this will be calculated automatically, either by\n inspecting the existing manifest (for 3rd-party JARs), or by inspecting the classes inside\n the JAR, looking for a valid `main` method. If a value cannot be calculated automatically,\n you must supply a value for `run` to succeed.\n \"\"\"\n )\n\n\n@dataclass(frozen=True)\nclass JvmRunnableSourceFieldSet(RunFieldSet):\n run_in_sandbox_behavior = RunInSandboxBehavior.RUN_REQUEST_HERMETIC\n jdk_version: JvmJdkField\n main_class: JvmMainClassNameField\n\n @classmethod\n def jvm_rules(cls) -> Iterable[Union[Rule, UnionRule]]:\n yield from _jvm_source_run_request_rule(cls)\n yield from cls.rules()\n\n\n@dataclass(frozen=True)\nclass GenericJvmRunRequest:\n \"\"\"Allows the use of a generic rule to return a `RunRequest` based on the field set.\"\"\"\n\n field_set: JvmRunnableSourceFieldSet\n\n\n# -----------------------------------------------------------------------------------------------\n# `jvm_artifact` targets\n# -----------------------------------------------------------------------------------------------\n\n_DEFAULT_PACKAGE_MAPPING_URL = git_url(\n \"src/python/pants/jvm/dependency_inference/jvm_artifact_mappings.py\"\n)\n\n\nclass JvmArtifactGroupField(StringField):\n alias = \"group\"\n required = True\n value: str\n help = help_text(\n \"\"\"\n The 'group' part of a Maven-compatible coordinate to a third-party JAR artifact.\n\n For the JAR coordinate `com.google.guava:guava:30.1.1-jre`, the group is `com.google.guava`.\n \"\"\"\n )\n\n\nclass JvmArtifactArtifactField(StringField):\n alias = \"artifact\"\n required = True\n value: str\n help = help_text(\n \"\"\"\n The 'artifact' part of a Maven-compatible coordinate to a third-party JAR artifact.\n\n For the JAR coordinate `com.google.guava:guava:30.1.1-jre`, the artifact is `guava`.\n \"\"\"\n )\n\n\nclass JvmArtifactVersionField(StringField):\n alias = \"version\"\n required = True\n value: str\n help = help_text(\n \"\"\"\n The 'version' part of a Maven-compatible coordinate to a third-party JAR artifact.\n\n For the JAR coordinate `com.google.guava:guava:30.1.1-jre`, the version is `30.1.1-jre`.\n \"\"\"\n )\n\n\nclass JvmArtifactUrlField(StringField):\n alias = \"url\"\n required = False\n help = help_text(\n \"\"\"\n A URL that points to the location of this artifact.\n\n If specified, Pants will not fetch this artifact from default Maven repositories, and\n will instead fetch the artifact from this URL. To use default maven\n repositories, do not set this value.\n\n Note that `file:` URLs are not supported. 
Instead, use the `jar` field for local\n artifacts.\n \"\"\"\n )\n\n\nclass JvmArtifactJarSourceField(OptionalSingleSourceField):\n alias = \"jar\"\n expected_file_extensions = (\".jar\",)\n help = help_text(\n \"\"\"\n A local JAR file that provides this artifact to the lockfile resolver, instead of a\n Maven repository.\n\n Path is relative to the BUILD file.\n\n Use the `url` field for remote artifacts.\n \"\"\"\n )\n\n @classmethod\n def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:\n value_or_default = super().compute_value(raw_value, address)\n if value_or_default and value_or_default.startswith(\"file:\"):\n raise InvalidFieldException(\n softwrap(\n f\"\"\"\n The `{cls.alias}` field does not support `file:` URLS, but the target\n {address} sets the field to `{value_or_default}`.\n\n Instead, use the `jar` field to specify the relative path to the local jar file.\n \"\"\"\n )\n )\n return value_or_default\n\n\nclass JvmArtifactPackagesField(StringSequenceField):\n alias = \"packages\"\n help = help_text(\n f\"\"\"\n The JVM packages this artifact provides for the purposes of dependency inference.\n\n For example, the JVM artifact `junit:junit` might provide `[\"org.junit.**\"]`.\n\n Usually you can leave this field off. If unspecified, Pants will fall back to the\n `[java-infer].third_party_import_mapping`, then to a built in mapping\n ({_DEFAULT_PACKAGE_MAPPING_URL}), and then finally it will default to\n the normalized `group` of the artifact. For example, in the absence of any other mapping\n the artifact `io.confluent:common-config` would default to providing\n `[\"io.confluent.**\"]`.\n\n The package path may be made recursive to match symbols in subpackages\n by adding `.**` to the end of the package path. 
For example, specify `[\"org.junit.**\"]`\n to infer a dependency on the artifact for any file importing a symbol from `org.junit` or\n its subpackages.\n \"\"\"\n )\n\n\nclass JvmArtifactForceVersionField(BoolField):\n alias = \"force_version\"\n default = False\n help = help_text(\n \"\"\"\n Force artifact version during resolution.\n\n If set, pants will pass `--force-version` argument to `coursier fetch` for this artifact.\n \"\"\"\n )\n\n\nclass JvmProvidesTypesField(StringSequenceField):\n alias = \"experimental_provides_types\"\n help = help_text(\n \"\"\"\n Signals that the specified types should be fulfilled by these source files during\n dependency inference.\n\n This allows for specific types within packages that are otherwise inferred as\n belonging to `jvm_artifact` targets to be unambiguously inferred as belonging\n to this first-party source.\n\n If a given type is defined, at least one source file captured by this target must\n actually provide that symbol.\n \"\"\"\n )\n\n\n@dataclass(frozen=True)\nclass JvmArtifactExclusion:\n alias: ClassVar[str] = \"jvm_exclude\"\n help: ClassVar[str | Callable[[], str]] = help_text(\n \"\"\"\n Exclude the given `artifact` and `group`, or all artifacts from the given `group`.\n \"\"\"\n )\n\n group: str\n artifact: str | None = None\n\n def validate(self, _: Address) -> set[str]:\n return set()\n\n def to_coord_str(self) -> str:\n result = self.group\n if self.artifact:\n result += f\":{self.artifact}\"\n else:\n result += \":*\"\n return result\n\n\ndef _jvm_artifact_exclusions_field_help(\n supported_exclusions: Callable[[], Iterable[type[JvmArtifactExclusion]]]\n) -> str | Callable[[], str]:\n return help_text(\n lambda: f\"\"\"\n A list of exclusions for unversioned coordinates that should be excluded\n as dependencies when this artifact is resolved.\n\n This does not prevent this artifact from being included in the resolve as a dependency\n of other artifacts that depend on it, and is currently intended as a way to resolve\n version conflicts in complex resolves.\n\n Supported exclusions are:\n {bullet_list(f'`{exclusion.alias}`: {exclusion.help}' for exclusion in supported_exclusions())}\n \"\"\"\n )\n\n\nclass JvmArtifactExclusionsField(SequenceField[JvmArtifactExclusion]):\n alias = \"exclusions\"\n help = _jvm_artifact_exclusions_field_help(\n lambda: JvmArtifactExclusionsField.supported_exclusion_types\n )\n\n supported_exclusion_types: ClassVar[tuple[type[JvmArtifactExclusion], ...]] = (\n JvmArtifactExclusion,\n )\n expected_element_type = JvmArtifactExclusion\n expected_type_description = \"an iterable of JvmArtifactExclusionRule\"\n\n @classmethod\n def compute_value(\n cls, raw_value: Optional[Iterable[JvmArtifactExclusion]], address: Address\n ) -> Optional[Tuple[JvmArtifactExclusion, ...]]:\n computed_value = super().compute_value(raw_value, address)\n\n if computed_value:\n errors: list[str] = []\n for exclusion_rule in computed_value:\n err = exclusion_rule.validate(address)\n if err:\n errors.extend(err)\n\n if errors:\n raise InvalidFieldException(\n softwrap(\n f\"\"\"\n Invalid value for `{JvmArtifactExclusionsField.alias}` field at target\n {address}. 
Found following errors:\n\n {bullet_list(errors)}\n \"\"\"\n )\n )\n return computed_value\n\n\nclass JvmArtifactResolveField(JvmResolveField):\n help = help_text(\n \"\"\"\n The resolve from `[jvm].resolves` that this artifact should be included in.\n\n If not defined, will default to `[jvm].default_resolve`.\n\n When generating a lockfile for a particular resolve via the `coursier-resolve` goal,\n it will include all artifacts that are declared compatible with that resolve. First-party\n targets like `java_source` and `scala_source` also declare which resolve they use\n via the `resolve` field; so, for your first-party code to use\n a particular `jvm_artifact` target, that artifact must be included in the resolve\n used by that code.\n \"\"\"\n )\n\n\n@dataclass(frozen=True)\nclass JvmArtifactFieldSet(JvmRunnableSourceFieldSet):\n group: JvmArtifactGroupField\n artifact: JvmArtifactArtifactField\n version: JvmArtifactVersionField\n packages: JvmArtifactPackagesField\n url: JvmArtifactUrlField\n force_version: JvmArtifactForceVersionField\n\n required_fields = (\n JvmArtifactGroupField,\n JvmArtifactArtifactField,\n JvmArtifactVersionField,\n JvmArtifactPackagesField,\n JvmArtifactForceVersionField,\n )\n\n\nclass JvmArtifactTarget(Target):\n alias = \"jvm_artifact\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n *JvmArtifactFieldSet.required_fields,\n JvmArtifactUrlField, # TODO: should `JvmArtifactFieldSet` have an `all_fields` field?\n JvmArtifactJarSourceField,\n JvmArtifactResolveField,\n JvmArtifactExclusionsField,\n JvmJdkField,\n JvmMainClassNameField,\n )\n help = help_text(\n \"\"\"\n A third-party JVM artifact, as identified by its Maven-compatible coordinate.\n\n That is, an artifact identified by its `group`, `artifact`, and `version` components.\n\n Each artifact is associated with one or more resolves (a logical name you give to a\n lockfile). For this artifact to be used by your first-party code, it must be\n associated with the resolve(s) used by that code. See the `resolve` field.\n \"\"\"\n )\n\n def validate(self) -> None:\n if self[JvmArtifactJarSourceField].value and self[JvmArtifactUrlField].value:\n raise InvalidTargetException(\n f\"You cannot specify both the `url` and `jar` fields, but both were set on the \"\n f\"`{self.alias}` target {self.address}.\"\n )\n\n\n# -----------------------------------------------------------------------------------------------\n# Generate `jvm_artifact` targets from pom.xml\n# -----------------------------------------------------------------------------------------------\n\n\nclass PomXmlSourceField(SingleSourceField):\n default = \"pom.xml\"\n required = False\n\n\nclass JvmArtifactsPackageMappingField(DictStringToStringSequenceField):\n alias = \"package_mapping\"\n help = help_text(\n f\"\"\"\n A mapping of jvm artifacts to a list of the packages they provide.\n\n For example, `{{\"com.google.guava:guava\": [\"com.google.common.**\"]}}`.\n\n Any unspecified jvm artifacts will use a default. 
See the\n `{JvmArtifactPackagesField.alias}` field from the `{JvmArtifactTarget.alias}`\n target for more information.\n \"\"\"\n )\n value: FrozenDict[str, tuple[str, ...]]\n default: ClassVar[Optional[FrozenDict[str, tuple[str, ...]]]] = FrozenDict()\n\n @classmethod\n def compute_value( # type: ignore[override]\n cls, raw_value: dict[str, Iterable[str]], address: Address\n ) -> FrozenDict[tuple[str, str], tuple[str, ...]]:\n value_or_default = super().compute_value(raw_value, address)\n assert value_or_default is not None\n return FrozenDict(\n {\n cls._parse_coord(coord): tuple(packages)\n for coord, packages in value_or_default.items()\n }\n )\n\n @classmethod\n def _parse_coord(cls, coord: str) -> tuple[str, str]:\n group, artifact = coord.split(\":\")\n return group, artifact\n\n\nclass JvmArtifactsTargetGenerator(TargetGenerator):\n alias = \"jvm_artifacts\"\n core_fields = (\n PomXmlSourceField,\n JvmArtifactsPackageMappingField,\n *COMMON_TARGET_FIELDS,\n )\n generated_target_cls = JvmArtifactTarget\n copied_fields = COMMON_TARGET_FIELDS\n moved_fields = (JvmArtifactResolveField,)\n help = help_text(\n \"\"\"\n Generate a `jvm_artifact` target for each dependency in pom.xml file.\n \"\"\"\n )\n\n\nclass GenerateFromPomXmlRequest(GenerateTargetsRequest):\n generate_from = JvmArtifactsTargetGenerator\n\n\n@rule(\n desc=(\"Generate `jvm_artifact` targets from pom.xml\"),\n level=LogLevel.DEBUG,\n)\nasync def generate_from_pom_xml(\n request: GenerateFromPomXmlRequest,\n union_membership: UnionMembership,\n) -> GeneratedTargets:\n generator = request.generator\n pom_xml = await Get(\n SourceFiles,\n SourceFilesRequest([generator[PomXmlSourceField]]),\n )\n files = await Get(DigestContents, Digest, pom_xml.snapshot.digest)\n if not files:\n raise FileNotFoundError(f\"pom.xml not found: {generator[PomXmlSourceField].value}\")\n\n mapping = request.generator[JvmArtifactsPackageMappingField].value\n coordinates = parse_pom_xml(files[0].content, pom_xml_path=pom_xml.snapshot.files[0])\n targets = (\n JvmArtifactTarget(\n unhydrated_values={\n \"group\": coord.group,\n \"artifact\": coord.artifact,\n \"version\": coord.version,\n \"packages\": mapping.get((coord.group, coord.artifact)),\n **request.template,\n },\n address=request.template_address.create_generated(coord.artifact),\n )\n for coord in coordinates\n )\n return GeneratedTargets(request.generator, targets)\n\n\ndef parse_pom_xml(content: bytes, pom_xml_path: str) -> Iterator[Coordinate]:\n root = ET.fromstring(content.decode(\"utf-8\"))\n match = re.match(r\"^(\\{.*\\})project$\", root.tag)\n if not match:\n raise ValueError(\n f\"Unexpected root tag `{root.tag}` in {pom_xml_path}, expected tag `project`\"\n )\n\n namespace = match.group(1)\n for dependency in root.iter(f\"{namespace}dependency\"):\n yield Coordinate(\n group=get_child_text(dependency, f\"{namespace}groupId\"),\n artifact=get_child_text(dependency, f\"{namespace}artifactId\"),\n version=get_child_text(dependency, f\"{namespace}version\"),\n )\n\n\ndef get_child_text(parent: ET.Element, child: str) -> str:\n tag = parent.find(child)\n if tag is None:\n raise ValueError(f\"missing element: {child}\")\n text = tag.text\n if text is None:\n raise ValueError(f\"empty element: {child}\")\n return text\n\n\n# -----------------------------------------------------------------------------------------------\n# JUnit test support field(s)\n# -----------------------------------------------------------------------------------------------\n\n\nclass 
JunitTestSourceField(SingleSourceField, metaclass=ABCMeta):\n \"\"\"A marker that indicates that a source field represents a JUnit test.\"\"\"\n\n\nclass JunitTestTimeoutField(TestTimeoutField):\n pass\n\n\nclass JunitTestExtraEnvVarsField(TestExtraEnvVarsField):\n pass\n\n\n# -----------------------------------------------------------------------------------------------\n# JAR support fields\n# -----------------------------------------------------------------------------------------------\n\n\nclass JvmRequiredMainClassNameField(JvmMainClassNameField):\n required = True\n default = None\n help = help_text(\n \"\"\"\n `.`-separated name of the JVM class containing the `main()` method to be called when\n executing this JAR.\n \"\"\"\n )\n\n\nclass JvmShadingRule(ABC):\n \"\"\"Base class for defining JAR shading rules as valid aliases in BUILD files.\n\n Subclasses need to provide with an `alias` and a `help` message. The `alias` represents\n the name that will be used in BUILD files to instantiate the given subclass.\n\n Set the `help` class property with a description, which will be used in `./pants help`. For the\n best rendering, use soft wrapping (e.g. implicit string concatenation) within paragraphs, but\n hard wrapping (`\\n`) to separate distinct paragraphs and/or lists.\n \"\"\"\n\n alias: ClassVar[str]\n help: ClassVar[str | Callable[[], str]]\n\n @abstractmethod\n def encode(self) -> str:\n pass\n\n @abstractmethod\n def validate(self) -> set[str]:\n pass\n\n @staticmethod\n def _validate_field(value: str, *, name: str, invalid_chars: str) -> set[str]:\n errors = []\n for ch in invalid_chars:\n if ch in value:\n errors.append(f\"`{name}` can not contain the character `{ch}`.\")\n return set(errors)\n\n def __repr__(self) -> str:\n fields = [f\"{fld.name}={repr(getattr(self, fld.name))}\" for fld in dataclasses.fields(self)] # type: ignore[arg-type]\n return f\"{self.alias}({', '.join(fields)})\"\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingRenameRule(JvmShadingRule):\n alias = \"shading_rename\"\n help = \"Renames all occurrences of the given `pattern` by the `replacement`.\"\n\n pattern: str\n replacement: str\n\n def encode(self) -> str:\n return f\"rule {self.pattern} {self.replacement}\"\n\n def validate(self) -> set[str]:\n errors: list[str] = []\n errors.extend(\n JvmShadingRule._validate_field(self.pattern, name=\"pattern\", invalid_chars=\"/\")\n )\n errors.extend(\n JvmShadingRule._validate_field(self.replacement, name=\"replacement\", invalid_chars=\"/\")\n )\n return set(errors)\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingRelocateRule(JvmShadingRule):\n alias = \"shading_relocate\"\n help = help_text(\n \"\"\"\n Relocates the classes under the given `package` into the new package name.\n The default target package is `__shaded_by_pants__` if none provided in\n the `into` parameter.\n \"\"\"\n )\n\n package: str\n into: str | None = None\n\n def encode(self) -> str:\n if not self.into:\n target_suffix = \"__shaded_by_pants__\"\n else:\n target_suffix = self.into\n return f\"rule {self.package}.** {target_suffix}.@1\"\n\n def validate(self) -> set[str]:\n errors: list[str] = []\n errors.extend(\n JvmShadingRule._validate_field(self.package, name=\"package\", invalid_chars=\"/*\")\n )\n if self.into:\n errors.extend(\n JvmShadingRule._validate_field(self.into, name=\"into\", invalid_chars=\"/*\")\n )\n return set(errors)\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingZapRule(JvmShadingRule):\n alias = \"shading_zap\"\n help = \"Removes 
from the final artifact the occurrences of the `pattern`.\"\n\n pattern: str\n\n def encode(self) -> str:\n return f\"zap {self.pattern}\"\n\n def validate(self) -> set[str]:\n return JvmShadingRule._validate_field(self.pattern, name=\"pattern\", invalid_chars=\"/\")\n\n\n@dataclass(frozen=True, repr=False)\nclass JvmShadingKeepRule(JvmShadingRule):\n alias = \"shading_keep\"\n help = help_text(\n \"\"\"\n Keeps in the final artifact the occurrences of the `pattern`\n (and removes anything else).\n \"\"\"\n )\n\n pattern: str\n\n def encode(self) -> str:\n return f\"keep {self.pattern}\"\n\n def validate(self) -> set[str]:\n return JvmShadingRule._validate_field(self.pattern, name=\"pattern\", invalid_chars=\"/\")\n\n\nJVM_SHADING_RULE_TYPES: list[Type[JvmShadingRule]] = [\n JvmShadingRelocateRule,\n JvmShadingRenameRule,\n JvmShadingZapRule,\n JvmShadingKeepRule,\n]\n\n\ndef _shading_rules_field_help(intro: str) -> str:\n return softwrap(\n f\"\"\"\n {intro}\n\n There are {pluralize(len(JVM_SHADING_RULE_TYPES), \"possible shading rule\")} available,\n which are as follows:\n {bullet_list([f'`{rule.alias}`: {rule.help}' for rule in JVM_SHADING_RULE_TYPES])}\n\n When defining shading rules, just add them in this field using the previously listed rule\n alias and passing along the required parameters.\n \"\"\"\n )\n\n\ndef _shading_validate_rules(shading_rules: Iterable[JvmShadingRule]) -> set[str]:\n validation_errors = []\n for shading_rule in shading_rules:\n found_errors = shading_rule.validate()\n if found_errors:\n validation_errors.append(\n \"\\n\".join(\n [\n f\"In rule `{shading_rule.alias}`:\",\n bullet_list(found_errors),\n \"\",\n ]\n )\n )\n return set(validation_errors)\n\n\nclass JvmShadingRulesField(SequenceField[JvmShadingRule], metaclass=ABCMeta):\n alias = \"shading_rules\"\n required = False\n expected_element_type = JvmShadingRule\n expected_type_description = \"an iterable of JvmShadingRule\"\n\n @classmethod\n def compute_value(\n cls, raw_value: Optional[Iterable[JvmShadingRule]], address: Address\n ) -> Optional[Tuple[JvmShadingRule, ...]]:\n computed_value = super().compute_value(raw_value, address)\n\n if computed_value:\n validation_errors = _shading_validate_rules(computed_value)\n if validation_errors:\n raise InvalidFieldException(\n \"\\n\".join(\n [\n f\"Invalid shading rules assigned to `{cls.alias}` field in target {address}:\\n\",\n *validation_errors,\n ]\n )\n )\n\n return computed_value\n\n\n# -----------------------------------------------------------------------------------------------\n# `deploy_jar` target\n# -----------------------------------------------------------------------------------------------\n\n\n@dataclass(frozen=True)\nclass DeployJarDuplicateRule:\n alias: ClassVar[str] = \"duplicate_rule\"\n valid_actions: ClassVar[tuple[str, ...]] = (\"skip\", \"replace\", \"concat\", \"concat_text\", \"throw\")\n\n pattern: str\n action: str\n\n def validate(self) -> str | None:\n if self.action not in DeployJarDuplicateRule.valid_actions:\n return softwrap(\n f\"\"\"\n Value '{self.action}' for `action` associated with pattern\n '{self.pattern}' is not valid.\n\n It must be one of {list(DeployJarDuplicateRule.valid_actions)}.\n \"\"\"\n )\n return None\n\n def __repr__(self) -> str:\n return f\"{self.alias}(pattern='{self.pattern}', action='{self.action}')\"\n\n\nclass DeployJarDuplicatePolicyField(SequenceField[DeployJarDuplicateRule]):\n alias = \"duplicate_policy\"\n help = help_text(\n f\"\"\"\n A list of the rules to apply when duplicate 
file entries are found in the final\n assembled JAR file.\n\n When defining a duplicate policy, just add `duplicate_rule` directives to this\n field as follows:\n\n Example:\n\n duplicate_policy=[\n duplicate_rule(pattern=\"^META-INF/services\", action=\"concat_text\"),\n duplicate_rule(pattern=\"^reference\\\\.conf\", action=\"concat_text\"),\n duplicate_rule(pattern=\"^org/apache/commons\", action=\"throw\"),\n ]\n\n Where:\n\n * The `pattern` field is treated as a regular expression\n * The `action` field must be one of `{list(DeployJarDuplicateRule.valid_actions)}`.\n\n Note that the order in which the rules are listed is relevant.\n \"\"\"\n )\n required = False\n\n expected_element_type = DeployJarDuplicateRule\n expected_type_description = \"a list of JAR duplicate rules\"\n\n default = (\n DeployJarDuplicateRule(pattern=\"^META-INF/services/\", action=\"concat_text\"),\n DeployJarDuplicateRule(pattern=\"^META-INF/LICENSE\", action=\"skip\"),\n )\n\n @classmethod\n def compute_value(\n cls, raw_value: Optional[Iterable[DeployJarDuplicateRule]], address: Address\n ) -> Optional[Tuple[DeployJarDuplicateRule, ...]]:\n value = super().compute_value(raw_value, address)\n if value:\n errors = []\n for duplicate_rule in value:\n err = duplicate_rule.validate()\n if err:\n errors.append(err)\n\n if errors:\n raise InvalidFieldException(\n softwrap(\n f\"\"\"\n Invalid value for `{DeployJarDuplicatePolicyField.alias}` field at target:\n {address}. Found following errors:\n\n {bullet_list(errors)}\n \"\"\"\n )\n )\n return value\n\n def value_or_default(self) -> tuple[DeployJarDuplicateRule, ...]:\n if self.value is not None:\n return self.value\n return self.default\n\n\nclass DeployJarShadingRulesField(JvmShadingRulesField):\n help = _shading_rules_field_help(\"Shading rules to be applied to the final JAR artifact.\")\n\n\nclass DeployJarExcludeFilesField(StringSequenceField):\n alias = \"exclude_files\"\n help = help_text(\n \"\"\"\n A list of patterns to exclude from the final jar.\n \"\"\"\n )\n\n\nclass DeployJarTarget(Target):\n alias = \"deploy_jar\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n RestartableField,\n OutputPathField,\n JvmDependenciesField,\n JvmRequiredMainClassNameField,\n JvmJdkField,\n JvmResolveField,\n DeployJarDuplicatePolicyField,\n DeployJarShadingRulesField,\n DeployJarExcludeFilesField,\n )\n help = help_text(\n \"\"\"\n A `jar` file with first and third-party code bundled for deploys.\n\n The JAR will contain class files for both first-party code and\n third-party dependencies, all in a common directory structure.\n \"\"\"\n )\n\n\n# -----------------------------------------------------------------------------------------------\n# `jvm_war` targets\n# -----------------------------------------------------------------------------------------------\n\n\nclass JvmWarDependenciesField(Dependencies):\n pass\n\n\nclass JvmWarDescriptorAddressField(SingleSourceField):\n alias = \"descriptor\"\n default = \"web.xml\"\n help = \"Path to a file containing the descriptor (i.e., `web.xml`) for this WAR file. 
Defaults to `web.xml`.\"\n\n\nclass JvmWarContentField(SpecialCasedDependencies):\n alias = \"content\"\n help = help_text(\n \"\"\"\n A list of addresses to `resources` and `files` targets with content to place in the\n document root of this WAR file.\n \"\"\"\n )\n\n\nclass JvmWarShadingRulesField(JvmShadingRulesField):\n help = _shading_rules_field_help(\n \"Shading rules to be applied to the individual JAR artifacts embedded in the `WEB-INF/lib` folder.\"\n )\n\n\nclass JvmWarTarget(Target):\n alias = \"jvm_war\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n JvmResolveField,\n JvmWarContentField,\n JvmWarDependenciesField,\n JvmWarDescriptorAddressField,\n JvmWarShadingRulesField,\n OutputPathField,\n )\n help = help_text(\n \"\"\"\n A JSR 154 \"web application archive\" (or \"war\") with first-party and third-party code bundled for\n deploys in Java Servlet containers.\n \"\"\"\n )\n\n\n# -----------------------------------------------------------------------------------------------\n# Dynamic Field defaults\n# -----------------------------------------------------------------------------------------------#\n\n\nclass JvmResolveFieldDefaultFactoryRequest(FieldDefaultFactoryRequest):\n field_type = JvmResolveField\n\n\n@rule\ndef jvm_resolve_field_default_factory(\n request: JvmResolveFieldDefaultFactoryRequest,\n jvm: JvmSubsystem,\n) -> FieldDefaultFactoryResult:\n return FieldDefaultFactoryResult(lambda f: f.normalized_value(jvm))\n\n\n@memoized\ndef _jvm_source_run_request_rule(cls: type[JvmRunnableSourceFieldSet]) -> Iterable[Rule]:\n from pants.jvm.run import rules as run_rules\n\n @rule(\n canonical_name_suffix=cls.__name__,\n _param_type_overrides={\"request\": cls},\n level=LogLevel.DEBUG,\n )\n async def jvm_source_run_request(request: JvmRunnableSourceFieldSet) -> RunRequest:\n return await Get(RunRequest, GenericJvmRunRequest(request))\n\n return [*run_rules(), *collect_rules(locals())]\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(GenerateTargetsRequest, GenerateFromPomXmlRequest),\n UnionRule(FieldDefaultFactoryRequest, JvmResolveFieldDefaultFactoryRequest),\n *JvmArtifactFieldSet.jvm_rules(),\n ]\n\n\ndef build_file_aliases():\n return BuildFileAliases(\n objects={\n JvmArtifactExclusion.alias: JvmArtifactExclusion,\n DeployJarDuplicateRule.alias: DeployJarDuplicateRule,\n **{rule.alias: rule for rule in JVM_SHADING_RULE_TYPES},\n }\n )\n", "path": "src/python/pants/jvm/target_types.py" } ]
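The field classes above are consumed from BUILD files through their `alias` values. Below is a minimal sketch of a `deploy_jar` declaration exercising the aliases defined in this module (`duplicate_policy`, `shading_rules`, `exclude_files`, plus the `duplicate_rule` and `shading_*` objects registered in `build_file_aliases`). The `main` and `dependencies` field names are assumptions taken from the parent `JvmMainClassNameField` and `JvmDependenciesField` classes, which are defined outside this excerpt.

```python
# Pants BUILD file sketch (BUILD files use Python syntax).
deploy_jar(
    name="server-jar",
    main="com.example.Main",       # assumed alias from JvmMainClassNameField
    dependencies=[":server-lib"],  # assumed alias from JvmDependenciesField
    duplicate_policy=[
        duplicate_rule(pattern="^META-INF/services", action="concat_text"),
        duplicate_rule(pattern="^META-INF/LICENSE", action="skip"),
    ],
    shading_rules=[
        shading_relocate(package="com.fasterxml.jackson", into="shaded.jackson"),
        shading_zap(pattern="org.example.unused.**"),
    ],
    exclude_files=["**/*.proto"],
)
```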
diff --git a/docs/notes/2.22.x.md b/docs/notes/2.22.x.md index 86beb82ff1b..018defbb47b 100644 --- a/docs/notes/2.22.x.md +++ b/docs/notes/2.22.x.md @@ -36,6 +36,8 @@ docs [here](https://www.pantsbuild.org/2.22/docs/sql). for [`jvm_artifacts`](https://www.pantsbuild.org/2.22/reference/targets/jvm_artifacts) targets generator from `pom.xml`. +Exclusions for `jvm_artifact` and `scala_artifact` now correctly handle a `jvm_exclude` with only the group defined. + ##### Scala Setting the `orphan_files_behaviour = "ignore"` option for [`pants.backend.experimental.scala.lint.scalafix`](https://www.pantsbuild.org/2.22/reference/subsystems/scalafix#orphan_files_behavior) or [`pants.backend.experimental.scala.lint.scalafmt`](https://www.pantsbuild.org/2.22/reference/subsystems/scalafmt#orphan_files_behavior) backend is now properly silent. It previously showed spurious warnings. diff --git a/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py b/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py index 8167db55952..7e8efb87db5 100644 --- a/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py +++ b/src/python/pants/jvm/resolve/coursier_fetch_integration_test.py @@ -18,7 +18,11 @@ from pants.jvm.resolve.coordinate import Coordinate, Coordinates from pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry, CoursierResolvedLockfile from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules -from pants.jvm.target_types import JvmArtifactJarSourceField, JvmArtifactTarget +from pants.jvm.target_types import ( + JvmArtifactExclusion, + JvmArtifactJarSourceField, + JvmArtifactTarget, +) from pants.jvm.testutil import maybe_skip_jdk_test from pants.jvm.util_rules import ExtractFileDigest from pants.jvm.util_rules import rules as util_rules @@ -662,6 +666,25 @@ def test_transitive_excludes(rule_runner: RuleRunner) -> None: assert not any(i for i in entries if i.coord.artifact == "jackson-core") +@maybe_skip_jdk_test +def test_transitive_group_only_excludes(rule_runner: RuleRunner) -> None: + group_only_excludes = JvmArtifactExclusion(group="com.fasterxml.jackson.core", artifact=None) + + requirement = ArtifactRequirement( + coordinate=Coordinate( + group="com.fasterxml.jackson.module", + artifact="jackson-module-jaxb-annotations", + version="2.17.1", + ), + excludes=frozenset([group_only_excludes.to_coord_str()]), + ) + + resolve = rule_runner.request(CoursierResolvedLockfile, [ArtifactRequirements([requirement])]) + + entries = resolve.entries + assert not any(i for i in entries if i.coord.group == "com.fasterxml.jackson.core") + + @maybe_skip_jdk_test def test_missing_entry_for_transitive_dependency(rule_runner: RuleRunner) -> None: requirement = ArtifactRequirement( diff --git a/src/python/pants/jvm/target_types.py b/src/python/pants/jvm/target_types.py index ea7e3e6a5fc..c696eacbf65 100644 --- a/src/python/pants/jvm/target_types.py +++ b/src/python/pants/jvm/target_types.py @@ -309,6 +309,8 @@ def to_coord_str(self) -> str: result = self.group if self.artifact: result += f":{self.artifact}" + else: + result += ":*" return result
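The behavioural change in this diff is confined to `JvmArtifactExclusion.to_coord_str`: a group-only exclusion now encodes as `group:*` rather than a bare `group`, so the exclusion covers every artifact in the group (which is what the new `test_transitive_group_only_excludes` test asserts against the resolved lockfile). A standalone sketch of the fixed encoding logic, using a hypothetical stand-in class rather than the real `JvmArtifactExclusion`:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class ExclusionSketch:
    """Hypothetical stand-in mirroring the fixed to_coord_str logic."""

    group: str
    artifact: Optional[str] = None

    def to_coord_str(self) -> str:
        # Fall back to a "*" artifact when only the group is given, so the
        # exclusion covers the whole group instead of producing a bare group id.
        result = self.group
        if self.artifact:
            result += f":{self.artifact}"
        else:
            result += ":*"
        return result


assert ExclusionSketch("com.fasterxml.jackson.core").to_coord_str() == "com.fasterxml.jackson.core:*"
assert (
    ExclusionSketch("com.fasterxml.jackson.core", "jackson-core").to_coord_str()
    == "com.fasterxml.jackson.core:jackson-core"
)
```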
iterative__dvc-10208
dvc push: Unexpected error when pushing to Google Cloud storage or S3 # Bug Report dvc push: "Unexpected error" when pushing to Google Cloud storage or S3 ### Reproduce ``` dvc init dvc remote add -d s3 s3://bucket # or gcs gs://bucket dvc import-url https://data.dvc.org/get-started/data.xml dvc push -v ``` output (s3): ``` 2023-12-27 19:56:42,605 DEBUG: v3.36.1 (pip), CPython 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 2023-12-27 19:56:42,605 DEBUG: command: /path/bin/dvc push -v Collecting |0.00 [00:00, ?entry/s] Pushing |0.00 [00:00, ?file/s] Collecting my.bucket/key on s3 |3.00 [00:00, 4.84entry/s] 2023-12-27 19:56:43,676 ERROR: unexpected error Traceback (most recent call last): File "/path/lib/python3.9/site-packages/dvc/cli/__init__.py", line 211, in main ret = cmd.do_run() File "/path/lib/python3.9/site-packages/dvc/cli/command.py", line 27, in do_run return self.run() File "/path/lib/python3.9/site-packages/dvc/commands/data_sync.py", line 64, in run processed_files_count = self.repo.push( File "/path/lib/python3.9/site-packages/dvc/repo/__init__.py", line 65, in wrapper return f(repo, *args, **kwargs) File "/path/lib/python3.9/site-packages/dvc/repo/push.py", line 144, in push push_transferred, push_failed = ipush( File "/path/lib/python3.9/site-packages/dvc_data/index/push.py", line 101, in push old = build(data.path, data.fs) File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 90, in build for entry in build_entries(path, fs, ignore=ignore): File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 55, in build_entries walk_iter = fs.walk(path, detail=detail) File "/path/lib/python3.9/site-packages/dvc_http/__init__.py", line 162, in walk raise NotImplementedError NotImplementedError 2023-12-27 19:56:43,752 DEBUG: link type reflink is not available ([Errno 95] no more link types left to try out) 2023-12-27 19:56:43,755 DEBUG: Removing '/path/.MHVNkr3eAijD7Q5aau3NRK.tmp' 2023-12-27 19:56:43,755 DEBUG: Removing '/path/.MHVNkr3eAijD7Q5aau3NRK.tmp' 2023-12-27 19:56:43,757 DEBUG: Removing '/path/.MHVNkr3eAijD7Q5aau3NRK.tmp' 2023-12-27 19:56:43,757 DEBUG: Removing '/path/bkw-9036/.dvc/cache/files/md5/.mnnSioPUuXvRUCqUV2ug87.tmp' 2023-12-27 19:56:43,777 DEBUG: Version info for developers: DVC version: 3.36.1 (pip) ------------------------- Platform: Python 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 Subprojects: dvc_data = 3.3.0 dvc_objects = 3.0.0 dvc_render = 1.0.0 dvc_task = 0.3.0 scmrepo = 2.0.2 Supports: gs (gcsfs = 2023.12.2.post1), http (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), https (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), s3 (s3fs = 2023.12.2, boto3 = 1.33.13) Config: Global: /home/jdt/.config/dvc System: /etc/xdg/dvc Cache types: hardlink, symlink Cache directory: ext4 on /dev/nvme1n1p1 Caches: local Remotes: s3 Workspace directory: ext4 on /dev/nvme1n1p1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/9d9135fb99d9d827364c4dc5a42cdc60 Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! 2023-12-27 19:56:43,781 DEBUG: Analytics is enabled. 
2023-12-27 19:56:43,860 DEBUG: Trying to spawn ['daemon', 'analytics', '/tmp/tmpccxiwrmd', '-v'] 2023-12-27 19:56:43,871 DEBUG: Spawned ['daemon', 'analytics', '/tmp/tmpccxiwrmd', '-v'] with pid 22406 ``` output (gcs): ``` 2023-12-27 19:47:22,768 DEBUG: v3.36.1 (pip), CPython 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 2023-12-27 19:47:22,769 DEBUG: command: /path/bin/dvc push -v Collecting |0.00 [00:00, ?entry/s] Pushing |0.00 [00:00, ?file/s] Collecting bucket/path on gs |3.00 [00:01, 2.84entry/s] 2023-12-27 19:47:24,328 ERROR: unexpected error Traceback (most recent call last): File "/path/lib/python3.9/site-packages/dvc/cli/__init__.py", line 211, in main ret = cmd.do_run() File "/path/lib/python3.9/site-packages/dvc/cli/command.py", line 27, in do_run return self.run() File "/path/lib/python3.9/site-packages/dvc/commands/data_sync.py", line 64, in run processed_files_count = self.repo.push( File "/path/lib/python3.9/site-packages/dvc/repo/__init__.py", line 65, in wrapper return f(repo, *args, **kwargs) File "/path/lib/python3.9/site-packages/dvc/repo/push.py", line 144, in push push_transferred, push_failed = ipush( File "/path/lib/python3.9/site-packages/dvc_data/index/push.py", line 101, in push old = build(data.path, data.fs) File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 90, in build for entry in build_entries(path, fs, ignore=ignore): File "/path/lib/python3.9/site-packages/dvc_data/index/build.py", line 55, in build_entries walk_iter = fs.walk(path, detail=detail) File "/path/lib/python3.9/site-packages/dvc_http/__init__.py", line 162, in walk raise NotImplementedError NotImplementedError 2023-12-27 19:47:24,370 DEBUG: link type reflink is not available ([Errno 95] no more link types left to try out) 2023-12-27 19:47:24,371 DEBUG: Removing '/path/.fJ4uXqQznknWmbrzzUTXLQ.tmp' 2023-12-27 19:47:24,371 DEBUG: Removing '/path/.fJ4uXqQznknWmbrzzUTXLQ.tmp' 2023-12-27 19:47:24,371 DEBUG: Removing '/path/.fJ4uXqQznknWmbrzzUTXLQ.tmp' 2023-12-27 19:47:24,371 DEBUG: Removing '/path/bkw-9036/.dvc/cache/files/md5/.M6iwnJkjQgKzg54kN6chVi.tmp' 2023-12-27 19:47:24,377 DEBUG: Version info for developers: DVC version: 3.36.1 (pip) ------------------------- Platform: Python 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 Subprojects: dvc_data = 3.3.0 dvc_objects = 3.0.0 dvc_render = 1.0.0 dvc_task = 0.3.0 scmrepo = 2.0.2 Supports: gs (gcsfs = 2023.12.2.post1), http (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), https (aiohttp = 3.9.1, aiohttp-retry = 2.8.3) Config: Global: /home/jdt/.config/dvc System: /etc/xdg/dvc Cache types: hardlink, symlink Cache directory: ext4 on /dev/nvme1n1p1 Caches: local Remotes: gs Workspace directory: ext4 on /dev/nvme1n1p1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/9d9135fb99d9d827364c4dc5a42cdc60 Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! 2023-12-27 19:47:24,379 DEBUG: Analytics is enabled. 2023-12-27 19:47:24,445 DEBUG: Trying to spawn ['daemon', 'analytics', '/tmp/tmpk_30nnlt', '-v'] 2023-12-27 19:47:24,455 DEBUG: Spawned ['daemon', 'analytics', '/tmp/tmpk_30nnlt', '-v'] with pid 15755 ``` ### Expected Successful push ### Environment information <!-- This is required to ensure that we can reproduce the bug. 
--> ``` DVC version: 3.36.1 (pip) ------------------------- Platform: Python 3.9.18 on Linux-5.15.139-93.147.amzn2.x86_64-x86_64-with-glibc2.26 Subprojects: dvc_data = 3.3.0 dvc_objects = 3.0.0 dvc_render = 1.0.0 dvc_task = 0.3.0 scmrepo = 2.0.2 Supports: gs (gcsfs = 2023.12.2.post1), http (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), https (aiohttp = 3.9.1, aiohttp-retry = 2.8.3), s3 (s3fs = 2023.12.2, boto3 = 1.33.13) Config: Global: /home/jdt/.config/dvc System: /etc/xdg/dvc Cache types: hardlink, symlink Cache directory: ext4 on /dev/nvme1n1p1 Caches: local Remotes: s3 Workspace directory: ext4 on /dev/nvme1n1p1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/c9c73dbc105eb09a15137f49a60e6a5b ``` **Additional Information (if any):**
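Both tracebacks above fail at the same spot: `dvc_data.index.push` calls `build(data.path, data.fs)` for the storage backing the imported URL, and building an index needs `fs.walk()`, which the HTTP filesystem in `dvc_http` does not implement (it raises `NotImplementedError`, as shown). A hedged sketch of the reproduction through dvc's Python entry point, `dvc.cli.main` (the function at the top of the traceback); the remote URL is a placeholder and the script assumes it runs inside a fresh git repository:

```python
# Reproduction sketch: equivalent to the CLI steps listed in "Reproduce" above.
from dvc.cli import main

main(["init"])
main(["remote", "add", "-d", "myremote", "s3://bucket"])  # or gs://bucket
main(["import-url", "https://data.dvc.org/get-started/data.xml"])
# Before the fix in this record, this returns a non-zero exit code: index building
# on the HTTP source raises NotImplementedError from fs.walk().
main(["push", "-v"])
```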
[ { "content": "import logging\nimport time\nfrom collections import defaultdict\nfrom functools import partial\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n NamedTuple,\n Optional,\n Set,\n Tuple,\n Union,\n)\n\nfrom funcy.debug import format_time\n\nfrom dvc.dependency import ParamsDependency\nfrom dvc.fs import LocalFileSystem\nfrom dvc.fs.callbacks import DEFAULT_CALLBACK\nfrom dvc.log import logger\nfrom dvc.utils.objects import cached_property\n\nif TYPE_CHECKING:\n from networkx import DiGraph\n from pygtrie import Trie\n\n from dvc.dependency import Dependency\n from dvc.fs.callbacks import Callback\n from dvc.output import Output\n from dvc.repo import Repo\n from dvc.repo.stage import StageInfo\n from dvc.stage import Stage\n from dvc.types import TargetType\n from dvc_data.hashfile.db import HashFileDB\n from dvc_data.hashfile.hash_info import HashInfo\n from dvc_data.index import DataIndex, DataIndexKey, DataIndexView\n from dvc_objects.fs.base import FileSystem\n\n\nlogger = logger.getChild(__name__)\nObjectContainer = Dict[Optional[\"HashFileDB\"], Set[\"HashInfo\"]]\n\n\ndef log_walk(seq):\n for root, dirs, files in seq:\n start = time.perf_counter()\n yield root, dirs, files\n duration = format_time(time.perf_counter() - start)\n logger.trace(\"%s in collecting stages from %s\", duration, root)\n\n\ndef collect_files(\n repo: \"Repo\", onerror: Optional[Callable[[str, Exception], None]] = None\n):\n \"\"\"Collects all of the stages present in the DVC repo.\n\n Args:\n onerror (optional): callable that will be called with two args:\n the filepath whose collection failed and the exc instance.\n It can report the error to continue with the collection\n (and, skip failed ones), or raise the exception to abort\n the collection.\n \"\"\"\n from dvc.dvcfile import is_valid_filename\n from dvc.exceptions import DvcException\n from dvc.utils import relpath\n\n scm = repo.scm\n fs = repo.fs\n sep = fs.sep\n outs: Set[str] = set()\n\n is_local_fs = isinstance(fs, LocalFileSystem)\n\n def is_ignored(path):\n # apply only for the local fs\n return is_local_fs and scm.is_ignored(path)\n\n def is_dvcfile_and_not_ignored(root, file):\n return is_valid_filename(file) and not is_ignored(f\"{root}{sep}{file}\")\n\n def is_out_or_ignored(root, directory):\n dir_path = f\"{root}{sep}{directory}\"\n # trailing slash needed to check if a directory is gitignored\n return dir_path in outs or is_ignored(f\"{dir_path}{sep}\")\n\n walk_iter = repo.dvcignore.walk(fs, repo.root_dir, followlinks=False)\n if logger.isEnabledFor(logging.TRACE): # type: ignore[attr-defined]\n walk_iter = log_walk(walk_iter)\n\n for root, dirs, files in walk_iter:\n dvcfile_filter = partial(is_dvcfile_and_not_ignored, root)\n for file in filter(dvcfile_filter, files):\n file_path = fs.join(root, file)\n try:\n index = Index.from_file(repo, file_path)\n except DvcException as exc:\n if onerror:\n onerror(relpath(file_path), exc)\n continue\n raise\n\n outs.update(\n out.fspath\n for stage in index.stages\n for out in stage.outs\n if out.protocol == \"local\"\n )\n yield file_path, index\n dirs[:] = [d for d in dirs if not is_out_or_ignored(root, d)]\n\n\ndef _load_data_from_outs(index, prefix, outs):\n from dvc_data.index import DataIndexEntry, Meta\n\n parents = set()\n for out in outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n\n for key_len in range(1, len(key)):\n parents.add((ws, key[:key_len]))\n\n loaded = None\n if 
out.files:\n loaded = True\n for okey, ometa, ohi in out.get_obj():\n for key_len in range(1, len(okey)):\n parents.add((ws, (*key, *okey[:key_len])))\n\n fkey = (*key, *okey)\n index[(*prefix, ws, *fkey)] = DataIndexEntry(\n key=fkey,\n meta=ometa,\n hash_info=ohi,\n )\n\n entry = DataIndexEntry(\n key=key,\n meta=out.meta,\n hash_info=out.hash_info,\n loaded=loaded,\n )\n\n if (\n out.stage.is_import\n and not out.stage.is_repo_import\n and not out.stage.is_db_import\n ):\n dep = out.stage.deps[0]\n entry.meta = dep.meta\n if out.hash_info:\n entry.hash_info = out.hash_info\n else:\n # partial import\n entry.hash_info = dep.hash_info\n\n # FIXME PyGTrie-based DataIndex doesn't remove entry.key during\n # index.add, so we have to set the entry manually here to make\n # index.view() work correctly.\n index[(*prefix, ws, *key)] = entry\n\n for ws, key in parents:\n index[(*prefix, ws, *key)] = DataIndexEntry(\n key=key, meta=Meta(isdir=True), loaded=True\n )\n\n\ndef _load_storage_from_out(storage_map, key, out):\n from dvc.cachemgr import LEGACY_HASH_NAMES\n from dvc.config import NoRemoteError\n from dvc_data.index import FileStorage, ObjectStorage\n\n if out.odb:\n storage_map.add_data(ObjectStorage(key, out.odb))\n storage_map.add_cache(ObjectStorage(key, out.cache))\n try:\n remote = out.repo.cloud.get_remote(out.remote)\n if remote.fs.version_aware:\n storage_map.add_remote(\n FileStorage(\n key=key,\n fs=remote.fs,\n path=remote.path,\n index=remote.index,\n prefix=(),\n )\n )\n else:\n odb = (\n remote.legacy_odb if out.hash_name in LEGACY_HASH_NAMES else remote.odb\n )\n storage_map.add_remote(ObjectStorage(key, odb, index=remote.index))\n except NoRemoteError:\n pass\n\n if out.stage.is_db_import:\n return\n\n if out.stage.is_import:\n dep = out.stage.deps[0]\n if not out.hash_info:\n from fsspec.utils import tokenize\n\n # partial import\n fs_cache = out.repo.cache.fs_cache\n storage_map.add_cache(\n FileStorage(\n key,\n fs_cache.fs,\n fs_cache.fs.join(\n fs_cache.path, dep.fs.protocol, tokenize(dep.fs_path)\n ),\n )\n )\n storage_map.add_remote(FileStorage(key, dep.fs, dep.fs_path))\n\n\nclass Index:\n def __init__(\n self,\n repo: \"Repo\",\n stages: Optional[List[\"Stage\"]] = None,\n metrics: Optional[Dict[str, List[str]]] = None,\n plots: Optional[Dict[str, List[str]]] = None,\n params: Optional[Dict[str, Any]] = None,\n artifacts: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.repo = repo\n self.stages = stages or []\n self._metrics = metrics or {}\n self._plots = plots or {}\n self._params = params or {}\n self._artifacts = artifacts or {}\n self._collected_targets: Dict[int, List[\"StageInfo\"]] = {}\n\n @cached_property\n def rev(self) -> Optional[str]:\n if not isinstance(self.repo.fs, LocalFileSystem):\n return self.repo.get_rev()[:7]\n return None\n\n def __repr__(self) -> str:\n rev = self.rev or \"workspace\"\n return f\"Index({self.repo}, fs@{rev})\"\n\n @classmethod\n def from_repo(\n cls,\n repo: \"Repo\",\n onerror: Optional[Callable[[str, Exception], None]] = None,\n ) -> \"Index\":\n stages = []\n metrics = {}\n plots = {}\n params = {}\n artifacts = {}\n\n onerror = onerror or repo.stage_collection_error_handler\n for _, idx in collect_files(repo, onerror=onerror):\n stages.extend(idx.stages)\n metrics.update(idx._metrics)\n plots.update(idx._plots)\n params.update(idx._params)\n artifacts.update(idx._artifacts)\n return cls(\n repo,\n stages=stages,\n metrics=metrics,\n plots=plots,\n params=params,\n artifacts=artifacts,\n )\n\n @classmethod\n def 
from_file(cls, repo: \"Repo\", path: str) -> \"Index\":\n from dvc.dvcfile import load_file\n\n dvcfile = load_file(repo, path)\n return cls(\n repo,\n stages=list(dvcfile.stages.values()),\n metrics={path: dvcfile.metrics} if dvcfile.metrics else {},\n plots={path: dvcfile.plots} if dvcfile.plots else {},\n params={path: dvcfile.params} if dvcfile.params else {},\n artifacts={path: dvcfile.artifacts} if dvcfile.artifacts else {},\n )\n\n def update(self, stages: Iterable[\"Stage\"]) -> \"Index\":\n stages = set(stages)\n # we remove existing stages with same hashes at first\n # and then re-add the new ones later.\n stages_set = (set(self.stages) - stages) | stages\n return self.__class__(\n self.repo,\n stages=list(stages_set),\n metrics=self._metrics,\n plots=self._plots,\n params=self._params,\n artifacts=self._artifacts,\n )\n\n @cached_property\n def outs_trie(self) -> \"Trie\":\n from dvc.repo.trie import build_outs_trie\n\n return build_outs_trie(self.stages)\n\n @cached_property\n def outs_graph(self) -> \"DiGraph\":\n from dvc.repo.graph import build_outs_graph\n\n return build_outs_graph(self.graph, self.outs_trie)\n\n @cached_property\n def graph(self) -> \"DiGraph\":\n from dvc.repo.graph import build_graph\n\n return build_graph(self.stages, self.outs_trie)\n\n def check_graph(self) -> None:\n if not getattr(self.repo, \"_skip_graph_checks\", False):\n self.graph # noqa: B018\n\n @property\n def params(self) -> Iterator[\"ParamsDependency\"]:\n from dvc.dependency import ParamsDependency\n\n for dep in self.deps:\n if isinstance(dep, ParamsDependency):\n yield dep\n\n @property\n def outs(self) -> Iterator[\"Output\"]:\n for stage in self.stages:\n yield from stage.outs\n\n @cached_property\n def out_data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n by_workspace[\"local\"] = set()\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n by_workspace[ws].add(key)\n\n return dict(by_workspace)\n\n @property\n def decorated_outs(self) -> Iterator[\"Output\"]:\n for output in self.outs:\n if output.is_decorated:\n yield output\n\n @property\n def metrics(self) -> Iterator[\"Output\"]:\n for output in self.outs:\n if output.is_metric:\n yield output\n\n @property\n def plots(self) -> Iterator[\"Output\"]:\n for output in self.outs:\n if output.is_plot:\n yield output\n\n @property\n def deps(self) -> Iterator[\"Dependency\"]:\n for stage in self.stages:\n yield from stage.deps\n\n @cached_property\n def _plot_sources(self) -> List[str]:\n from dvc.repo.plots import _collect_pipeline_files\n\n sources: List[str] = []\n for data in _collect_pipeline_files(self.repo, [], {}).values():\n for plot_id, props in data.get(\"data\", {}).items():\n if isinstance(props.get(\"y\"), dict):\n sources.extend(props[\"y\"])\n if isinstance(props.get(\"x\"), dict):\n sources.extend(props[\"x\"])\n else:\n sources.append(plot_id)\n return sources\n\n @cached_property\n def data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n by_workspace[\"local\"] = set()\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n workspace, key = out.index_key\n by_workspace[workspace].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def metric_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n from .metrics.show import _collect_top_level_metrics\n\n 
by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n\n for out in self.outs:\n if not out.metric:\n continue\n\n workspace, key = out.index_key\n by_workspace[workspace].add(key)\n\n for path in _collect_top_level_metrics(self.repo):\n key = self.repo.fs.relparts(path, self.repo.root_dir)\n by_workspace[\"repo\"].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def param_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n from .params.show import _collect_top_level_params\n\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n by_workspace[\"repo\"] = set()\n\n param_paths = _collect_top_level_params(self.repo)\n default_file: str = ParamsDependency.DEFAULT_PARAMS_FILE\n if self.repo.fs.exists(f\"{self.repo.fs.root_marker}{default_file}\"):\n param_paths = chain(param_paths, [default_file])\n\n for path in param_paths:\n key = self.repo.fs.relparts(path, self.repo.root_dir)\n by_workspace[\"repo\"].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def plot_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n\n for out in self.outs:\n if not out.plot:\n continue\n\n workspace, key = out.index_key\n by_workspace[workspace].add(key)\n\n for path in self._plot_sources:\n key = self.repo.fs.parts(path)\n by_workspace[\"repo\"].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def data_tree(self):\n from dvc_data.hashfile.tree import Tree\n\n tree = Tree()\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n\n tree.add((ws, *key), out.meta, out.hash_info)\n\n tree.digest()\n\n return tree\n\n @cached_property\n def data(self) -> \"Dict[str, DataIndex]\":\n prefix: \"DataIndexKey\"\n loaded = False\n\n index = self.repo.data_index\n prefix = (\"tree\", self.data_tree.hash_info.value)\n if index.has_node(prefix):\n loaded = True\n\n if not loaded:\n _load_data_from_outs(index, prefix, self.outs)\n index.commit()\n\n by_workspace = {}\n by_workspace[\"repo\"] = index.view((*prefix, \"repo\"))\n by_workspace[\"local\"] = index.view((*prefix, \"local\"))\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n if not out.is_in_repo:\n continue\n\n ws, key = out.index_key\n if ws not in by_workspace:\n by_workspace[ws] = index.view((*prefix, ws))\n\n data_index = by_workspace[ws]\n _load_storage_from_out(data_index.storage_map, key, out)\n\n return by_workspace\n\n @staticmethod\n def _hash_targets(\n targets: Iterable[Optional[str]],\n **kwargs: Any,\n ) -> int:\n return hash(\n (\n frozenset(targets),\n kwargs.get(\"with_deps\", False),\n kwargs.get(\"recursive\", False),\n )\n )\n\n def collect_targets(\n self, targets: Optional[\"TargetType\"], *, onerror=None, **kwargs: Any\n ) -> List[\"StageInfo\"]:\n from dvc.exceptions import DvcException\n from dvc.repo.stage import StageInfo\n from dvc.utils.collections import ensure_list\n\n if not onerror:\n\n def onerror(_target, _exc):\n raise\n\n targets = ensure_list(targets)\n if not targets:\n return [StageInfo(stage) for stage in self.stages]\n targets_hash = self._hash_targets(targets, **kwargs)\n if targets_hash not in self._collected_targets:\n collected = []\n for target in targets:\n try:\n collected.extend(self.repo.stage.collect_granular(target, **kwargs))\n except DvcException as exc:\n onerror(target, exc)\n self._collected_targets[targets_hash] = collected\n\n return 
self._collected_targets[targets_hash]\n\n def used_objs(\n self,\n targets: Optional[\"TargetType\"] = None,\n with_deps: bool = False,\n remote: Optional[str] = None,\n force: bool = False,\n recursive: bool = False,\n jobs: Optional[int] = None,\n push: bool = False,\n ) -> \"ObjectContainer\":\n used: \"ObjectContainer\" = defaultdict(set)\n pairs = self.collect_targets(targets, recursive=recursive, with_deps=with_deps)\n for stage, filter_info in pairs:\n for odb, objs in stage.get_used_objs(\n remote=remote,\n force=force,\n jobs=jobs,\n filter_info=filter_info,\n push=push,\n ).items():\n used[odb].update(objs)\n return used\n\n def _types_filter(self, types, out):\n ws, okey = out.index_key\n for typ in types:\n if typ == \"plots\":\n keys = self.plot_keys\n elif typ == \"metrics\":\n keys = self.metric_keys\n elif typ == \"params\":\n keys = self.param_keys\n else:\n raise ValueError(f\"unsupported type {typ}\")\n\n for key in keys.get(ws, []):\n if (len(key) >= len(okey) and key[: len(okey)] == okey) or (\n len(key) < len(okey) and okey[: len(key)] == key\n ):\n return True\n\n return False\n\n def targets_view(\n self,\n targets: Optional[\"TargetType\"],\n stage_filter: Optional[Callable[[\"Stage\"], bool]] = None,\n outs_filter: Optional[Callable[[\"Output\"], bool]] = None,\n max_size: Optional[int] = None,\n types: Optional[List[str]] = None,\n **kwargs: Any,\n ) -> \"IndexView\":\n \"\"\"Return read-only view of index for the specified targets.\n Args:\n targets: Targets to collect\n stage_filter: Optional stage filter to be applied after collecting\n targets.\n outs_filter: Optional output filter to be applied after collecting\n targets.\n Additional kwargs will be passed into the stage collector.\n Note:\n If both stage_filter and outs_filter are provided, stage_filter\n will be applied first, and the resulting view will only contain\n outputs from stages that matched stage_filter. 
Outputs from stages\n that did not match will be excluded from the view (whether or not\n the output would have matched outs_filter).\n \"\"\"\n stage_infos = [\n stage_info\n for stage_info in self.collect_targets(targets, **kwargs)\n if not stage_filter or stage_filter(stage_info.stage)\n ]\n\n def _outs_filter(out):\n if max_size and out.meta and out.meta.size and out.meta.size >= max_size:\n return False\n\n if types and not self._types_filter(types, out):\n return False\n\n if outs_filter:\n return outs_filter(out)\n\n return True\n\n return IndexView(self, stage_infos, outs_filter=_outs_filter)\n\n\nclass _DataPrefixes(NamedTuple):\n explicit: Set[\"DataIndexKey\"]\n recursive: Set[\"DataIndexKey\"]\n\n\nclass IndexView:\n \"\"\"Read-only view of Index.data using filtered stages.\"\"\"\n\n def __init__(\n self,\n index: Index,\n stage_infos: Iterable[\"StageInfo\"],\n outs_filter: Optional[Callable[[\"Output\"], bool]],\n ):\n self._index = index\n self._stage_infos = stage_infos\n # NOTE: stage_infos might have the same stage multiple times but with\n # different filter_info\n self.stages = list({stage for stage, _ in stage_infos})\n self._outs_filter = outs_filter\n\n @property\n def repo(self) -> \"Repo\":\n return self._index.repo\n\n @property\n def deps(self) -> Iterator[\"Dependency\"]:\n for stage in self.stages:\n yield from stage.deps\n\n @property\n def _filtered_outs(self) -> Iterator[Tuple[\"Output\", Optional[str]]]:\n for stage, filter_info in self._stage_infos:\n for out in stage.filter_outs(filter_info):\n if not self._outs_filter or self._outs_filter(out):\n yield out, filter_info\n\n @property\n def outs(self) -> Iterator[\"Output\"]:\n yield from {out for (out, _) in self._filtered_outs}\n\n @cached_property\n def out_data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n by_workspace[\"local\"] = set()\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n by_workspace[ws].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def _data_prefixes(self) -> Dict[str, \"_DataPrefixes\"]:\n prefixes: Dict[str, \"_DataPrefixes\"] = defaultdict(\n lambda: _DataPrefixes(set(), set())\n )\n for out, filter_info in self._filtered_outs:\n if not out.use_cache:\n continue\n workspace, key = out.index_key\n if filter_info and out.fs.isin(filter_info, out.fs_path):\n key = key + out.fs.relparts(filter_info, out.fs_path)\n entry = self._index.data[workspace].get(key)\n if entry and entry.meta and entry.meta.isdir:\n prefixes[workspace].recursive.add(key)\n prefixes[workspace].explicit.update(key[:i] for i in range(len(key), 0, -1))\n return prefixes\n\n @cached_property\n def data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n ret: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n for out, filter_info in self._filtered_outs:\n if not out.use_cache:\n continue\n\n workspace, key = out.index_key\n if filter_info and out.fs.isin(filter_info, out.fs_path):\n key = key + out.fs.relparts(filter_info, out.fs_path)\n ret[workspace].add(key)\n\n return dict(ret)\n\n @cached_property\n def data_tree(self):\n from dvc_data.hashfile.tree import Tree\n\n tree = Tree()\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n\n tree.add((ws, *key), out.meta, out.hash_info)\n\n tree.digest()\n\n return tree\n\n @cached_property\n def data(self) -> Dict[str, Union[\"DataIndex\", \"DataIndexView\"]]:\n from 
dvc_data.index import DataIndex, view\n\n def key_filter(workspace: str, key: \"DataIndexKey\"):\n try:\n prefixes = self._data_prefixes[workspace]\n return key in prefixes.explicit or any(\n key[: len(prefix)] == prefix for prefix in prefixes.recursive\n )\n except KeyError:\n return False\n\n data: Dict[str, Union[\"DataIndex\", \"DataIndexView\"]] = {}\n for workspace, data_index in self._index.data.items():\n if self.stages:\n data[workspace] = view(data_index, partial(key_filter, workspace))\n else:\n data[workspace] = DataIndex()\n return data\n\n\ndef build_data_index( # noqa: C901, PLR0912\n index: Union[\"Index\", \"IndexView\"],\n path: str,\n fs: \"FileSystem\",\n workspace: str = \"repo\",\n compute_hash: Optional[bool] = False,\n callback: \"Callback\" = DEFAULT_CALLBACK,\n) -> \"DataIndex\":\n from dvc_data.index import DataIndex, DataIndexEntry, Meta\n from dvc_data.index.build import build_entries, build_entry\n from dvc_data.index.save import build_tree\n\n ignore = None\n if workspace == \"repo\" and isinstance(fs, LocalFileSystem):\n ignore = index.repo.dvcignore\n\n data = DataIndex()\n parents = set()\n for key in index.data_keys.get(workspace, set()):\n out_path = fs.join(path, *key)\n\n for key_len in range(1, len(key)):\n parents.add(key[:key_len])\n\n if not fs.exists(out_path):\n continue\n\n hash_name = _get_entry_hash_name(index, workspace, key)\n try:\n out_entry = build_entry(\n out_path,\n fs,\n compute_hash=compute_hash,\n state=index.repo.state,\n hash_name=hash_name,\n )\n except FileNotFoundError:\n out_entry = DataIndexEntry()\n\n out_entry.key = key\n data.add(out_entry)\n callback.relative_update(1)\n\n if not out_entry.meta or not out_entry.meta.isdir:\n continue\n\n for entry in build_entries(\n out_path,\n fs,\n compute_hash=compute_hash,\n state=index.repo.state,\n ignore=ignore,\n hash_name=hash_name,\n ):\n if not entry.key or entry.key == (\"\",):\n # NOTE: whether the root will be returned by build_entries\n # depends on the filesystem (e.g. local doesn't, but s3 does).\n continue\n\n entry.key = key + entry.key\n data.add(entry)\n callback.relative_update(1)\n\n for key in parents:\n parent_path = fs.join(path, *key)\n if not fs.exists(parent_path):\n continue\n direntry = DataIndexEntry(key=key, meta=Meta(isdir=True), loaded=True)\n data.add(direntry)\n callback.relative_update(1)\n\n if compute_hash:\n out_keys = index.out_data_keys.get(workspace, set())\n data_keys = index.data_keys.get(workspace, set())\n for key in data_keys.intersection(out_keys):\n hash_name = _get_entry_hash_name(index, workspace, key)\n\n out_entry = data.get(key)\n if not out_entry or not out_entry.isdir:\n continue\n\n tree_meta, tree = build_tree(data, key, name=hash_name)\n out_entry.meta = tree_meta\n out_entry.hash_info = tree.hash_info\n out_entry.loaded = True\n data.add(out_entry)\n callback.relative_update(1)\n\n return data\n\n\ndef _get_entry_hash_name(\n index: Union[\"Index\", \"IndexView\"], workspace: str, key: \"DataIndexKey\"\n) -> str:\n from dvc_data.hashfile.hash import DEFAULT_ALGORITHM\n\n for idx in reversed(range(len(key) + 1)):\n prefix = key[:idx]\n try:\n src_entry = index.data[workspace][prefix]\n except KeyError:\n continue\n\n if src_entry.hash_info and src_entry.hash_info.name:\n return src_entry.hash_info.name\n\n return DEFAULT_ALGORITHM\n", "path": "dvc/repo/index.py" } ]
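In the pre-fix `_load_storage_from_out` above, the source of an import(-url) stage is registered as an ordinary `FileStorage` remote, which is why push later tries to build an index over it. The fixed version of the file, next, differs by a single argument. A minimal sketch of the changed call as a hypothetical helper, assuming the `read_only` flag on `FileStorage` provided by the dvc-data bump in this record's diff:

```python
from dvc_data.index import FileStorage


def register_import_source(storage_map, key, dep):
    # Hypothetical helper mirroring the fixed branch of _load_storage_from_out:
    # the import's source becomes a read-only remote, so push/fetch never try
    # to index or write to it.
    storage_map.add_remote(FileStorage(key, dep.fs, dep.fs_path, read_only=True))
```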
[ { "content": "import logging\nimport time\nfrom collections import defaultdict\nfrom functools import partial\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n NamedTuple,\n Optional,\n Set,\n Tuple,\n Union,\n)\n\nfrom funcy.debug import format_time\n\nfrom dvc.dependency import ParamsDependency\nfrom dvc.fs import LocalFileSystem\nfrom dvc.fs.callbacks import DEFAULT_CALLBACK\nfrom dvc.log import logger\nfrom dvc.utils.objects import cached_property\n\nif TYPE_CHECKING:\n from networkx import DiGraph\n from pygtrie import Trie\n\n from dvc.dependency import Dependency\n from dvc.fs.callbacks import Callback\n from dvc.output import Output\n from dvc.repo import Repo\n from dvc.repo.stage import StageInfo\n from dvc.stage import Stage\n from dvc.types import TargetType\n from dvc_data.hashfile.db import HashFileDB\n from dvc_data.hashfile.hash_info import HashInfo\n from dvc_data.index import DataIndex, DataIndexKey, DataIndexView\n from dvc_objects.fs.base import FileSystem\n\n\nlogger = logger.getChild(__name__)\nObjectContainer = Dict[Optional[\"HashFileDB\"], Set[\"HashInfo\"]]\n\n\ndef log_walk(seq):\n for root, dirs, files in seq:\n start = time.perf_counter()\n yield root, dirs, files\n duration = format_time(time.perf_counter() - start)\n logger.trace(\"%s in collecting stages from %s\", duration, root)\n\n\ndef collect_files(\n repo: \"Repo\", onerror: Optional[Callable[[str, Exception], None]] = None\n):\n \"\"\"Collects all of the stages present in the DVC repo.\n\n Args:\n onerror (optional): callable that will be called with two args:\n the filepath whose collection failed and the exc instance.\n It can report the error to continue with the collection\n (and, skip failed ones), or raise the exception to abort\n the collection.\n \"\"\"\n from dvc.dvcfile import is_valid_filename\n from dvc.exceptions import DvcException\n from dvc.utils import relpath\n\n scm = repo.scm\n fs = repo.fs\n sep = fs.sep\n outs: Set[str] = set()\n\n is_local_fs = isinstance(fs, LocalFileSystem)\n\n def is_ignored(path):\n # apply only for the local fs\n return is_local_fs and scm.is_ignored(path)\n\n def is_dvcfile_and_not_ignored(root, file):\n return is_valid_filename(file) and not is_ignored(f\"{root}{sep}{file}\")\n\n def is_out_or_ignored(root, directory):\n dir_path = f\"{root}{sep}{directory}\"\n # trailing slash needed to check if a directory is gitignored\n return dir_path in outs or is_ignored(f\"{dir_path}{sep}\")\n\n walk_iter = repo.dvcignore.walk(fs, repo.root_dir, followlinks=False)\n if logger.isEnabledFor(logging.TRACE): # type: ignore[attr-defined]\n walk_iter = log_walk(walk_iter)\n\n for root, dirs, files in walk_iter:\n dvcfile_filter = partial(is_dvcfile_and_not_ignored, root)\n for file in filter(dvcfile_filter, files):\n file_path = fs.join(root, file)\n try:\n index = Index.from_file(repo, file_path)\n except DvcException as exc:\n if onerror:\n onerror(relpath(file_path), exc)\n continue\n raise\n\n outs.update(\n out.fspath\n for stage in index.stages\n for out in stage.outs\n if out.protocol == \"local\"\n )\n yield file_path, index\n dirs[:] = [d for d in dirs if not is_out_or_ignored(root, d)]\n\n\ndef _load_data_from_outs(index, prefix, outs):\n from dvc_data.index import DataIndexEntry, Meta\n\n parents = set()\n for out in outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n\n for key_len in range(1, len(key)):\n parents.add((ws, key[:key_len]))\n\n loaded = None\n if 
out.files:\n loaded = True\n for okey, ometa, ohi in out.get_obj():\n for key_len in range(1, len(okey)):\n parents.add((ws, (*key, *okey[:key_len])))\n\n fkey = (*key, *okey)\n index[(*prefix, ws, *fkey)] = DataIndexEntry(\n key=fkey,\n meta=ometa,\n hash_info=ohi,\n )\n\n entry = DataIndexEntry(\n key=key,\n meta=out.meta,\n hash_info=out.hash_info,\n loaded=loaded,\n )\n\n if (\n out.stage.is_import\n and not out.stage.is_repo_import\n and not out.stage.is_db_import\n ):\n dep = out.stage.deps[0]\n entry.meta = dep.meta\n if out.hash_info:\n entry.hash_info = out.hash_info\n else:\n # partial import\n entry.hash_info = dep.hash_info\n\n # FIXME PyGTrie-based DataIndex doesn't remove entry.key during\n # index.add, so we have to set the entry manually here to make\n # index.view() work correctly.\n index[(*prefix, ws, *key)] = entry\n\n for ws, key in parents:\n index[(*prefix, ws, *key)] = DataIndexEntry(\n key=key, meta=Meta(isdir=True), loaded=True\n )\n\n\ndef _load_storage_from_out(storage_map, key, out):\n from dvc.cachemgr import LEGACY_HASH_NAMES\n from dvc.config import NoRemoteError\n from dvc_data.index import FileStorage, ObjectStorage\n\n if out.odb:\n storage_map.add_data(ObjectStorage(key, out.odb))\n storage_map.add_cache(ObjectStorage(key, out.cache))\n try:\n remote = out.repo.cloud.get_remote(out.remote)\n if remote.fs.version_aware:\n storage_map.add_remote(\n FileStorage(\n key=key,\n fs=remote.fs,\n path=remote.path,\n index=remote.index,\n prefix=(),\n )\n )\n else:\n odb = (\n remote.legacy_odb if out.hash_name in LEGACY_HASH_NAMES else remote.odb\n )\n storage_map.add_remote(ObjectStorage(key, odb, index=remote.index))\n except NoRemoteError:\n pass\n\n if out.stage.is_db_import:\n return\n\n if out.stage.is_import:\n dep = out.stage.deps[0]\n if not out.hash_info:\n from fsspec.utils import tokenize\n\n # partial import\n fs_cache = out.repo.cache.fs_cache\n storage_map.add_cache(\n FileStorage(\n key,\n fs_cache.fs,\n fs_cache.fs.join(\n fs_cache.path, dep.fs.protocol, tokenize(dep.fs_path)\n ),\n )\n )\n storage_map.add_remote(FileStorage(key, dep.fs, dep.fs_path, read_only=True))\n\n\nclass Index:\n def __init__(\n self,\n repo: \"Repo\",\n stages: Optional[List[\"Stage\"]] = None,\n metrics: Optional[Dict[str, List[str]]] = None,\n plots: Optional[Dict[str, List[str]]] = None,\n params: Optional[Dict[str, Any]] = None,\n artifacts: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.repo = repo\n self.stages = stages or []\n self._metrics = metrics or {}\n self._plots = plots or {}\n self._params = params or {}\n self._artifacts = artifacts or {}\n self._collected_targets: Dict[int, List[\"StageInfo\"]] = {}\n\n @cached_property\n def rev(self) -> Optional[str]:\n if not isinstance(self.repo.fs, LocalFileSystem):\n return self.repo.get_rev()[:7]\n return None\n\n def __repr__(self) -> str:\n rev = self.rev or \"workspace\"\n return f\"Index({self.repo}, fs@{rev})\"\n\n @classmethod\n def from_repo(\n cls,\n repo: \"Repo\",\n onerror: Optional[Callable[[str, Exception], None]] = None,\n ) -> \"Index\":\n stages = []\n metrics = {}\n plots = {}\n params = {}\n artifacts = {}\n\n onerror = onerror or repo.stage_collection_error_handler\n for _, idx in collect_files(repo, onerror=onerror):\n stages.extend(idx.stages)\n metrics.update(idx._metrics)\n plots.update(idx._plots)\n params.update(idx._params)\n artifacts.update(idx._artifacts)\n return cls(\n repo,\n stages=stages,\n metrics=metrics,\n plots=plots,\n params=params,\n artifacts=artifacts,\n )\n\n 
@classmethod\n def from_file(cls, repo: \"Repo\", path: str) -> \"Index\":\n from dvc.dvcfile import load_file\n\n dvcfile = load_file(repo, path)\n return cls(\n repo,\n stages=list(dvcfile.stages.values()),\n metrics={path: dvcfile.metrics} if dvcfile.metrics else {},\n plots={path: dvcfile.plots} if dvcfile.plots else {},\n params={path: dvcfile.params} if dvcfile.params else {},\n artifacts={path: dvcfile.artifacts} if dvcfile.artifacts else {},\n )\n\n def update(self, stages: Iterable[\"Stage\"]) -> \"Index\":\n stages = set(stages)\n # we remove existing stages with same hashes at first\n # and then re-add the new ones later.\n stages_set = (set(self.stages) - stages) | stages\n return self.__class__(\n self.repo,\n stages=list(stages_set),\n metrics=self._metrics,\n plots=self._plots,\n params=self._params,\n artifacts=self._artifacts,\n )\n\n @cached_property\n def outs_trie(self) -> \"Trie\":\n from dvc.repo.trie import build_outs_trie\n\n return build_outs_trie(self.stages)\n\n @cached_property\n def outs_graph(self) -> \"DiGraph\":\n from dvc.repo.graph import build_outs_graph\n\n return build_outs_graph(self.graph, self.outs_trie)\n\n @cached_property\n def graph(self) -> \"DiGraph\":\n from dvc.repo.graph import build_graph\n\n return build_graph(self.stages, self.outs_trie)\n\n def check_graph(self) -> None:\n if not getattr(self.repo, \"_skip_graph_checks\", False):\n self.graph # noqa: B018\n\n @property\n def params(self) -> Iterator[\"ParamsDependency\"]:\n from dvc.dependency import ParamsDependency\n\n for dep in self.deps:\n if isinstance(dep, ParamsDependency):\n yield dep\n\n @property\n def outs(self) -> Iterator[\"Output\"]:\n for stage in self.stages:\n yield from stage.outs\n\n @cached_property\n def out_data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n by_workspace[\"local\"] = set()\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n by_workspace[ws].add(key)\n\n return dict(by_workspace)\n\n @property\n def decorated_outs(self) -> Iterator[\"Output\"]:\n for output in self.outs:\n if output.is_decorated:\n yield output\n\n @property\n def metrics(self) -> Iterator[\"Output\"]:\n for output in self.outs:\n if output.is_metric:\n yield output\n\n @property\n def plots(self) -> Iterator[\"Output\"]:\n for output in self.outs:\n if output.is_plot:\n yield output\n\n @property\n def deps(self) -> Iterator[\"Dependency\"]:\n for stage in self.stages:\n yield from stage.deps\n\n @cached_property\n def _plot_sources(self) -> List[str]:\n from dvc.repo.plots import _collect_pipeline_files\n\n sources: List[str] = []\n for data in _collect_pipeline_files(self.repo, [], {}).values():\n for plot_id, props in data.get(\"data\", {}).items():\n if isinstance(props.get(\"y\"), dict):\n sources.extend(props[\"y\"])\n if isinstance(props.get(\"x\"), dict):\n sources.extend(props[\"x\"])\n else:\n sources.append(plot_id)\n return sources\n\n @cached_property\n def data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n by_workspace[\"local\"] = set()\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n workspace, key = out.index_key\n by_workspace[workspace].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def metric_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n from .metrics.show import 
_collect_top_level_metrics\n\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n\n for out in self.outs:\n if not out.metric:\n continue\n\n workspace, key = out.index_key\n by_workspace[workspace].add(key)\n\n for path in _collect_top_level_metrics(self.repo):\n key = self.repo.fs.relparts(path, self.repo.root_dir)\n by_workspace[\"repo\"].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def param_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n from .params.show import _collect_top_level_params\n\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n by_workspace[\"repo\"] = set()\n\n param_paths = _collect_top_level_params(self.repo)\n default_file: str = ParamsDependency.DEFAULT_PARAMS_FILE\n if self.repo.fs.exists(f\"{self.repo.fs.root_marker}{default_file}\"):\n param_paths = chain(param_paths, [default_file])\n\n for path in param_paths:\n key = self.repo.fs.relparts(path, self.repo.root_dir)\n by_workspace[\"repo\"].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def plot_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n\n for out in self.outs:\n if not out.plot:\n continue\n\n workspace, key = out.index_key\n by_workspace[workspace].add(key)\n\n for path in self._plot_sources:\n key = self.repo.fs.parts(path)\n by_workspace[\"repo\"].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def data_tree(self):\n from dvc_data.hashfile.tree import Tree\n\n tree = Tree()\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n\n tree.add((ws, *key), out.meta, out.hash_info)\n\n tree.digest()\n\n return tree\n\n @cached_property\n def data(self) -> \"Dict[str, DataIndex]\":\n prefix: \"DataIndexKey\"\n loaded = False\n\n index = self.repo.data_index\n prefix = (\"tree\", self.data_tree.hash_info.value)\n if index.has_node(prefix):\n loaded = True\n\n if not loaded:\n _load_data_from_outs(index, prefix, self.outs)\n index.commit()\n\n by_workspace = {}\n by_workspace[\"repo\"] = index.view((*prefix, \"repo\"))\n by_workspace[\"local\"] = index.view((*prefix, \"local\"))\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n if not out.is_in_repo:\n continue\n\n ws, key = out.index_key\n if ws not in by_workspace:\n by_workspace[ws] = index.view((*prefix, ws))\n\n data_index = by_workspace[ws]\n _load_storage_from_out(data_index.storage_map, key, out)\n\n return by_workspace\n\n @staticmethod\n def _hash_targets(\n targets: Iterable[Optional[str]],\n **kwargs: Any,\n ) -> int:\n return hash(\n (\n frozenset(targets),\n kwargs.get(\"with_deps\", False),\n kwargs.get(\"recursive\", False),\n )\n )\n\n def collect_targets(\n self, targets: Optional[\"TargetType\"], *, onerror=None, **kwargs: Any\n ) -> List[\"StageInfo\"]:\n from dvc.exceptions import DvcException\n from dvc.repo.stage import StageInfo\n from dvc.utils.collections import ensure_list\n\n if not onerror:\n\n def onerror(_target, _exc):\n raise\n\n targets = ensure_list(targets)\n if not targets:\n return [StageInfo(stage) for stage in self.stages]\n targets_hash = self._hash_targets(targets, **kwargs)\n if targets_hash not in self._collected_targets:\n collected = []\n for target in targets:\n try:\n collected.extend(self.repo.stage.collect_granular(target, **kwargs))\n except DvcException as exc:\n onerror(target, exc)\n self._collected_targets[targets_hash] = collected\n\n return 
self._collected_targets[targets_hash]\n\n def used_objs(\n self,\n targets: Optional[\"TargetType\"] = None,\n with_deps: bool = False,\n remote: Optional[str] = None,\n force: bool = False,\n recursive: bool = False,\n jobs: Optional[int] = None,\n push: bool = False,\n ) -> \"ObjectContainer\":\n used: \"ObjectContainer\" = defaultdict(set)\n pairs = self.collect_targets(targets, recursive=recursive, with_deps=with_deps)\n for stage, filter_info in pairs:\n for odb, objs in stage.get_used_objs(\n remote=remote,\n force=force,\n jobs=jobs,\n filter_info=filter_info,\n push=push,\n ).items():\n used[odb].update(objs)\n return used\n\n def _types_filter(self, types, out):\n ws, okey = out.index_key\n for typ in types:\n if typ == \"plots\":\n keys = self.plot_keys\n elif typ == \"metrics\":\n keys = self.metric_keys\n elif typ == \"params\":\n keys = self.param_keys\n else:\n raise ValueError(f\"unsupported type {typ}\")\n\n for key in keys.get(ws, []):\n if (len(key) >= len(okey) and key[: len(okey)] == okey) or (\n len(key) < len(okey) and okey[: len(key)] == key\n ):\n return True\n\n return False\n\n def targets_view(\n self,\n targets: Optional[\"TargetType\"],\n stage_filter: Optional[Callable[[\"Stage\"], bool]] = None,\n outs_filter: Optional[Callable[[\"Output\"], bool]] = None,\n max_size: Optional[int] = None,\n types: Optional[List[str]] = None,\n **kwargs: Any,\n ) -> \"IndexView\":\n \"\"\"Return read-only view of index for the specified targets.\n Args:\n targets: Targets to collect\n stage_filter: Optional stage filter to be applied after collecting\n targets.\n outs_filter: Optional output filter to be applied after collecting\n targets.\n Additional kwargs will be passed into the stage collector.\n Note:\n If both stage_filter and outs_filter are provided, stage_filter\n will be applied first, and the resulting view will only contain\n outputs from stages that matched stage_filter. 
Outputs from stages\n that did not match will be excluded from the view (whether or not\n the output would have matched outs_filter).\n \"\"\"\n stage_infos = [\n stage_info\n for stage_info in self.collect_targets(targets, **kwargs)\n if not stage_filter or stage_filter(stage_info.stage)\n ]\n\n def _outs_filter(out):\n if max_size and out.meta and out.meta.size and out.meta.size >= max_size:\n return False\n\n if types and not self._types_filter(types, out):\n return False\n\n if outs_filter:\n return outs_filter(out)\n\n return True\n\n return IndexView(self, stage_infos, outs_filter=_outs_filter)\n\n\nclass _DataPrefixes(NamedTuple):\n explicit: Set[\"DataIndexKey\"]\n recursive: Set[\"DataIndexKey\"]\n\n\nclass IndexView:\n \"\"\"Read-only view of Index.data using filtered stages.\"\"\"\n\n def __init__(\n self,\n index: Index,\n stage_infos: Iterable[\"StageInfo\"],\n outs_filter: Optional[Callable[[\"Output\"], bool]],\n ):\n self._index = index\n self._stage_infos = stage_infos\n # NOTE: stage_infos might have the same stage multiple times but with\n # different filter_info\n self.stages = list({stage for stage, _ in stage_infos})\n self._outs_filter = outs_filter\n\n @property\n def repo(self) -> \"Repo\":\n return self._index.repo\n\n @property\n def deps(self) -> Iterator[\"Dependency\"]:\n for stage in self.stages:\n yield from stage.deps\n\n @property\n def _filtered_outs(self) -> Iterator[Tuple[\"Output\", Optional[str]]]:\n for stage, filter_info in self._stage_infos:\n for out in stage.filter_outs(filter_info):\n if not self._outs_filter or self._outs_filter(out):\n yield out, filter_info\n\n @property\n def outs(self) -> Iterator[\"Output\"]:\n yield from {out for (out, _) in self._filtered_outs}\n\n @cached_property\n def out_data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n by_workspace: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n by_workspace[\"repo\"] = set()\n by_workspace[\"local\"] = set()\n\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n by_workspace[ws].add(key)\n\n return dict(by_workspace)\n\n @cached_property\n def _data_prefixes(self) -> Dict[str, \"_DataPrefixes\"]:\n prefixes: Dict[str, \"_DataPrefixes\"] = defaultdict(\n lambda: _DataPrefixes(set(), set())\n )\n for out, filter_info in self._filtered_outs:\n if not out.use_cache:\n continue\n workspace, key = out.index_key\n if filter_info and out.fs.isin(filter_info, out.fs_path):\n key = key + out.fs.relparts(filter_info, out.fs_path)\n entry = self._index.data[workspace].get(key)\n if entry and entry.meta and entry.meta.isdir:\n prefixes[workspace].recursive.add(key)\n prefixes[workspace].explicit.update(key[:i] for i in range(len(key), 0, -1))\n return prefixes\n\n @cached_property\n def data_keys(self) -> Dict[str, Set[\"DataIndexKey\"]]:\n ret: Dict[str, Set[\"DataIndexKey\"]] = defaultdict(set)\n\n for out, filter_info in self._filtered_outs:\n if not out.use_cache:\n continue\n\n workspace, key = out.index_key\n if filter_info and out.fs.isin(filter_info, out.fs_path):\n key = key + out.fs.relparts(filter_info, out.fs_path)\n ret[workspace].add(key)\n\n return dict(ret)\n\n @cached_property\n def data_tree(self):\n from dvc_data.hashfile.tree import Tree\n\n tree = Tree()\n for out in self.outs:\n if not out.use_cache:\n continue\n\n ws, key = out.index_key\n\n tree.add((ws, *key), out.meta, out.hash_info)\n\n tree.digest()\n\n return tree\n\n @cached_property\n def data(self) -> Dict[str, Union[\"DataIndex\", \"DataIndexView\"]]:\n from 
dvc_data.index import DataIndex, view\n\n def key_filter(workspace: str, key: \"DataIndexKey\"):\n try:\n prefixes = self._data_prefixes[workspace]\n return key in prefixes.explicit or any(\n key[: len(prefix)] == prefix for prefix in prefixes.recursive\n )\n except KeyError:\n return False\n\n data: Dict[str, Union[\"DataIndex\", \"DataIndexView\"]] = {}\n for workspace, data_index in self._index.data.items():\n if self.stages:\n data[workspace] = view(data_index, partial(key_filter, workspace))\n else:\n data[workspace] = DataIndex()\n return data\n\n\ndef build_data_index( # noqa: C901, PLR0912\n index: Union[\"Index\", \"IndexView\"],\n path: str,\n fs: \"FileSystem\",\n workspace: str = \"repo\",\n compute_hash: Optional[bool] = False,\n callback: \"Callback\" = DEFAULT_CALLBACK,\n) -> \"DataIndex\":\n from dvc_data.index import DataIndex, DataIndexEntry, Meta\n from dvc_data.index.build import build_entries, build_entry\n from dvc_data.index.save import build_tree\n\n ignore = None\n if workspace == \"repo\" and isinstance(fs, LocalFileSystem):\n ignore = index.repo.dvcignore\n\n data = DataIndex()\n parents = set()\n for key in index.data_keys.get(workspace, set()):\n out_path = fs.join(path, *key)\n\n for key_len in range(1, len(key)):\n parents.add(key[:key_len])\n\n if not fs.exists(out_path):\n continue\n\n hash_name = _get_entry_hash_name(index, workspace, key)\n try:\n out_entry = build_entry(\n out_path,\n fs,\n compute_hash=compute_hash,\n state=index.repo.state,\n hash_name=hash_name,\n )\n except FileNotFoundError:\n out_entry = DataIndexEntry()\n\n out_entry.key = key\n data.add(out_entry)\n callback.relative_update(1)\n\n if not out_entry.meta or not out_entry.meta.isdir:\n continue\n\n for entry in build_entries(\n out_path,\n fs,\n compute_hash=compute_hash,\n state=index.repo.state,\n ignore=ignore,\n hash_name=hash_name,\n ):\n if not entry.key or entry.key == (\"\",):\n # NOTE: whether the root will be returned by build_entries\n # depends on the filesystem (e.g. local doesn't, but s3 does).\n continue\n\n entry.key = key + entry.key\n data.add(entry)\n callback.relative_update(1)\n\n for key in parents:\n parent_path = fs.join(path, *key)\n if not fs.exists(parent_path):\n continue\n direntry = DataIndexEntry(key=key, meta=Meta(isdir=True), loaded=True)\n data.add(direntry)\n callback.relative_update(1)\n\n if compute_hash:\n out_keys = index.out_data_keys.get(workspace, set())\n data_keys = index.data_keys.get(workspace, set())\n for key in data_keys.intersection(out_keys):\n hash_name = _get_entry_hash_name(index, workspace, key)\n\n out_entry = data.get(key)\n if not out_entry or not out_entry.isdir:\n continue\n\n tree_meta, tree = build_tree(data, key, name=hash_name)\n out_entry.meta = tree_meta\n out_entry.hash_info = tree.hash_info\n out_entry.loaded = True\n data.add(out_entry)\n callback.relative_update(1)\n\n return data\n\n\ndef _get_entry_hash_name(\n index: Union[\"Index\", \"IndexView\"], workspace: str, key: \"DataIndexKey\"\n) -> str:\n from dvc_data.hashfile.hash import DEFAULT_ALGORITHM\n\n for idx in reversed(range(len(key) + 1)):\n prefix = key[:idx]\n try:\n src_entry = index.data[workspace][prefix]\n except KeyError:\n continue\n\n if src_entry.hash_info and src_entry.hash_info.name:\n return src_entry.hash_info.name\n\n return DEFAULT_ALGORITHM\n", "path": "dvc/repo/index.py" } ]
diff --git a/dvc/repo/index.py b/dvc/repo/index.py index b3dfb47ee5..bac2bfa36b 100644 --- a/dvc/repo/index.py +++ b/dvc/repo/index.py @@ -221,7 +221,7 @@ def _load_storage_from_out(storage_map, key, out): ), ) ) - storage_map.add_remote(FileStorage(key, dep.fs, dep.fs_path)) + storage_map.add_remote(FileStorage(key, dep.fs, dep.fs_path, read_only=True)) class Index: diff --git a/pyproject.toml b/pyproject.toml index 54c1b4614e..95e30fe919 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ dependencies = [ "configobj>=5.0.6", "distro>=1.3", "dpath<3,>=2.1.0", - "dvc-data>=3.4,<3.5", + "dvc-data>=3.5,<3.6", "dvc-http>=2.29.0", "dvc-render>=1.0.0,<2", "dvc-studio-client>=0.17.1,<1", diff --git a/tests/func/test_repo_index.py b/tests/func/test_repo_index.py index 9a0b06ea4a..42a5123a27 100644 --- a/tests/func/test_repo_index.py +++ b/tests/func/test_repo_index.py @@ -1,3 +1,4 @@ +import os from itertools import chain import pytest @@ -338,3 +339,53 @@ def test_param_keys_top_level_params(tmp_dir, dvc): tmp_dir.gen("dvc.yaml", top_level_params) index = Index.from_repo(dvc) assert index.param_keys == {"repo": {("classifier", "custom_params_file.yaml")}} + + +def test_data_index(tmp_dir, dvc, local_cloud, erepo_dir): + tmp_dir.dvc_gen( + { + "foo": b"foo", + "dir": {"bar": b"bar", "subdir": {"baz": b"baz"}}, + } + ) + + with erepo_dir.chdir(): + erepo_dir.dvc_gen("efoo", b"efoo", commit="create efoo") + erepo_dir.dvc_gen( + "edir", + {"ebar": b"ebar", "esubdir": {"ebaz": b"ebaz"}}, + commit="create edir", + ) + + dvc.imp(os.fspath(erepo_dir), "efoo") + dvc.imp(os.fspath(erepo_dir), "edir") + + local_cloud.gen("ifoo", b"ifoo") + local_cloud.gen("idir", {"ibar": b"ibar", "isubdir": {"ibaz": b"ibaz"}}) + + dvc.imp_url(str(local_cloud / "ifoo")) + dvc.imp_url(str(local_cloud / "idir")) + + index = Index.from_repo(dvc) + assert index.data_keys == { + "local": set(), + "repo": {("dir",), ("edir",), ("efoo",), ("foo",), ("idir",), ("ifoo",)}, + } + + data = index.data["repo"] + assert set(data.keys()) == { + ("dir",), + ("edir",), + ("efoo",), + ("foo",), + ("idir",), + ("ifoo",), + } + + assert not data.storage_map[("foo",)].remote + assert not data.storage_map[("dir",)].remote + + assert data.storage_map[("efoo",)].remote.read_only + assert data.storage_map[("edir",)].remote.read_only + assert data.storage_map[("ifoo",)].remote.read_only + assert data.storage_map[("idir",)].remote.read_only
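A note on the filtered view built in the `dvc/repo/index.py` content above: `IndexView.data` constructs a `key_filter` closure that keeps a data-index key only if it is one of the explicitly collected prefixes or falls under a directory key that was collected recursively. The standalone sketch below restates that matching rule with hypothetical keys; it illustrates the logic only and is not dvc's API.

```python
from typing import Set, Tuple

DataIndexKey = Tuple[str, ...]

def key_matches(
    key: DataIndexKey,
    explicit: Set[DataIndexKey],
    recursive: Set[DataIndexKey],
) -> bool:
    # Keep a key if it was collected explicitly (the output itself or one of
    # its parent prefixes) or if it sits under a recursively collected
    # directory output.
    return key in explicit or any(
        key[: len(prefix)] == prefix for prefix in recursive
    )

# Hypothetical prefixes for a directory output at dir/subdir:
explicit = {("dir",), ("dir", "subdir")}
recursive = {("dir", "subdir")}

assert key_matches(("dir",), explicit, recursive)                  # parent prefix
assert key_matches(("dir", "subdir", "baz"), explicit, recursive)  # file under the dir
assert not key_matches(("other",), explicit, recursive)            # unrelated key
```

The `read_only=True` flag that the diff adds when registering remotes for imported dependencies is then observable as `storage_map[...].remote.read_only`, which is exactly what the new test asserts.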
getmoto__moto-1840
Cryptography Package has a Security Vulnerability

Using pipenv's security check feature, I discovered a vulnerability in the cryptography package affecting versions < 2.3.

> Checking installed package safety...
36351: cryptography >=1.9.0,<2.3 resolved (2.2.2 installed)!
python-cryptography versions >=1.9.0 and <2.3 did not enforce a minimum tag length for finalize_with_tag API. If a user did not validate the input length prior to passing it to finalize_with_tag an attacker could craft an invalid payload with a shortened tag (e.g. 1 byte) such that they would have a 1 in 256 chance of passing the MAC check. GCM tag forgeries can cause key leakage.

More details here: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10903

setup.py should be updated to require cryptography>=2.3.0.
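As a quick illustration of the constraint being proposed (not part of the fix itself), an environment can be checked against the advisory's affected range with a comparison like the following sketch; it assumes the `packaging` library is available.

```python
import cryptography
from packaging.version import Version

installed = Version(cryptography.__version__)

# Affected range per the advisory: >=1.9.0 and <2.3
if Version("1.9.0") <= installed < Version("2.3"):
    raise RuntimeError(
        f"cryptography {installed} is affected by CVE-2018-10903; "
        "upgrade to >=2.3.0"
    )
print(f"cryptography {installed} is outside the affected range")
```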
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cryptography>=2.0.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk<0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.6',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk<0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.6',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index dad9ab9bb173..98780dd5a2e2 100755 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ "boto>=2.36.0", "boto3>=1.6.16,<1.8", "botocore>=1.9.16,<1.11", - "cryptography>=2.0.0", + "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9",
microsoft__DeepSpeed-2611
[BUG] pydantic DeepSpeedConfigModel has no validator for <class:object>

**Describe the bug**
During ```from deepspeed.inference.config import DtypeEnum```, I got this error:
```
  File "pydantic/main.py", line 299, in pydantic.main.ModelMetaclass.__new__
  File "pydantic/fields.py", line 411, in pydantic.fields.ModelField.infer
  File "pydantic/fields.py", line 342, in pydantic.fields.ModelField.__init__
  File "pydantic/fields.py", line 456, in pydantic.fields.ModelField.prepare
  File "pydantic/fields.py", line 670, in pydantic.fields.ModelField.populate_validators
  File "pydantic/validators.py", line 715, in find_validators
RuntimeError: no validator found for <class 'object'>, see `arbitrary_types_allowed` in Config
```

**To Reproduce**
Steps to reproduce the behavior:
1. Simple inference script to reproduce: ```from deepspeed.inference.config import DtypeEnum```
2. pydantic 1.8.2, deepspeed 0.8.0+384f17b

**Expected behavior**
Successful import with no error.

**ds_report output**
Cannot be produced due to the same import error.

**System info (please complete the following information):**
 - OS: Red Hat Enterprise Linux Server 7.9 (Maipo)
 - GPU count and types: one machine with 8 A100s, three machines with 8 A100s each
 - Hugging Face Transformers 4.19.2, no accelerate
 - Python version 3.8.13
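The failure is standard pydantic 1.x behavior: if a model declares a field whose annotation is a type pydantic has no built-in validator for, model creation fails at class-definition time unless the model's `Config` sets `arbitrary_types_allowed` — the same flag the fix in this record adds to `DeepSpeedConfigModel.Config`. A minimal, self-contained reproduction (with a hypothetical `Opaque` class standing in for the offending type) might look like this:

```python
from pydantic import BaseModel  # pydantic 1.x API


class Opaque:
    """A type pydantic 1.x has no built-in validator for."""


# Without arbitrary_types_allowed, the model class itself fails to build:
#   RuntimeError: no validator found for <class '...Opaque'>,
#   see `arbitrary_types_allowed` in Config
try:
    class Broken(BaseModel):
        payload: Opaque = None
except RuntimeError as err:
    print(err)


# Opting in to arbitrary types makes the same definition succeed;
# pydantic then falls back to a plain isinstance() check for the field.
class Fixed(BaseModel):
    payload: Opaque = None

    class Config:
        arbitrary_types_allowed = True


print(Fixed(payload=Opaque()).payload)
```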
[ { "content": "\"\"\"\nCopyright (c) Microsoft Corporation\nLicensed under the MIT license.\n\"\"\"\n\"\"\"\nCollection of DeepSpeed configuration utilities\n\"\"\"\nimport json\nimport collections\nimport collections.abc\nfrom functools import reduce\nfrom pydantic import BaseModel\nfrom deepspeed.utils import logger\n\n\nclass DeepSpeedConfigModel(BaseModel):\n \"\"\"\n This class should be used as a base for all DeepSpeed configs. It extends\n pydantic.BaseModel to allow for deprecated fields. To enable this feature,\n add deprecated=True to pydantic.Field:\n\n my_dep_field: int = Field(0, deprecated=True)\n\n Deprecated Field kwargs:\n - deprecated: [True|False], default False\n Enables / Disables deprecated fields\n - new_param: str, default \"\"\n Name of the field replacing the deprecated field\n - set_new_param: [True|False], default True\n If new_param is provided, enables setting the value of that param with\n deprecated field value\n - new_param_fn: callable, default (lambda x: x)\n If new_param is provided and set_new_param is True, this function will\n modify the value of the deprecated field before placing that value in\n the new_param field\n\n Example:\n my_new_field is replacing a deprecated my_old_field. The expected type\n for my_new_field is int while the expected type for my_old_field is\n str. We want to maintain backward compatibility with our configs, so we\n define the fields with:\n\n class MyExampleConfig(DeepSpeedConfigModel):\n my_new_field: int = 0\n my_old_field: str = Field('0',\n deprecated=True,\n new_param='my_new_field',\n new_param_fn=(lambda x: int(x)))\n \"\"\"\n def __init__(self, strict=False, **data):\n if (\n not strict\n ): # This is temporary until we refactor all DS configs, allows HF to load models\n data = {k: v for k, v in data.items() if v != \"auto\"}\n super().__init__(**data)\n self._deprecated_fields_check(self)\n\n def _process_deprecated_field(self, pydantic_config, field):\n # Get information about the deprecated field\n fields_set = pydantic_config.__fields_set__\n dep_param = field.name\n kwargs = field.field_info.extra\n new_param_fn = kwargs.get(\"new_param_fn\", lambda x: x)\n param_value = new_param_fn(getattr(pydantic_config, dep_param))\n new_param = kwargs.get(\"new_param\", \"\")\n if dep_param in fields_set:\n logger.warning(f\"Config parameter {dep_param} is deprecated\" +\n (f\" use {new_param} instead\" if new_param else \"\"))\n # Check if there is a new param and if it should be set with a value\n if new_param and kwargs.get(\"set_new_param\", True):\n # Remove the deprecate field if there is a replacing field\n try:\n delattr(pydantic_config, dep_param)\n except Exception as e:\n logger.error(f\"Tried removing deprecated '{dep_param}' from config\")\n raise e\n\n # Set new param value\n new_param_nested = new_param.split(\".\")\n if len(new_param_nested) > 1:\n # If the new param exists in a subconfig, we need to get\n # the fields set for that subconfig\n pydantic_config = reduce(getattr,\n new_param_nested[:-1],\n pydantic_config)\n fields_set = pydantic_config.__fields_set__\n new_param_name = new_param_nested[-1]\n assert (\n new_param_name not in fields_set\n ), f\"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together\"\n # A custom function for converting the old param value to new param value can be provided\n try:\n setattr(pydantic_config, new_param_name, param_value)\n except Exception as e:\n logger.error(\n f\"Tried setting value for '{new_param}' with value 
from deprecated '{dep_param}'\"\n )\n raise e\n\n def _deprecated_fields_check(self, pydantic_config):\n fields = pydantic_config.__fields__\n for field in fields.values():\n if field.field_info.extra.get(\"deprecated\", False):\n self._process_deprecated_field(pydantic_config, field)\n\n class Config:\n validate_all = True\n validate_assignment = True\n use_enum_values = True\n allow_population_by_field_name = True\n extra = \"forbid\"\n\n\nclass pp_int(int):\n \"\"\"\n A wrapper for integers that will return a custom string or comma-formatted\n string of the integer. For example, print(pp_int(1e5)) will return\n \"10,000\". This is useful mainly for auto-generated documentation purposes.\n \"\"\"\n def __new__(cls, val, custom_print_str=None):\n inst = super().__new__(cls, val)\n inst.custom_print_str = custom_print_str\n return inst\n\n def __repr__(self):\n if self.custom_print_str:\n return self.custom_print_str\n return f\"{self.real:,}\"\n\n\n# adapted from https://stackoverflow.com/a/50701137/9201239\nclass ScientificNotationEncoder(json.JSONEncoder):\n \"\"\"\n This class overrides ``json.dumps`` default formatter.\n\n This version keeps everything as normal except formats numbers bigger than 1e3 using scientific notation.\n\n Just pass ``cls=ScientificNotationEncoder`` to ``json.dumps`` to activate it\n\n \"\"\"\n def iterencode(self, o, _one_shot=False, level=0):\n indent = self.indent if self.indent is not None else 4\n prefix_close = \" \" * level * indent\n level += 1\n prefix = \" \" * level * indent\n if isinstance(o, bool):\n return \"true\" if o else \"false\"\n elif isinstance(o, float) or isinstance(o, int):\n if o > 1e3:\n return f\"{o:e}\"\n else:\n return f\"{o}\"\n elif isinstance(o, collections.abc.Mapping):\n x = [\n f'\\n{prefix}\"{k}\": {self.iterencode(v, level=level)}' for k,\n v in o.items()\n ]\n return \"{\" + \", \".join(x) + f\"\\n{prefix_close}\" + \"}\"\n elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str):\n return f\"[{ f', '.join(map(self.iterencode, o)) }]\"\n return \"\\n, \".join(super().iterencode(o, _one_shot))\n\n\nclass DeepSpeedConfigObject(object):\n \"\"\"\n For json serialization\n \"\"\"\n def repr(self):\n return self.__dict__\n\n def __repr__(self):\n return json.dumps(\n self.__dict__,\n sort_keys=True,\n indent=4,\n cls=ScientificNotationEncoder,\n )\n\n\ndef get_scalar_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef get_list_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef get_dict_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef dict_raise_error_on_duplicate_keys(ordered_pairs):\n \"\"\"Reject duplicate keys.\"\"\"\n d = dict((k, v) for k, v in ordered_pairs)\n if len(d) != len(ordered_pairs):\n counter = collections.Counter([pair[0] for pair in ordered_pairs])\n keys = [key for key, value in counter.items() if value > 1]\n raise ValueError(\"Duplicate keys in DeepSpeed config: {}\".format(keys))\n return d\n", "path": "deepspeed/runtime/config_utils.py" } ]
[ { "content": "\"\"\"\nCopyright (c) Microsoft Corporation\nLicensed under the MIT license.\n\"\"\"\n\"\"\"\nCollection of DeepSpeed configuration utilities\n\"\"\"\nimport json\nimport collections\nimport collections.abc\nfrom functools import reduce\nfrom pydantic import BaseModel\nfrom deepspeed.utils import logger\n\n\nclass DeepSpeedConfigModel(BaseModel):\n \"\"\"\n This class should be used as a base for all DeepSpeed configs. It extends\n pydantic.BaseModel to allow for deprecated fields. To enable this feature,\n add deprecated=True to pydantic.Field:\n\n my_dep_field: int = Field(0, deprecated=True)\n\n Deprecated Field kwargs:\n - deprecated: [True|False], default False\n Enables / Disables deprecated fields\n - new_param: str, default \"\"\n Name of the field replacing the deprecated field\n - set_new_param: [True|False], default True\n If new_param is provided, enables setting the value of that param with\n deprecated field value\n - new_param_fn: callable, default (lambda x: x)\n If new_param is provided and set_new_param is True, this function will\n modify the value of the deprecated field before placing that value in\n the new_param field\n\n Example:\n my_new_field is replacing a deprecated my_old_field. The expected type\n for my_new_field is int while the expected type for my_old_field is\n str. We want to maintain backward compatibility with our configs, so we\n define the fields with:\n\n class MyExampleConfig(DeepSpeedConfigModel):\n my_new_field: int = 0\n my_old_field: str = Field('0',\n deprecated=True,\n new_param='my_new_field',\n new_param_fn=(lambda x: int(x)))\n \"\"\"\n def __init__(self, strict=False, **data):\n if (\n not strict\n ): # This is temporary until we refactor all DS configs, allows HF to load models\n data = {k: v for k, v in data.items() if v != \"auto\"}\n super().__init__(**data)\n self._deprecated_fields_check(self)\n\n def _process_deprecated_field(self, pydantic_config, field):\n # Get information about the deprecated field\n fields_set = pydantic_config.__fields_set__\n dep_param = field.name\n kwargs = field.field_info.extra\n new_param_fn = kwargs.get(\"new_param_fn\", lambda x: x)\n param_value = new_param_fn(getattr(pydantic_config, dep_param))\n new_param = kwargs.get(\"new_param\", \"\")\n if dep_param in fields_set:\n logger.warning(f\"Config parameter {dep_param} is deprecated\" +\n (f\" use {new_param} instead\" if new_param else \"\"))\n # Check if there is a new param and if it should be set with a value\n if new_param and kwargs.get(\"set_new_param\", True):\n # Remove the deprecate field if there is a replacing field\n try:\n delattr(pydantic_config, dep_param)\n except Exception as e:\n logger.error(f\"Tried removing deprecated '{dep_param}' from config\")\n raise e\n\n # Set new param value\n new_param_nested = new_param.split(\".\")\n if len(new_param_nested) > 1:\n # If the new param exists in a subconfig, we need to get\n # the fields set for that subconfig\n pydantic_config = reduce(getattr,\n new_param_nested[:-1],\n pydantic_config)\n fields_set = pydantic_config.__fields_set__\n new_param_name = new_param_nested[-1]\n assert (\n new_param_name not in fields_set\n ), f\"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together\"\n # A custom function for converting the old param value to new param value can be provided\n try:\n setattr(pydantic_config, new_param_name, param_value)\n except Exception as e:\n logger.error(\n f\"Tried setting value for '{new_param}' with value 
from deprecated '{dep_param}'\"\n )\n raise e\n\n def _deprecated_fields_check(self, pydantic_config):\n fields = pydantic_config.__fields__\n for field in fields.values():\n if field.field_info.extra.get(\"deprecated\", False):\n self._process_deprecated_field(pydantic_config, field)\n\n class Config:\n validate_all = True\n validate_assignment = True\n use_enum_values = True\n allow_population_by_field_name = True\n extra = \"forbid\"\n arbitrary_types_allowed = True\n\n\nclass pp_int(int):\n \"\"\"\n A wrapper for integers that will return a custom string or comma-formatted\n string of the integer. For example, print(pp_int(1e5)) will return\n \"10,000\". This is useful mainly for auto-generated documentation purposes.\n \"\"\"\n def __new__(cls, val, custom_print_str=None):\n inst = super().__new__(cls, val)\n inst.custom_print_str = custom_print_str\n return inst\n\n def __repr__(self):\n if self.custom_print_str:\n return self.custom_print_str\n return f\"{self.real:,}\"\n\n\n# adapted from https://stackoverflow.com/a/50701137/9201239\nclass ScientificNotationEncoder(json.JSONEncoder):\n \"\"\"\n This class overrides ``json.dumps`` default formatter.\n\n This version keeps everything as normal except formats numbers bigger than 1e3 using scientific notation.\n\n Just pass ``cls=ScientificNotationEncoder`` to ``json.dumps`` to activate it\n\n \"\"\"\n def iterencode(self, o, _one_shot=False, level=0):\n indent = self.indent if self.indent is not None else 4\n prefix_close = \" \" * level * indent\n level += 1\n prefix = \" \" * level * indent\n if isinstance(o, bool):\n return \"true\" if o else \"false\"\n elif isinstance(o, float) or isinstance(o, int):\n if o > 1e3:\n return f\"{o:e}\"\n else:\n return f\"{o}\"\n elif isinstance(o, collections.abc.Mapping):\n x = [\n f'\\n{prefix}\"{k}\": {self.iterencode(v, level=level)}' for k,\n v in o.items()\n ]\n return \"{\" + \", \".join(x) + f\"\\n{prefix_close}\" + \"}\"\n elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str):\n return f\"[{ f', '.join(map(self.iterencode, o)) }]\"\n return \"\\n, \".join(super().iterencode(o, _one_shot))\n\n\nclass DeepSpeedConfigObject(object):\n \"\"\"\n For json serialization\n \"\"\"\n def repr(self):\n return self.__dict__\n\n def __repr__(self):\n return json.dumps(\n self.__dict__,\n sort_keys=True,\n indent=4,\n cls=ScientificNotationEncoder,\n )\n\n\ndef get_scalar_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef get_list_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef get_dict_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef dict_raise_error_on_duplicate_keys(ordered_pairs):\n \"\"\"Reject duplicate keys.\"\"\"\n d = dict((k, v) for k, v in ordered_pairs)\n if len(d) != len(ordered_pairs):\n counter = collections.Counter([pair[0] for pair in ordered_pairs])\n keys = [key for key, value in counter.items() if value > 1]\n raise ValueError(\"Duplicate keys in DeepSpeed config: {}\".format(keys))\n return d\n", "path": "deepspeed/runtime/config_utils.py" } ]
diff --git a/deepspeed/runtime/config_utils.py b/deepspeed/runtime/config_utils.py index 81ef972ac0c4..08a50785ceb9 100755 --- a/deepspeed/runtime/config_utils.py +++ b/deepspeed/runtime/config_utils.py @@ -109,6 +109,7 @@ class Config: use_enum_values = True allow_population_by_field_name = True extra = "forbid" + arbitrary_types_allowed = True class pp_int(int):
PlasmaPy__PlasmaPy-1369
Make test order deterministic so we can use `pytest-xdist` in `plasmapy.particles`

The order in which tests are run in (what will soon be) `plasmapy.particles` is not deterministic. Some of the functionality in that subpackage uses [set](https://docs.python.org/3/tutorial/datastructures.html#sets) operations, which do not preserve order. Since the order of our tests sometimes depends on set operations, the test order changes from run to run. Nominally, unit tests should be able to be run in any order. However, I ran into a problem when trying to use [`pytest-xdist`](https://docs.pytest.org/en/3.0.1/xdist.html) to run tests in parallel, as this package currently requires test order to be deterministic when figuring out which tests to send to which processor. Since our test suite will only get bigger with time, it would be helpful to make our test order deterministic so that we have the capability of running tests in parallel.

The two possibilities that I can think of are:

- Use [OrderedSets](https://pypi.org/project/ordered-set/) instead of regular sets (with the disadvantage that this would require adding another dependency to PlasmaPy)
- Use sorting in the operations in which tests get selected (probably using `sorted`).

The files that are affected include:

- [ ] `test_ionization_state.py`
- [ ] `test_ionization_states.py`
- [ ] `test_parsing.py`
- [ ] `test_particle_class.py`
- [ ] `test_special_particles.py`

The quick way to test this is to install `pytest-xdist`, go to the tests directory, and then try running in the command prompt:

```pytest -n 4 test_parsing.py```

Thanks!

Make `IonizationState` and `IonizationStateCollection` tests independent of each other

I tried running our tests with the pytest extension pytest-randomly, and it turns out that many of the tests in `plasmapy/particles/test_ionization_state.py` and `plasmapy/particles/test_ionization_collection.py` fail when they are run in random order. This is because the tests depend on each other, which is generally something to avoid (something I only learned recently). We should modify these tests so that they don't depend on the order of test execution. That is to say, we should make these tests completely independent of each other.

I found this out by running these lines in the command line:
```bash
pip install pytest-randomly
pytest --randomly-seed=1235
```

These tests might be a good place to use pytest fixtures.
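Both suggestions in the two issues above are easy to show in a small hedged sketch: sorting a set before parametrizing gives a stable collection order for `pytest-xdist`, and a function-scoped fixture removes the hidden dependence between tests. The names below (`PARTICLE_SYMBOLS`, `fresh_state`) are hypothetical stand-ins, not the actual PlasmaPy tests.

```python
import pytest

# Hypothetical set-valued source of test cases; iterating over a set directly
# makes parametrization order nondeterministic between runs and workers.
PARTICLE_SYMBOLS = {"e-", "p+", "n", "alpha"}


# Sorting the set gives pytest (and pytest-xdist) a stable, repeatable order.
@pytest.mark.parametrize("symbol", sorted(PARTICLE_SYMBOLS))
def test_symbol_is_nonempty(symbol):
    assert symbol


# A function-scoped fixture hands each test a fresh object, so no test
# depends on mutations made by tests that happened to run earlier.
@pytest.fixture
def fresh_state():
    return {"charge": 1, "mass_numb": 1}  # hypothetical stand-in object


def test_mutation_is_isolated(fresh_state):
    fresh_state["charge"] = 0
    assert fresh_state["charge"] == 0


def test_sees_unmutated_state(fresh_state):
    assert fresh_state["charge"] == 1
```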
[ { "content": "\"\"\"Utilities to help with testing.\"\"\"\n\n__all__ = [\n \"assert_can_handle_nparray\",\n \"run_test\",\n \"run_test_equivalent_calls\",\n]\n\nimport astropy.constants as const\nimport astropy.tests.helper as astrohelper\nimport astropy.units as u\nimport collections\nimport functools\nimport inspect\nimport numpy as np\nimport pytest\nimport warnings\n\nfrom typing import Any, Callable, Dict\n\nfrom plasmapy.tests.helpers.exceptions import (\n InvalidTestError,\n MissingExceptionFail,\n MissingWarningFail,\n TypeMismatchFail,\n UnexpectedExceptionFail,\n UnexpectedResultFail,\n)\nfrom plasmapy.utils.code_repr import _name_with_article, _object_name, call_string\nfrom plasmapy.utils.exceptions import PlasmaPyWarning\n\n\ndef _process_input(wrapped_function: Callable): # coverage: ignore\n \"\"\"\n Allow `run_test` to take a single positional argument that is a\n `list` or `tuple` in lieu of using multiple positional/keyword\n arguments as usual. If `len` of this argument returns `3`, then\n it assumes that `kwargs` is an empty `dict` and that the expected\n result/outcome is the last item.\n \"\"\"\n\n def decorator(wrapped_function: Callable):\n wrapped_signature = inspect.signature(wrapped_function)\n\n @functools.wraps(wrapped_function)\n def wrapper(*args, **kwargs):\n arguments = wrapped_signature.bind(*args, **kwargs).arguments\n if (\n len(args) == 1\n and len(kwargs) == 0\n and isinstance(args[0], (list, tuple))\n ):\n inputs = args[0]\n if len(inputs) not in (3, 4):\n raise RuntimeError(f\"{args} is an invalid input to run_test.\")\n new_kwargs = {\"func\": inputs[0], \"args\": inputs[1]}\n new_kwargs[\"kwargs\"] = inputs[2] if len(inputs) == 4 else {}\n new_kwargs[\"expected_outcome\"] = (\n inputs[3] if len(inputs) == 4 else inputs[2]\n )\n else:\n new_kwargs = {argname: argval for argname, argval in arguments.items()}\n return wrapped_function(**new_kwargs)\n\n return wrapper\n\n return decorator(wrapped_function)\n\n\n@_process_input\ndef run_test(\n func,\n args: Any = (),\n kwargs: Dict = None,\n expected_outcome: Any = None,\n rtol: float = 0.0,\n atol: float = 0.0,\n): # coverage: ignore\n \"\"\"\n Test that a function or class returns the expected result, raises\n the expected exception, or issues an expected warning for the\n supplied positional and keyword arguments.\n\n Parameters\n ----------\n func: callable, list, or tuple\n The `callable` to be tested. The first (and sole) argument to\n `~plasmapy.utils.run_test` may alternatively be a list or tuple\n containing these arguments (optionally omitting `kwargs` if the\n `len` returns 3).\n\n args: tuple or object\n The positional arguments to `func`.\n\n kwargs: dict\n The keyword arguments to `func`.\n\n expected_outcome: object\n The expected result, exception, or warning from\n `func(*args, **kwargs)`. 
This may also be a `tuple` of length\n two that contains the expected result as the first item and the\n expected warning as the second item.\n\n rtol : float\n The relative tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n atol : float\n The absolute tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n Returns\n -------\n `None`\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If the test returns a result that is different from the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.TypeMismatchFail\n If the actual result is of a different type than the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception occurs when no exception or a different\n exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingExceptionFail\n If no exception is raised when an exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingWarningFail\n An expected warning is not issued.\n\n ~astropy.units.UnitsError\n If the result has different units than expected.\n\n TypeError\n If the equality of the actual result and expected result cannot\n be determined (e.g., for a class lacking an `__eq__` method.\n\n Examples\n --------\n The simplest way to use `~plasmapy.utils.run_test` is with inputs\n for the function to be tests, the positional arguments in a `tuple`\n or `list`, the keyword arguments in a `dict`, and then finally the\n expected result or outcome.\n\n >>> args = tuple()\n >>> kwargs = dict()\n >>> run_test(lambda: 0, args, kwargs, 0)\n\n If `expected` is a an exception or warning, then\n `~plasmapy.utils.pytest_helpers.run_test` will raise an exception if\n the expected exception is not raised or the expected warning is not\n issued.\n\n >>> from warnings import warn\n\n >>> issue_warning = lambda: warn(\"Electrons are weird!\", UserWarning)\n >>> run_test(issue_warning, args, kwargs, UserWarning)\n\n >>> def raise_exception(): raise RuntimeError\n >>> run_test(raise_exception, args, kwargs, RuntimeError)\n\n For warnings, `~plasmapy.utils.run_test` can accept a `tuple` of two\n items where the first item is the expected result and the second\n item is the expected warning.\n\n .. code-block:: python\n\n def return_arg_and_warn(x):\n warn(\"\", UserWarning)\n return x\n\n run_test(return_arg_and_warn, 1, {}, (1, UserWarning))\n\n This function is also flexible enough that it can accept a `tuple`\n or `list` as its sole argument, with the arguments in the same\n order as in the function signature.\n\n >>> return_arg = lambda x: x\n >>> inputs = (return_arg, 42, {}, 42)\n >>> run_test(inputs)\n\n If the `tuple` or `list` has a length of `3`, then\n `~plasmapy.utils.run_test` assumes that `kwargs` is missing.\n\n >>> inputs_without_kwargs = [return_arg, 42, 42]\n >>> run_test(inputs_without_kwargs)\n\n .. code-block:: python\n\n import pytest\n\n def func(x, raise_exception=False, issue_warning=False):\n if raise_exception:\n raise ValueError(\"I'm sorry, Dave. 
I'm afraid I can't do that.\")\n elif issue_warning:\n warn(\"Open the pod bay doors, HAL.\", UserWarning)\n return x\n\n inputs_table = [\n (func, 1, 1),\n (func, (2,), {}, 2),\n (func, 3, {'raise_exception': True}, ValueError),\n (func, 4, {'issue_warning': True}, UserWarning),\n (func, 5, {'issue_warning': True}, (5, UserWarning)),\n ]\n\n @pytest.mark.parametrize('inputs', inputs_table)\n def test_func(inputs):\n run_test(inputs)\n\n \"\"\"\n\n if kwargs is None:\n kwargs = {}\n\n if not isinstance(args, tuple):\n args = (args,)\n\n if not callable(func):\n raise InvalidTestError(\n f\"The argument func = {func} to run_test must be callable.\"\n )\n\n # By including the function call that is run during a test in error\n # messages, we can make it easier to reproduce the error in an\n # interactive session.\n\n call_str = call_string(func, args, kwargs)\n\n # There are many possibilities for expected outcomes that we must\n # keep track of, including exceptions being raised and warnings\n # being issued.\n\n expected = collections.defaultdict(lambda: None)\n\n if inspect.isclass(expected_outcome):\n subclass_of_Exception = issubclass(expected_outcome, Exception)\n subclass_of_Warning = issubclass(expected_outcome, Warning)\n if subclass_of_Warning:\n expected[\"warning\"] = expected_outcome\n elif subclass_of_Exception and not subclass_of_Warning:\n expected[\"exception\"] = expected_outcome\n\n # If a warning is issued, then there may also be an expected result.\n\n if isinstance(expected_outcome, tuple):\n length_not_two = len(expected_outcome) != 2\n is_not_class = not inspect.isclass(expected_outcome[1])\n is_not_warning = (\n True if is_not_class else not issubclass(expected_outcome[1], Warning)\n )\n if length_not_two or is_not_warning:\n raise InvalidTestError(\"Invalid expected outcome in run_test.\")\n expected[\"result\"] = expected_outcome[0]\n expected[\"warning\"] = expected_outcome[1]\n\n if expected[\"exception\"] is None and expected[\"warning\"] is None:\n expected[\"result\"] = expected_outcome\n\n # First we go through all of the possibilities for when an exception\n # is expected to be raised. If no exception is raised, then we want\n # an error message that includes the result. If the wrong exception\n # is raised, then we want an error message that includes that\n # exception. 
An alternative would be to use `with pytest.raises()`\n # but this makes it easier to break down what the error messages\n # should be.\n\n if expected[\"exception\"]:\n\n expected_exception = expected[\"exception\"]\n\n try:\n result = func(*args, **kwargs)\n except expected_exception as exc_result:\n resulting_exception = exc_result.__reduce__()[0]\n if resulting_exception.__name__ == expected_exception.__name__:\n return None\n else:\n raise UnexpectedExceptionFail(\n f\"The command {call_str} did not specifically raise \"\n f\"{_name_with_article(expected_exception)} as expected, but \"\n f\"instead raised {_name_with_article(resulting_exception)} \"\n f\"which is a subclass of the expected exception.\"\n )\n except Exception as exc_unexpected_exception:\n unexpected_exception = exc_unexpected_exception.__reduce__()[0]\n raise UnexpectedExceptionFail(\n f\"The command {call_str} did not raise \"\n f\"{_name_with_article(expected_exception)} as expected, \"\n f\"but instead raised {_name_with_article(unexpected_exception)}.\"\n ) from exc_unexpected_exception\n else:\n raise MissingExceptionFail(\n f\"The command {call_str} did not raise \"\n f\"{_name_with_article(expected_exception)} as expected, but instead \"\n f\"returned {_object_name(result)}.\"\n )\n\n try:\n with pytest.warns(expected[\"warning\"]):\n result = func(*args, **kwargs)\n except pytest.raises.Exception as missing_warning:\n raise MissingWarningFail(\n f\"The command {call_str} should issue \"\n f\"{_name_with_article(expected['warning'])}, but instead returned \"\n f\"{_object_name(result)}.\"\n ) from missing_warning\n except Exception as exception_no_warning:\n raise UnexpectedExceptionFail(\n f\"The command {call_str} unexpectedly raised \"\n f\"{_name_with_article(exception_no_warning.__reduce__()[0])} \"\n f\"instead of returning the expected value of \"\n f\"{_object_name(expected['result'])}.\"\n ) from exception_no_warning\n\n if isinstance(expected[\"result\"], u.UnitBase):\n\n if isinstance(result, u.UnitBase):\n if result != expected[\"result\"]:\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} instead of the expected \"\n f\"value of {_object_name(expected['result'])}.\"\n )\n return None\n\n if not isinstance(result, (u.Quantity, const.Constant, const.EMConstant)):\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} instead of a quantity or \"\n f\"constant with units of \"\n f\"{_object_name(expected['result'])}.\"\n )\n\n if result.unit != expected[\"result\"]:\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)}, which has units of \"\n f\"{result.unit} instead of the expected units of \"\n f\"{_object_name(expected['result'])}.\"\n )\n\n return None\n\n if isinstance(expected[\"result\"], (u.Quantity, const.Constant, const.EMConstant)):\n if not result.unit == expected[\"result\"].unit:\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} which has different units \"\n f\"than the expected result of \"\n f\"{_object_name(expected['result'])}.\"\n )\n\n if np.allclose(result.value, expected[\"result\"].value):\n return None\n\n if expected[\"result\"] is None:\n return None\n\n if type(result) != type(expected[\"result\"]):\n raise TypeMismatchFail(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} which has type \"\n f\"{_object_name(type(result))}, \"\n f\"instead of the expected value of \"\n f\"{_object_name(expected['result'])} 
which has type \"\n f\"{_object_name(type(expected['result']))}.\"\n )\n\n try:\n if result == expected[\"result\"]:\n return None\n except Exception as exc_equality: # coverage: ignore\n raise TypeError(\n f\"The equality of {_object_name(result)} and \"\n f\"{_object_name(expected['result'])} \"\n f\"cannot be evaluated.\"\n ) from exc_equality\n\n try:\n different_length = len(result) != len(expected[\"result\"])\n except Exception:\n different_length = False\n\n try:\n all_close = np.allclose(expected[\"result\"], result, rtol=rtol, atol=atol)\n if all_close and not different_length:\n return None\n except Exception:\n pass\n\n errmsg = (\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} instead of the expected \"\n f\"value of {_object_name(expected['result'])}.\"\n )\n\n if atol or rtol:\n errmsg += \" with \"\n if atol:\n errmsg += f\"atol = {atol}\"\n if atol and rtol:\n errmsg += \" and \"\n if rtol:\n errmsg += f\"rtol = {rtol}\"\n errmsg += \".\"\n\n raise UnexpectedResultFail(errmsg)\n\n\ndef run_test_equivalent_calls(*test_inputs, require_same_type: bool = True):\n \"\"\"\n Test that different functions/inputs return equivalent results.\n\n Parameters\n ----------\n test_inputs\n The functions and inputs to the tests in an allowed format, as\n described below.\n\n require_same_type: bool\n If `True` (the default), then all of the results are required to\n be of the same type. If `False`, results do not need to be of\n the same type (e.g., cases like `1.0 == 1` will not raise an\n exception).\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If not all of the results are equivalent, or not all of the\n results are of the same type and `require_same_type` evaluates\n to `True`.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception is raised whilst attempting to run one of the\n test cases.\n\n ~plasmapy.tests.helpers.exceptions.InvalidTestError\n If there is an error associated with the inputs or the test is\n set up incorrectly.\n\n Examples\n --------\n There are several possible formats that can be accepted by this\n `~plasmapy.utils.run_test_equivalent_calls` to test that different\n combinations of functions (or other `callable` objects), positional\n arguments, and keyword arguments return equivalent results.\n\n To test a single function that takes a single positional argument,\n then `test_inputs` may be the function followed by an arbitrary\n number of positional arguments to be included into the function.\n\n >>> def f(x): return x ** 2\n >>> run_test_equivalent_calls(f, -1, 1)\n\n To test a single function with an arbitrary number of positional and\n keyword arguments, the first argument should be the function,\n followed by an arbitrary number of `tuple` or `list` objects that\n contain a `tuple` or `list` containing the positional arguments, and\n a `dict` containing the keyword arguments.\n\n >>> def g(x, y, z): return x + y + z\n >>> run_test_equivalent_calls(g, ((1, 2, 3), {}), ((3, 2), {'z': 1}))\n\n If there is only one positional argument, then it is not necessary\n to include it in a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, ([1], {}), ([1], {}))\n >>> run_test_equivalent_calls(f, (1, {}), (1, {}))\n\n To test multiple functions with an arbitrary number of positional\n and keyword arguments, use a series of `tuple` or `list` objects\n that contain the function for each test, a `tuple` or `list` with\n the positional arguments, and a `dict` with the keyword arguments.\n\n 
>>> def p(x, y=None): return x + y if y else x\n >>> def q(x, y=None): return x + 1 if y else x\n\n >>> run_test_equivalent_calls([p, (1,), {'y': 1}], [q, (2,), {'y': False}])\n\n The inputs may also be passed in as a whole as a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, -1, 1)\n >>> run_test_equivalent_calls([f, -1, 1])\n\n If `require_same_type` is `False`, then an exception will not be\n raised if the results are of different types.\n\n >>> run_test_equivalent_calls(f, -1, 1.0, require_same_type=False)\n\n \"\"\"\n\n if len(test_inputs) == 1:\n test_inputs = test_inputs[0]\n\n if not isinstance(test_inputs, (tuple, list)):\n raise InvalidTestError(\n f\"The argument to run_test_equivalent_calls must be a tuple \"\n f\"or list. The provided inputs are: {test_inputs}\"\n )\n\n if callable(test_inputs[0]):\n func = test_inputs[0]\n test_inputs = test_inputs[1:]\n else:\n func = None\n\n # Make sure everything is a list to allow f(*args)\n\n test_inputs = [\n test_input if isinstance(test_input, (list, tuple)) else [test_input]\n for test_input in test_inputs\n ]\n\n # Construct a list of dicts, of which each dict contains the\n # function, positional arguments, and keyword arguments for each\n # test case.\n\n test_cases = []\n\n for inputs in test_inputs:\n test_case = {}\n\n test_case[\"function\"] = func if func else inputs[0]\n test_case[\"args\"] = inputs[0] if func else inputs[1]\n\n if not isinstance(test_case[\"args\"], (list, tuple)):\n test_case[\"args\"] = [test_case[\"args\"]]\n\n if func:\n test_case[\"kwargs\"] = inputs[1] if len(inputs) == 2 else {}\n else:\n test_case[\"kwargs\"] = inputs[2] if len(inputs) == 3 else {}\n\n try:\n test_case[\"call string\"] = call_string(\n test_case[\"function\"], test_case[\"args\"], test_case[\"kwargs\"]\n )\n except Exception:\n test_case[\"call string\"] = (\n f\"function = {test_case['function']}, \"\n f\"args = {test_case['args']}, and \"\n f\"kwargs = {test_case['kwargs']}\"\n )\n\n test_cases.append(test_case)\n\n if len(test_cases) < 2:\n raise InvalidTestError(\n \"At least two tests are needed for run_test_equivalent_calls\"\n )\n\n # Check to make sure that each function is callable, each set of\n # args is a list or tuple, and each set of kwargs is a dict. 
Make\n # sure that the error message contains all of the problems.\n\n bad_inputs_errmsg = \"\"\n\n for test_case in test_cases:\n if not callable(test_case[\"function\"]):\n bad_inputs_errmsg += f\"\\n{test_case['function']} is not callable \"\n if not isinstance(test_case[\"args\"], (tuple, list)):\n bad_inputs_errmsg += f\"\\n{test_case['args']} is not a list or tuple \"\n if not isinstance(test_case[\"kwargs\"], dict):\n bad_inputs_errmsg += f\"\\n{test_case['kwargs']} is not a dict \"\n\n if bad_inputs_errmsg:\n raise InvalidTestError(bad_inputs_errmsg)\n\n # Now we can get the results for each test case.\n\n for test_case in test_cases:\n try:\n f, args, kwargs = (\n test_case[\"function\"],\n test_case[\"args\"],\n test_case[\"kwargs\"],\n )\n test_case[\"result\"] = f(*args, **kwargs)\n test_case[\"type\"] = type(test_case[\"result\"])\n except Exception as exc:\n raise UnexpectedExceptionFail(\n f\"Unable to evaluate {test_case['call string']}.\"\n )\n\n # Make sure that all of the results evaluate as equal to the first\n # result.\n\n results = [test_case[\"result\"] for test_case in test_cases]\n types = [test_case[\"type\"] for test_case in test_cases]\n\n try:\n equals_first_result = [result == results[0] for result in results]\n except Exception as exc: # coverage: ignore\n raise UnexpectedExceptionFail(\n \"Unable to determine equality properties of results.\"\n ) from exc\n\n equals_first_type = [result_type == types[0] for result_type in types]\n\n not_all_equal = not all(equals_first_result)\n not_all_same_type = not all(equals_first_type)\n\n if not_all_equal:\n errmsg = \"The following tests did not all produce identical results:\"\n elif not_all_same_type and require_same_type:\n errmsg = \"The following tests did not all produce results of the same type:\"\n\n if not_all_equal or (not_all_same_type and require_same_type):\n\n for test_case in test_cases:\n errmsg += (\n f\"\\n {test_case['call string']} yielded {test_case['result']} \"\n f\"of type {test_case['type']}\"\n )\n\n raise UnexpectedResultFail(errmsg)\n\n\ndef assert_can_handle_nparray(\n function_to_test,\n insert_some_nans=None,\n insert_all_nans=None,\n kwargs=None,\n):\n \"\"\"\n Test for ability to handle numpy array quantities.\n\n Parameters\n ----------\n function_to_test\n The function to be tested for ability to handle numpy array quantities.\n Arguments are automatically given a vector input based on their\n variable name. 
Current args that are interpreted as vectors are:\n `[\"T\", \"T_i\", \"T_e\", \"temperature\"]`\n `[\"n\", \"n_i\", \"n_e\", \"density\"]`\n `[\"B\"]`\n `[\"V\", \"Vperp\"]`\n `[\"coulomb_log\"]`\n `[\"characteristic_length\"]`\n\n insert_some_nans: `list`\n List of argument names in which to insert some np.nan values.\n These must be arguments that will be tested as vectors as listed\n above.\n\n insert_all_nans: `list`\n List of argument names to fill entirely with np.nan values.\n\n kwargs: `dict`\n Arguments to pass directly to the function in under test, in the\n normal kwargs python dictionary format.\n\n Raises\n ------\n ValueError\n If this function cannot interpret a parameter of function_to_test.\n\n Examples\n --------\n >>> from plasmapy.formulary.parameters import Alfven_speed, gyrofrequency\n >>> assert_can_handle_nparray(Alfven_speed)\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": True})\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": False})\n \"\"\"\n\n if insert_some_nans is None:\n insert_some_nans = []\n\n if insert_all_nans is None:\n insert_all_nans = []\n\n if kwargs is None:\n kwargs = {}\n\n def _prepare_input(\n param_name, param_default, insert_some_nans, insert_all_nans, kwargs\n ):\n \"\"\"\n Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.\n \"\"\"\n # first things first: let any passed in kwarg right through (VIP access)\n if param_name in kwargs.keys():\n return (kwargs[param_name],) * 4\n\n # else, if it's a recognized variable name, give it a reasonable unit and magnitude\n elif param_name in [\"particle\", \"ion_particle\", \"ion\"]:\n if not (param_default is inspect._empty or param_default is None):\n return (param_default,) * 4\n else:\n return (\"p\",) * 4\n elif param_name == \"particles\" or param_name == \"species\":\n if not (param_default is inspect._empty):\n return (param_default,) * 4\n else:\n return ((\"e\", \"p\"),) * 4\n elif param_name in [\"T\", \"T_i\", \"T_e\", \"temperature\"]:\n unit = u.eV\n magnitude = 1.0\n elif param_name in [\"n\", \"n_i\", \"n_e\", \"density\"]:\n unit = u.m ** -3\n magnitude = 1e20\n elif param_name == \"B\":\n unit = u.G\n magnitude = 1e3\n elif param_name in [\"V\", \"Vperp\"]:\n unit = u.m / u.s\n magnitude = 1e5\n elif param_name == \"coulomb_log\":\n unit = 1.0\n magnitude = 1e1\n elif param_name == \"characteristic_length\":\n unit = u.m\n magnitude = 1.0\n elif param_name == \"k\":\n unit = u.m ** -1\n magnitude = 1.0\n\n # else, last resort, if it has a default argument, go with that:\n elif not (param_default is inspect._empty):\n return (param_default,) * 4\n\n else:\n raise ValueError(f\"Unrecognized function input: {param_name}\")\n\n # now knowing unit and magnitude, set up the 0d, 1d, 2d, and 3d arrays:\n input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))\n input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))\n input_data_1d = np.arange(1.0, 5.0, 1.0)\n if param_name in insert_some_nans:\n input_data_3d[0, 0, 1] = np.nan\n input_data_3d[0, 1, 0] = np.nan\n input_data_2d[0, 1] = np.nan\n input_data_2d[1, 0] = np.nan\n input_data_1d[1] = np.nan\n elif param_name in insert_all_nans:\n input_data_3d = np.ones((2, 2, 2)) * np.nan\n input_data_2d = np.ones((2, 2)) * np.nan\n input_data_1d = np.ones(4) * np.nan\n input_data_3d *= magnitude\n input_data_3d *= unit\n input_data_2d *= magnitude\n input_data_2d *= unit\n input_data_1d *= magnitude\n input_data_1d *= unit\n input_data_0d = input_data_1d[3]\n return 
input_data_0d, input_data_1d, input_data_2d, input_data_3d\n\n # call _prepare_input to prepare 0d, 1d, and 2d sets of arguments for the function:\n function_sig = inspect.signature(function_to_test)\n function_params = function_sig.parameters\n args_0d = dict()\n args_1d = dict()\n args_2d = dict()\n args_3d = dict()\n param_names = [elm for elm in function_params.keys()]\n for idx, key in enumerate(function_params):\n args_0d[key], args_1d[key], args_2d[key], args_3d[key] = _prepare_input(\n param_names[idx],\n function_params[key].default,\n insert_some_nans,\n insert_all_nans,\n kwargs,\n )\n\n # call the function with the prepared argument sets:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=PlasmaPyWarning)\n result_0d = function_to_test(**args_0d)\n result_1d = function_to_test(**args_1d)\n result_2d = function_to_test(**args_2d)\n result_3d = function_to_test(**args_3d)\n\n # assert that the 1d, 2d, 3d versions get the same result (elementwise) as the 0d version:\n # (if the function returns multiple values, loop through and test each)\n try:\n scalar_testable = result_0d.value\n except AttributeError:\n scalar_testable = result_0d\n if np.isscalar(scalar_testable):\n astrohelper.assert_quantity_allclose(result_0d, result_1d[3])\n astrohelper.assert_quantity_allclose(result_0d, result_2d[1, 1])\n astrohelper.assert_quantity_allclose(result_0d, result_3d[0, 1, 1])\n else:\n for idx, res_0d in enumerate(result_0d):\n astrohelper.assert_quantity_allclose(res_0d, result_1d[idx][3])\n astrohelper.assert_quantity_allclose(res_0d, result_2d[idx][1, 1])\n astrohelper.assert_quantity_allclose(res_0d, result_3d[idx][0, 1, 1])\n", "path": "plasmapy/utils/pytest_helpers/pytest_helpers.py" } ]
[ { "content": "\"\"\"Utilities to help with testing.\"\"\"\n\n__all__ = [\n \"assert_can_handle_nparray\",\n \"run_test\",\n \"run_test_equivalent_calls\",\n]\n\nimport astropy.constants as const\nimport astropy.tests.helper as astrohelper\nimport astropy.units as u\nimport collections\nimport functools\nimport inspect\nimport numpy as np\nimport pytest\nimport warnings\n\nfrom typing import Any, Callable, Dict\n\nfrom plasmapy.tests.helpers.exceptions import (\n InvalidTestError,\n MissingExceptionFail,\n MissingWarningFail,\n TypeMismatchFail,\n UnexpectedExceptionFail,\n UnexpectedResultFail,\n)\nfrom plasmapy.utils.code_repr import _name_with_article, _object_name, call_string\nfrom plasmapy.utils.exceptions import PlasmaPyWarning\n\n\ndef _process_input(wrapped_function: Callable): # coverage: ignore\n \"\"\"\n Allow `run_test` to take a single positional argument that is a\n `list` or `tuple` in lieu of using multiple positional/keyword\n arguments as usual. If `len` of this argument returns `3`, then\n it assumes that `kwargs` is an empty `dict` and that the expected\n result/outcome is the last item.\n \"\"\"\n\n def decorator(wrapped_function: Callable):\n wrapped_signature = inspect.signature(wrapped_function)\n\n @functools.wraps(wrapped_function)\n def wrapper(*args, **kwargs):\n arguments = wrapped_signature.bind(*args, **kwargs).arguments\n if (\n len(args) == 1\n and len(kwargs) == 0\n and isinstance(args[0], (list, tuple))\n ):\n inputs = args[0]\n if len(inputs) not in (3, 4):\n raise RuntimeError(f\"{args} is an invalid input to run_test.\")\n new_kwargs = {\"func\": inputs[0], \"args\": inputs[1]}\n new_kwargs[\"kwargs\"] = inputs[2] if len(inputs) == 4 else {}\n new_kwargs[\"expected_outcome\"] = (\n inputs[3] if len(inputs) == 4 else inputs[2]\n )\n else:\n new_kwargs = {argname: argval for argname, argval in arguments.items()}\n return wrapped_function(**new_kwargs)\n\n return wrapper\n\n return decorator(wrapped_function)\n\n\n@_process_input\ndef run_test(\n func,\n args: Any = (),\n kwargs: Dict = None,\n expected_outcome: Any = None,\n rtol: float = 0.0,\n atol: float = 0.0,\n): # coverage: ignore\n \"\"\"\n Test that a function or class returns the expected result, raises\n the expected exception, or issues an expected warning for the\n supplied positional and keyword arguments.\n\n Parameters\n ----------\n func: callable, list, or tuple\n The `callable` to be tested. The first (and sole) argument to\n `~plasmapy.utils.run_test` may alternatively be a list or tuple\n containing these arguments (optionally omitting `kwargs` if the\n `len` returns 3).\n\n args: tuple or object\n The positional arguments to `func`.\n\n kwargs: dict\n The keyword arguments to `func`.\n\n expected_outcome: object\n The expected result, exception, or warning from\n `func(*args, **kwargs)`. 
This may also be a `tuple` of length\n two that contains the expected result as the first item and the\n expected warning as the second item.\n\n rtol : float\n The relative tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n atol : float\n The absolute tolerance to be used by `~numpy.allclose` in an\n element-wise comparison, defaulting to `0`.\n\n Returns\n -------\n `None`\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If the test returns a result that is different from the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.TypeMismatchFail\n If the actual result is of a different type than the expected\n result.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception occurs when no exception or a different\n exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingExceptionFail\n If no exception is raised when an exception is expected.\n\n ~plasmapy.tests.helpers.exceptions.MissingWarningFail\n An expected warning is not issued.\n\n ~astropy.units.UnitsError\n If the result has different units than expected.\n\n TypeError\n If the equality of the actual result and expected result cannot\n be determined (e.g., for a class lacking an `__eq__` method.\n\n Examples\n --------\n The simplest way to use `~plasmapy.utils.run_test` is with inputs\n for the function to be tests, the positional arguments in a `tuple`\n or `list`, the keyword arguments in a `dict`, and then finally the\n expected result or outcome.\n\n >>> args = tuple()\n >>> kwargs = dict()\n >>> run_test(lambda: 0, args, kwargs, 0)\n\n If `expected` is a an exception or warning, then\n `~plasmapy.utils.pytest_helpers.run_test` will raise an exception if\n the expected exception is not raised or the expected warning is not\n issued.\n\n >>> from warnings import warn\n\n >>> issue_warning = lambda: warn(\"Electrons are weird!\", UserWarning)\n >>> run_test(issue_warning, args, kwargs, UserWarning)\n\n >>> def raise_exception(): raise RuntimeError\n >>> run_test(raise_exception, args, kwargs, RuntimeError)\n\n For warnings, `~plasmapy.utils.run_test` can accept a `tuple` of two\n items where the first item is the expected result and the second\n item is the expected warning.\n\n .. code-block:: python\n\n def return_arg_and_warn(x):\n warn(\"\", UserWarning)\n return x\n\n run_test(return_arg_and_warn, 1, {}, (1, UserWarning))\n\n This function is also flexible enough that it can accept a `tuple`\n or `list` as its sole argument, with the arguments in the same\n order as in the function signature.\n\n >>> return_arg = lambda x: x\n >>> inputs = (return_arg, 42, {}, 42)\n >>> run_test(inputs)\n\n If the `tuple` or `list` has a length of `3`, then\n `~plasmapy.utils.run_test` assumes that `kwargs` is missing.\n\n >>> inputs_without_kwargs = [return_arg, 42, 42]\n >>> run_test(inputs_without_kwargs)\n\n .. code-block:: python\n\n import pytest\n\n def func(x, raise_exception=False, issue_warning=False):\n if raise_exception:\n raise ValueError(\"I'm sorry, Dave. 
I'm afraid I can't do that.\")\n elif issue_warning:\n warn(\"Open the pod bay doors, HAL.\", UserWarning)\n return x\n\n inputs_table = [\n (func, 1, 1),\n (func, (2,), {}, 2),\n (func, 3, {'raise_exception': True}, ValueError),\n (func, 4, {'issue_warning': True}, UserWarning),\n (func, 5, {'issue_warning': True}, (5, UserWarning)),\n ]\n\n @pytest.mark.parametrize('inputs', inputs_table)\n def test_func(inputs):\n run_test(inputs)\n\n \"\"\"\n\n if kwargs is None:\n kwargs = {}\n\n if not type(args) in [tuple, list]:\n args = (args,)\n\n if not callable(func):\n raise InvalidTestError(\n f\"The argument func = {func} to run_test must be callable.\"\n )\n\n # By including the function call that is run during a test in error\n # messages, we can make it easier to reproduce the error in an\n # interactive session.\n\n call_str = call_string(func, args, kwargs)\n\n # There are many possibilities for expected outcomes that we must\n # keep track of, including exceptions being raised and warnings\n # being issued.\n\n expected = collections.defaultdict(lambda: None)\n\n if inspect.isclass(expected_outcome):\n subclass_of_Exception = issubclass(expected_outcome, Exception)\n subclass_of_Warning = issubclass(expected_outcome, Warning)\n if subclass_of_Warning:\n expected[\"warning\"] = expected_outcome\n elif subclass_of_Exception and not subclass_of_Warning:\n expected[\"exception\"] = expected_outcome\n\n # If a warning is issued, then there may also be an expected result.\n\n if isinstance(expected_outcome, tuple):\n length_not_two = len(expected_outcome) != 2\n is_not_class = not inspect.isclass(expected_outcome[1])\n is_not_warning = (\n True if is_not_class else not issubclass(expected_outcome[1], Warning)\n )\n if length_not_two or is_not_warning:\n raise InvalidTestError(\"Invalid expected outcome in run_test.\")\n expected[\"result\"] = expected_outcome[0]\n expected[\"warning\"] = expected_outcome[1]\n\n if expected[\"exception\"] is None and expected[\"warning\"] is None:\n expected[\"result\"] = expected_outcome\n\n # First we go through all of the possibilities for when an exception\n # is expected to be raised. If no exception is raised, then we want\n # an error message that includes the result. If the wrong exception\n # is raised, then we want an error message that includes that\n # exception. 
An alternative would be to use `with pytest.raises()`\n # but this makes it easier to break down what the error messages\n # should be.\n\n if expected[\"exception\"]:\n\n expected_exception = expected[\"exception\"]\n\n try:\n result = func(*args, **kwargs)\n except expected_exception as exc_result:\n resulting_exception = exc_result.__reduce__()[0]\n if resulting_exception.__name__ == expected_exception.__name__:\n return None\n else:\n raise UnexpectedExceptionFail(\n f\"The command {call_str} did not specifically raise \"\n f\"{_name_with_article(expected_exception)} as expected, but \"\n f\"instead raised {_name_with_article(resulting_exception)} \"\n f\"which is a subclass of the expected exception.\"\n )\n except Exception as exc_unexpected_exception:\n unexpected_exception = exc_unexpected_exception.__reduce__()[0]\n raise UnexpectedExceptionFail(\n f\"The command {call_str} did not raise \"\n f\"{_name_with_article(expected_exception)} as expected, \"\n f\"but instead raised {_name_with_article(unexpected_exception)}.\"\n ) from exc_unexpected_exception\n else:\n raise MissingExceptionFail(\n f\"The command {call_str} did not raise \"\n f\"{_name_with_article(expected_exception)} as expected, but instead \"\n f\"returned {_object_name(result)}.\"\n )\n\n try:\n with pytest.warns(expected[\"warning\"]):\n result = func(*args, **kwargs)\n except pytest.raises.Exception as missing_warning:\n raise MissingWarningFail(\n f\"The command {call_str} should issue \"\n f\"{_name_with_article(expected['warning'])}, but instead returned \"\n f\"{_object_name(result)}.\"\n ) from missing_warning\n except Exception as exception_no_warning:\n raise UnexpectedExceptionFail(\n f\"The command {call_str} unexpectedly raised \"\n f\"{_name_with_article(exception_no_warning.__reduce__()[0])} \"\n f\"instead of returning the expected value of \"\n f\"{_object_name(expected['result'])}.\"\n ) from exception_no_warning\n\n if isinstance(expected[\"result\"], u.UnitBase):\n\n if isinstance(result, u.UnitBase):\n if result != expected[\"result\"]:\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} instead of the expected \"\n f\"value of {_object_name(expected['result'])}.\"\n )\n return None\n\n if not isinstance(result, (u.Quantity, const.Constant, const.EMConstant)):\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} instead of a quantity or \"\n f\"constant with units of \"\n f\"{_object_name(expected['result'])}.\"\n )\n\n if result.unit != expected[\"result\"]:\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)}, which has units of \"\n f\"{result.unit} instead of the expected units of \"\n f\"{_object_name(expected['result'])}.\"\n )\n\n return None\n\n if isinstance(expected[\"result\"], (u.Quantity, const.Constant, const.EMConstant)):\n if not result.unit == expected[\"result\"].unit:\n raise u.UnitsError(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} which has different units \"\n f\"than the expected result of \"\n f\"{_object_name(expected['result'])}.\"\n )\n\n if np.allclose(result.value, expected[\"result\"].value):\n return None\n\n if expected[\"result\"] is None:\n return None\n\n if type(result) != type(expected[\"result\"]):\n raise TypeMismatchFail(\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} which has type \"\n f\"{_object_name(type(result))}, \"\n f\"instead of the expected value of \"\n f\"{_object_name(expected['result'])} 
which has type \"\n f\"{_object_name(type(expected['result']))}.\"\n )\n\n try:\n if result == expected[\"result\"]:\n return None\n except Exception as exc_equality: # coverage: ignore\n raise TypeError(\n f\"The equality of {_object_name(result)} and \"\n f\"{_object_name(expected['result'])} \"\n f\"cannot be evaluated.\"\n ) from exc_equality\n\n try:\n different_length = len(result) != len(expected[\"result\"])\n except Exception:\n different_length = False\n\n try:\n all_close = np.allclose(expected[\"result\"], result, rtol=rtol, atol=atol)\n if all_close and not different_length:\n return None\n except Exception:\n pass\n\n errmsg = (\n f\"The command {call_str} returned \"\n f\"{_object_name(result)} instead of the expected \"\n f\"value of {_object_name(expected['result'])}.\"\n )\n\n if atol or rtol:\n errmsg += \" with \"\n if atol:\n errmsg += f\"atol = {atol}\"\n if atol and rtol:\n errmsg += \" and \"\n if rtol:\n errmsg += f\"rtol = {rtol}\"\n errmsg += \".\"\n\n raise UnexpectedResultFail(errmsg)\n\n\ndef run_test_equivalent_calls(*test_inputs, require_same_type: bool = True):\n \"\"\"\n Test that different functions/inputs return equivalent results.\n\n Parameters\n ----------\n test_inputs\n The functions and inputs to the tests in an allowed format, as\n described below.\n\n require_same_type: bool\n If `True` (the default), then all of the results are required to\n be of the same type. If `False`, results do not need to be of\n the same type (e.g., cases like `1.0 == 1` will not raise an\n exception).\n\n Raises\n ------\n ~plasmapy.tests.helpers.exceptions.UnexpectedResultFail\n If not all of the results are equivalent, or not all of the\n results are of the same type and `require_same_type` evaluates\n to `True`.\n\n ~plasmapy.tests.helpers.exceptions.UnexpectedExceptionFail\n If an exception is raised whilst attempting to run one of the\n test cases.\n\n ~plasmapy.tests.helpers.exceptions.InvalidTestError\n If there is an error associated with the inputs or the test is\n set up incorrectly.\n\n Examples\n --------\n There are several possible formats that can be accepted by this\n `~plasmapy.utils.run_test_equivalent_calls` to test that different\n combinations of functions (or other `callable` objects), positional\n arguments, and keyword arguments return equivalent results.\n\n To test a single function that takes a single positional argument,\n then `test_inputs` may be the function followed by an arbitrary\n number of positional arguments to be included into the function.\n\n >>> def f(x): return x ** 2\n >>> run_test_equivalent_calls(f, -1, 1)\n\n To test a single function with an arbitrary number of positional and\n keyword arguments, the first argument should be the function,\n followed by an arbitrary number of `tuple` or `list` objects that\n contain a `tuple` or `list` containing the positional arguments, and\n a `dict` containing the keyword arguments.\n\n >>> def g(x, y, z): return x + y + z\n >>> run_test_equivalent_calls(g, ((1, 2, 3), {}), ((3, 2), {'z': 1}))\n\n If there is only one positional argument, then it is not necessary\n to include it in a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, ([1], {}), ([1], {}))\n >>> run_test_equivalent_calls(f, (1, {}), (1, {}))\n\n To test multiple functions with an arbitrary number of positional\n and keyword arguments, use a series of `tuple` or `list` objects\n that contain the function for each test, a `tuple` or `list` with\n the positional arguments, and a `dict` with the keyword arguments.\n\n 
>>> def p(x, y=None): return x + y if y else x\n >>> def q(x, y=None): return x + 1 if y else x\n\n >>> run_test_equivalent_calls([p, (1,), {'y': 1}], [q, (2,), {'y': False}])\n\n The inputs may also be passed in as a whole as a `tuple` or `list`.\n\n >>> run_test_equivalent_calls(f, -1, 1)\n >>> run_test_equivalent_calls([f, -1, 1])\n\n If `require_same_type` is `False`, then an exception will not be\n raised if the results are of different types.\n\n >>> run_test_equivalent_calls(f, -1, 1.0, require_same_type=False)\n\n \"\"\"\n\n if len(test_inputs) == 1:\n test_inputs = test_inputs[0]\n\n if not isinstance(test_inputs, (tuple, list)):\n raise InvalidTestError(\n f\"The argument to run_test_equivalent_calls must be a tuple \"\n f\"or list. The provided inputs are: {test_inputs}\"\n )\n\n if callable(test_inputs[0]):\n func = test_inputs[0]\n test_inputs = test_inputs[1:]\n else:\n func = None\n\n # Make sure everything is a list to allow f(*args)\n\n test_inputs = [\n test_input if isinstance(test_input, (list, tuple)) else [test_input]\n for test_input in test_inputs\n ]\n\n # Construct a list of dicts, of which each dict contains the\n # function, positional arguments, and keyword arguments for each\n # test case.\n\n test_cases = []\n\n for inputs in test_inputs:\n test_case = {}\n\n test_case[\"function\"] = func if func else inputs[0]\n test_case[\"args\"] = inputs[0] if func else inputs[1]\n\n if not isinstance(test_case[\"args\"], (list, tuple)):\n test_case[\"args\"] = [test_case[\"args\"]]\n\n if func:\n test_case[\"kwargs\"] = inputs[1] if len(inputs) == 2 else {}\n else:\n test_case[\"kwargs\"] = inputs[2] if len(inputs) == 3 else {}\n\n try:\n test_case[\"call string\"] = call_string(\n test_case[\"function\"], test_case[\"args\"], test_case[\"kwargs\"]\n )\n except Exception:\n test_case[\"call string\"] = (\n f\"function = {test_case['function']}, \"\n f\"args = {test_case['args']}, and \"\n f\"kwargs = {test_case['kwargs']}\"\n )\n\n test_cases.append(test_case)\n\n if len(test_cases) < 2:\n raise InvalidTestError(\n \"At least two tests are needed for run_test_equivalent_calls\"\n )\n\n # Check to make sure that each function is callable, each set of\n # args is a list or tuple, and each set of kwargs is a dict. 
Make\n # sure that the error message contains all of the problems.\n\n bad_inputs_errmsg = \"\"\n\n for test_case in test_cases:\n if not callable(test_case[\"function\"]):\n bad_inputs_errmsg += f\"\\n{test_case['function']} is not callable \"\n if not isinstance(test_case[\"args\"], (tuple, list)):\n bad_inputs_errmsg += f\"\\n{test_case['args']} is not a list or tuple \"\n if not isinstance(test_case[\"kwargs\"], dict):\n bad_inputs_errmsg += f\"\\n{test_case['kwargs']} is not a dict \"\n\n if bad_inputs_errmsg:\n raise InvalidTestError(bad_inputs_errmsg)\n\n # Now we can get the results for each test case.\n\n for test_case in test_cases:\n try:\n f, args, kwargs = (\n test_case[\"function\"],\n test_case[\"args\"],\n test_case[\"kwargs\"],\n )\n test_case[\"result\"] = f(*args, **kwargs)\n test_case[\"type\"] = type(test_case[\"result\"])\n except Exception as exc:\n raise UnexpectedExceptionFail(\n f\"Unable to evaluate {test_case['call string']}.\"\n )\n\n # Make sure that all of the results evaluate as equal to the first\n # result.\n\n results = [test_case[\"result\"] for test_case in test_cases]\n types = [test_case[\"type\"] for test_case in test_cases]\n\n try:\n equals_first_result = [result == results[0] for result in results]\n except Exception as exc: # coverage: ignore\n raise UnexpectedExceptionFail(\n \"Unable to determine equality properties of results.\"\n ) from exc\n\n equals_first_type = [result_type == types[0] for result_type in types]\n\n not_all_equal = not all(equals_first_result)\n not_all_same_type = not all(equals_first_type)\n\n if not_all_equal:\n errmsg = \"The following tests did not all produce identical results:\"\n elif not_all_same_type and require_same_type:\n errmsg = \"The following tests did not all produce results of the same type:\"\n\n if not_all_equal or (not_all_same_type and require_same_type):\n\n for test_case in test_cases:\n errmsg += (\n f\"\\n {test_case['call string']} yielded {test_case['result']} \"\n f\"of type {test_case['type']}\"\n )\n\n raise UnexpectedResultFail(errmsg)\n\n\ndef assert_can_handle_nparray(\n function_to_test,\n insert_some_nans=None,\n insert_all_nans=None,\n kwargs=None,\n):\n \"\"\"\n Test for ability to handle numpy array quantities.\n\n Parameters\n ----------\n function_to_test\n The function to be tested for ability to handle numpy array quantities.\n Arguments are automatically given a vector input based on their\n variable name. 
Current args that are interpreted as vectors are:\n `[\"T\", \"T_i\", \"T_e\", \"temperature\"]`\n `[\"n\", \"n_i\", \"n_e\", \"density\"]`\n `[\"B\"]`\n `[\"V\", \"Vperp\"]`\n `[\"coulomb_log\"]`\n `[\"characteristic_length\"]`\n\n insert_some_nans: `list`\n List of argument names in which to insert some np.nan values.\n These must be arguments that will be tested as vectors as listed\n above.\n\n insert_all_nans: `list`\n List of argument names to fill entirely with np.nan values.\n\n kwargs: `dict`\n Arguments to pass directly to the function in under test, in the\n normal kwargs python dictionary format.\n\n Raises\n ------\n ValueError\n If this function cannot interpret a parameter of function_to_test.\n\n Examples\n --------\n >>> from plasmapy.formulary.parameters import Alfven_speed, gyrofrequency\n >>> assert_can_handle_nparray(Alfven_speed)\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": True})\n >>> assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": False})\n \"\"\"\n\n if insert_some_nans is None:\n insert_some_nans = []\n\n if insert_all_nans is None:\n insert_all_nans = []\n\n if kwargs is None:\n kwargs = {}\n\n def _prepare_input(\n param_name, param_default, insert_some_nans, insert_all_nans, kwargs\n ):\n \"\"\"\n Parse parameter names and set up values to input for 0d, 1d, and 2d array tests.\n \"\"\"\n # first things first: let any passed in kwarg right through (VIP access)\n if param_name in kwargs.keys():\n return (kwargs[param_name],) * 4\n\n # else, if it's a recognized variable name, give it a reasonable unit and magnitude\n elif param_name in [\"particle\", \"ion_particle\", \"ion\"]:\n if not (param_default is inspect._empty or param_default is None):\n return (param_default,) * 4\n else:\n return (\"p\",) * 4\n elif param_name == \"particles\" or param_name == \"species\":\n if not (param_default is inspect._empty):\n return (param_default,) * 4\n else:\n return ((\"e\", \"p\"),) * 4\n elif param_name in [\"T\", \"T_i\", \"T_e\", \"temperature\"]:\n unit = u.eV\n magnitude = 1.0\n elif param_name in [\"n\", \"n_i\", \"n_e\", \"density\"]:\n unit = u.m ** -3\n magnitude = 1e20\n elif param_name == \"B\":\n unit = u.G\n magnitude = 1e3\n elif param_name in [\"V\", \"Vperp\"]:\n unit = u.m / u.s\n magnitude = 1e5\n elif param_name == \"coulomb_log\":\n unit = 1.0\n magnitude = 1e1\n elif param_name == \"characteristic_length\":\n unit = u.m\n magnitude = 1.0\n elif param_name == \"k\":\n unit = u.m ** -1\n magnitude = 1.0\n\n # else, last resort, if it has a default argument, go with that:\n elif not (param_default is inspect._empty):\n return (param_default,) * 4\n\n else:\n raise ValueError(f\"Unrecognized function input: {param_name}\")\n\n # now knowing unit and magnitude, set up the 0d, 1d, 2d, and 3d arrays:\n input_data_3d = np.reshape(np.arange(1.0, 9.0, 1.0), (2, 2, 2))\n input_data_2d = np.reshape(np.arange(1.0, 5.0, 1.0), (2, 2))\n input_data_1d = np.arange(1.0, 5.0, 1.0)\n if param_name in insert_some_nans:\n input_data_3d[0, 0, 1] = np.nan\n input_data_3d[0, 1, 0] = np.nan\n input_data_2d[0, 1] = np.nan\n input_data_2d[1, 0] = np.nan\n input_data_1d[1] = np.nan\n elif param_name in insert_all_nans:\n input_data_3d = np.ones((2, 2, 2)) * np.nan\n input_data_2d = np.ones((2, 2)) * np.nan\n input_data_1d = np.ones(4) * np.nan\n input_data_3d *= magnitude\n input_data_3d *= unit\n input_data_2d *= magnitude\n input_data_2d *= unit\n input_data_1d *= magnitude\n input_data_1d *= unit\n input_data_0d = input_data_1d[3]\n return 
input_data_0d, input_data_1d, input_data_2d, input_data_3d\n\n    # call _prepare_input to prepare the 0d, 1d, 2d, and 3d sets of arguments for the function:\n    function_sig = inspect.signature(function_to_test)\n    function_params = function_sig.parameters\n    args_0d = dict()\n    args_1d = dict()\n    args_2d = dict()\n    args_3d = dict()\n    param_names = [elm for elm in function_params.keys()]\n    for idx, key in enumerate(function_params):\n        args_0d[key], args_1d[key], args_2d[key], args_3d[key] = _prepare_input(\n            param_names[idx],\n            function_params[key].default,\n            insert_some_nans,\n            insert_all_nans,\n            kwargs,\n        )\n\n    # call the function with the prepared argument sets:\n    with warnings.catch_warnings():\n        warnings.filterwarnings(\"ignore\", category=PlasmaPyWarning)\n        result_0d = function_to_test(**args_0d)\n        result_1d = function_to_test(**args_1d)\n        result_2d = function_to_test(**args_2d)\n        result_3d = function_to_test(**args_3d)\n\n    # assert that the 1d, 2d, 3d versions get the same result (elementwise) as the 0d version:\n    # (if the function returns multiple values, loop through and test each)\n    try:\n        scalar_testable = result_0d.value\n    except AttributeError:\n        scalar_testable = result_0d\n    if np.isscalar(scalar_testable):\n        astrohelper.assert_quantity_allclose(result_0d, result_1d[3])\n        astrohelper.assert_quantity_allclose(result_0d, result_2d[1, 1])\n        astrohelper.assert_quantity_allclose(result_0d, result_3d[0, 1, 1])\n    else:\n        for idx, res_0d in enumerate(result_0d):\n            astrohelper.assert_quantity_allclose(res_0d, result_1d[idx][3])\n            astrohelper.assert_quantity_allclose(res_0d, result_2d[idx][1, 1])\n            astrohelper.assert_quantity_allclose(res_0d, result_3d[idx][0, 1, 1])\n", "path": "plasmapy/utils/pytest_helpers/pytest_helpers.py" } ]
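Aside on the pattern used in the patch below: the old per-function expectation tables are collapsed into flat tables of (function, args, kwargs, expected) rows that pytest.mark.parametrize feeds either to run_test above (for value checks) or to pytest.raises / pytest.warns context managers (for exception and warning checks). The following is a minimal, self-contained sketch of the run_test variant; square, cases, and test_square are illustrative stand-ins, not names from the recorded patch.

import pytest

from plasmapy.utils.pytest_helpers import run_test


def square(x):
    # Stand-in for whichever function a table row exercises.
    return x ** 2


# Each row: (callable, positional args, keyword args, expected outcome).
# The expected outcome may be a return value, an exception class, or a
# warning class; run_test dispatches on which one it receives.
cases = [
    (square, (2,), {}, 4),
    (square, ("two",), {}, TypeError),
]


@pytest.mark.parametrize("tested_function, args, kwargs, expected", cases)
def test_square(tested_function, args, kwargs, expected):
    run_test(tested_function, args, kwargs, expected)

This is the same shape as test_functions_and_values over table_functions_args_kwargs_output in the refactored test_atomic.py, and as the expectation-driven tests in the new test_exceptions.py, both shown in the diff that follows.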
diff --git a/changelog/1369.trivial.rst b/changelog/1369.trivial.rst new file mode 100644 index 0000000000..be90a5f509 --- /dev/null +++ b/changelog/1369.trivial.rst @@ -0,0 +1 @@ +Refactored tests in `plasmapy.particles`. diff --git a/plasmapy/particles/tests/test_atomic.py b/plasmapy/particles/tests/test_atomic.py index 576b99b9e5..dae299f780 100644 --- a/plasmapy/particles/tests/test_atomic.py +++ b/plasmapy/particles/tests/test_atomic.py @@ -3,6 +3,7 @@ from astropy import constants as const from astropy import units as u +from astropy.tests.helper import assert_quantity_allclose from plasmapy.particles.exceptions import ( ChargeError, @@ -45,345 +46,207 @@ # The following lists (with the name of a function -atomic_symbol_table = [ - [1, "H"], - [1, "H"], - ["H", "H"], - ["p", "H"], - ["T", "H"], - ["deuterium", "H"], - ["deuteron", "H"], - ["Tritium", "H"], - ["triton", "H"], - ["H-2", "H"], - ["D", "H"], - ["T", "H"], - ["H-3", "H"], - ["Hydrogen-3", "H"], - ["helium", "He"], - [2, "He"], - ["alpha", "He"], - ["gold", "Au"], - ["Gold", "Au"], - [79, "Au"], - ["79", "Au"], - ["P", "P"], - [118, "Og"], - ["N-14", "N"], - ["N", "N"], - ["H +1", "H"], - ["H 1+", "H"], - ["hydrogen 1+", "H"], - ["deuterium 1+", "H"], - ["Fe 24+", "Fe"], - ["Fe +24", "Fe"], - ["Fe 2-", "Fe"], - ["Fe -2", "Fe"], - ["Fe+", "Fe"], - ["Fe++", "Fe"], - ["Fe-", "Fe"], - ["Fe++++++++++++++", "Fe"], - ["H-0", InvalidParticleError], - [3.14159, TypeError], - ["Og-294b", InvalidParticleError], - ["H-934361079326356530741942970523610389", InvalidParticleError], - ["Fe 2+4", InvalidParticleError], - ["Fe+24", InvalidParticleError], - ["Fe +59", InvalidParticleError], - ["C++++++++++++++++", InvalidParticleError], - ["C-++++", InvalidParticleError], - ["neutron", InvalidElementError], - ["n", InvalidElementError], - ["n-1", InvalidElementError], - ["h", InvalidParticleError], - ["d", InvalidParticleError], - ["he", InvalidParticleError], - ["au", InvalidParticleError], - ["p-", InvalidElementError], - [0, InvalidParticleError], - [119, InvalidParticleError], - ["antiproton", InvalidElementError], -] - -isotope_symbol_table = [ - [("He", 4), "He-4"], - [("helium-4",), "He-4"], - [("H-2",), "D"], - [("Deuterium",), "D"], - [("deuterium",), "D"], - [("deuteron",), "D"], - [("tritium",), "T"], - [("triton",), "T"], - [("Hydrogen-3",), "T"], - [("hydrogen-3",), "T"], - [("H-3",), "T"], - [(1, 2), "D"], - [("Hydrogen", 3), "T"], - [("tritium",), "T"], - [("H", 2), "D"], - [("Alpha",), "He-4"], - [("alpha",), "He-4"], - [(79, 197), "Au-197"], - [("p",), "H-1"], - [("beryllium-8",), "Be-8"], - [("N-13",), "N-13"], - [("p",), "H-1"], - [("proton",), "H-1"], - [("protium",), "H-1"], - [("N-13 2+",), "N-13"], - [("Hydrogen-3 +1",), "T"], - ["Md-260", {"mass_numb": 261}, InvalidParticleError], - ["protium", {"mass_numb": 2}, InvalidParticleError], - ["alpha", {"mass_numb": 3}, InvalidParticleError], - ["O-18", {"mass_numb": 19}, InvalidParticleError], - ["lead-209", {"mass_numb": 511}, InvalidParticleError], - ["He-1", {}, InvalidParticleError], - [24, {"mass_numb": 23}, InvalidParticleError], - ["H", {"mass_numb": 0}, InvalidParticleError], - ["H-1", {"mass_numb": 2}, InvalidParticleError], - ["P", {}, InvalidIsotopeError], - [1, {}, InvalidIsotopeError], - [4, {}, InvalidIsotopeError], - ["hydrogen-444444", {}, InvalidParticleError], - ["Fe", {"mass_numb": 2.1}, TypeError], - ["He", {"mass_numb": "c"}, TypeError], - ["He-3", {"mass_numb": 4}, InvalidParticleError], - ["D", {"mass_numb": 3}, InvalidParticleError], - ["T", 
{"mass_numb": 2}, InvalidParticleError], - ["Fe", {"mass_numb": None}, InvalidIsotopeError], - ["He", {"mass_numb": 99}, InvalidParticleError], - ["d", {}, InvalidParticleError], - ["h-3", {}, InvalidParticleError], - ["h", {}, InvalidParticleError], - ["d+", {}, InvalidParticleError], - ["H-1", {"mass_numb": 1}, ParticleWarning], - ["H-2", {"mass_numb": 2}, ParticleWarning], - ["T", {"mass_numb": 3}, ParticleWarning], - ["Li-6", {"mass_numb": 6}, ParticleWarning], - ["lithium-6", {"mass_numb": 6}, ParticleWarning], - ["alpha", {"mass_numb": 4}, ParticleWarning], - ["p", {"mass_numb": 1}, ParticleWarning], -] - -atomic_number_table = [ - ["H", 1], - ["D", 1], - ["deuterium", 1], - ["Deuterium", 1], - ["tritium", 1], - ["p", 1], - ["P", 15], - ["Alpha", 2], - ["C-12", 6], - ["Argon", 18], - ["protium", 1], - ["H-3", 1], - ["p+", 1], - ["Be-8", 4], - ["N", 7], - ["N 2+", 7], - ["N +1", 7], - ["N+++", 7], - ["H-3934", InvalidParticleError], - ["C-12b", InvalidParticleError], - [-1.5, TypeError], - ["n", InvalidElementError], - ["n-1", InvalidElementError], - ["neutron", InvalidElementError], - ["Neutron", InvalidElementError], - ["d", InvalidParticleError], - ["t", InvalidParticleError], - ["s-36", InvalidParticleError], -] - -mass_number_table = [ - ["helium-3", 3], - ["Au-197", 197], - ["deuterium", 2], - ["D", 2], - ["H-2", 2], - ["tritium", 3], - ["T", 3], - ["alpha", 4], - ["p", 1], - ["Be-8", 8], - ["N-13", 13], - ["N-13 2+", 13], - ["N-13 +2", 13], - ["N-13+++", 13], - ["H-359", InvalidParticleError], - ["C-12b", InvalidParticleError], - [-1.5, TypeError], - ["N-13+-+-", InvalidParticleError], - ["h-3", InvalidParticleError], - ["n", InvalidIsotopeError], - ["n-1", InvalidIsotopeError], -] - - -element_name_table = [ - ["D", "hydrogen"], - ["deuterium", "hydrogen"], - ["Au", "gold"], - ["alpha", "helium"], - ["helium-4", "helium"], - ["H-2", "hydrogen"], - ["Deuterium", "hydrogen"], - ["Hydrogen-3", "hydrogen"], - ["hydrogen-3", "hydrogen"], - ["H-3", "hydrogen"], - ["tritium", "hydrogen"], - ["Alpha", "helium"], - ["alpha", "helium"], - [1, "hydrogen"], - [26, "iron"], - [79, "gold"], - ["p", "hydrogen"], - ["P", "phosphorus"], - ["Be-8", "beryllium"], - ["Li-7", "lithium"], - ["N", "nitrogen"], - ["N+++", "nitrogen"], - ["D-", "hydrogen"], - ["vegancupcakes", InvalidParticleError], - ["C-+-", InvalidParticleError], - [1.24, TypeError], - ["n", InvalidElementError], - ["neutron", InvalidElementError], - [0, InvalidParticleError], - ["H++", InvalidParticleError], - ["t", InvalidParticleError], - ["pb", InvalidParticleError], - ["d", InvalidParticleError], - ["h-3", InvalidParticleError], - ["Pb-9", InvalidParticleError], - ["H 2+", InvalidParticleError], -] - -standard_atomic_weight_table = [ - ["H", (1.008 * u.u).to(u.kg)], - [1, (1.008 * u.u).to(u.kg)], - ["Hydrogen", (1.008 * u.u).to(u.kg)], - ["Au", u.kg], - ["H-1", ParticleError], - ["help i'm trapped in a unit test", InvalidParticleError], - [1.1, TypeError], - ["n", InvalidElementError], - ["p", ParticleError], - ["alpha", ParticleError], - ["deuteron", ParticleError], - ["tritium", ParticleError], - ["Au+", ParticleError], - ["Fe -2", ParticleError], - ["Og 2+", ParticleError], - ["h", InvalidParticleError], - ["fe", InvalidParticleError], -] - -particle_mass_table = [ - ["proton", const.m_p], - ["H-1+", const.m_p], - ["H-1 +1", const.m_p], - ["H-1 1+", const.m_p], - ["H-1", {"Z": 1}, const.m_p], - ["hydrogen-1", {"Z": 1}, const.m_p], - ["p+", const.m_p], - ["F-19", {"Z": 3}, u.kg], - ["Og 1+", {}, MissingParticleDataError], - 
["Fe-56", {"Z": 1.4}, TypeError], - ["H-1 +1", {"Z": 0}, InvalidParticleError], - [26, {"Z": 1, "mass_numb": "a"}, TypeError], - [26, {"Z": 27, "mass_numb": 56}, InvalidParticleError], - ["Og", {"Z": 1}, MissingParticleDataError], - ["Og", {"mass_numb": 696, "Z": 1}, InvalidParticleError], - ["He 1+", {"mass_numb": 99}, InvalidParticleError], - ["fe-56 1+", {}, InvalidParticleError], - ["H-1", {"mass_numb": 1, "Z": 1}, ParticleWarning], - ["H", standard_atomic_weight("H")], -] - -is_stable_table = [ - ["H-1", True], - [(1, 1), True], - ["N-14", True], - [("N", 14), True], - ["P-31", True], - [("P", 31), True], - ["p", True], - ["alpha", True], - ["Xe-124", True], - ["Fe", {"mass_numb": 56}, True], - ["Fe-56", True], - ["iron-56", True], - ["Iron-56", True], - [(26, 56), True], - ["Be-8", False], - ["U-235", False], - ["uranium-235", False], - ["T", False], - [(4, 8), False], - ["tritium", False], - ["Pb-209", False], - ["lead-209", False], - ["Lead-209", False], - ["Pb", {"mass_numb": 209}, False], - [(82, 209), False], - [("hydrogen-444444",), InvalidParticleError], - [("hydrogen", 0), InvalidParticleError], - [("",), InvalidParticleError], - [("pb-209",), InvalidParticleError], - [("h",), InvalidParticleError], - [("He",), InvalidIsotopeError], - [("B",), InvalidIsotopeError], -] - -charge_number_table = [ - ["H+", 1], - ["D +1", 1], - ["tritium 1+", 1], - ["H-", -1], - ["Fe -2", -2], - ["Fe 2-", -2], - ["N--", -2], - ["N++", 2], - ["alpha", 2], - ["proton", 1], - ["deuteron", 1], - ["triton", 1], - ["electron", -1], - ["e-", -1], - ["e+", 1], - ["positron", 1], - ["n", 0], - ["neutron", 0], - ["p-", -1], - ["antiproton", -1], - ["fads", InvalidParticleError], - ["H++", InvalidParticleError], - ["h+", InvalidParticleError], - ["fe 1+", InvalidParticleError], - ["d+", InvalidParticleError], - ["Fe 29+", InvalidParticleError], - ["H-1", ChargeError], - ["H---", ParticleWarning], - ["Fe -26", ParticleWarning], - ["Og 10-", ParticleWarning], +table_functions_args_kwargs_output = [ + [ + atomic_symbol, + [ + 1, + ], + {}, + "H", + ], + [atomic_symbol, [1], {}, "H"], + [atomic_symbol, ["H"], {}, "H"], + [atomic_symbol, ["p"], {}, "H"], + [atomic_symbol, ["T"], {}, "H"], + [atomic_symbol, ["deuterium"], {}, "H"], + [atomic_symbol, ["deuteron"], {}, "H"], + [atomic_symbol, ["Tritium"], {}, "H"], + [atomic_symbol, ["triton"], {}, "H"], + [atomic_symbol, ["H-2"], {}, "H"], + [atomic_symbol, ["D"], {}, "H"], + [atomic_symbol, ["T"], {}, "H"], + [atomic_symbol, ["H-3"], {}, "H"], + [atomic_symbol, ["Hydrogen-3"], {}, "H"], + [atomic_symbol, ["helium"], {}, "He"], + [atomic_symbol, [2], {}, "He"], + [atomic_symbol, ["alpha"], {}, "He"], + [atomic_symbol, ["gold"], {}, "Au"], + [atomic_symbol, ["Gold"], {}, "Au"], + [atomic_symbol, [79], {}, "Au"], + [atomic_symbol, ["79"], {}, "Au"], + [atomic_symbol, ["P"], {}, "P"], + [atomic_symbol, [118], {}, "Og"], + [atomic_symbol, ["N-14"], {}, "N"], + [atomic_symbol, ["N"], {}, "N"], + [atomic_symbol, ["H +1"], {}, "H"], + [atomic_symbol, ["H 1+"], {}, "H"], + [atomic_symbol, ["hydrogen 1+"], {}, "H"], + [atomic_symbol, ["deuterium 1+"], {}, "H"], + [atomic_symbol, ["Fe 24+"], {}, "Fe"], + [atomic_symbol, ["Fe +24"], {}, "Fe"], + [atomic_symbol, ["Fe 2-"], {}, "Fe"], + [atomic_symbol, ["Fe -2"], {}, "Fe"], + [atomic_symbol, ["Fe+"], {}, "Fe"], + [atomic_symbol, ["Fe++"], {}, "Fe"], + [atomic_symbol, ["Fe-"], {}, "Fe"], + [atomic_symbol, ["Fe++++++++++++++"], {}, "Fe"], + [isotope_symbol, ("He", 4), {}, "He-4"], + [isotope_symbol, ("helium-4",), {}, "He-4"], + 
[isotope_symbol, ("H-2",), {}, "D"], + [isotope_symbol, ("Deuterium",), {}, "D"], + [isotope_symbol, ("deuterium",), {}, "D"], + [isotope_symbol, ("deuteron",), {}, "D"], + [isotope_symbol, ("tritium",), {}, "T"], + [isotope_symbol, ("triton",), {}, "T"], + [isotope_symbol, ("Hydrogen-3",), {}, "T"], + [isotope_symbol, ("hydrogen-3",), {}, "T"], + [isotope_symbol, ("H-3",), {}, "T"], + [isotope_symbol, (1, 2), {}, "D"], + [isotope_symbol, ("Hydrogen", 3), {}, "T"], + [isotope_symbol, ("tritium",), {}, "T"], + [isotope_symbol, ("H", 2), {}, "D"], + [isotope_symbol, ("Alpha",), {}, "He-4"], + [isotope_symbol, ("alpha",), {}, "He-4"], + [isotope_symbol, (79, 197), {}, "Au-197"], + [isotope_symbol, ("p",), {}, "H-1"], + [isotope_symbol, ("beryllium-8",), {}, "Be-8"], + [isotope_symbol, ("N-13",), {}, "N-13"], + [isotope_symbol, ("p",), {}, "H-1"], + [isotope_symbol, ("proton",), {}, "H-1"], + [isotope_symbol, ("protium",), {}, "H-1"], + [isotope_symbol, ("N-13 2+",), {}, "N-13"], + [isotope_symbol, ("Hydrogen-3 +1",), {}, "T"], + [atomic_number, ["H"], {}, 1], + [atomic_number, ["D"], {}, 1], + [atomic_number, ["deuterium"], {}, 1], + [atomic_number, ["Deuterium"], {}, 1], + [atomic_number, ["tritium"], {}, 1], + [atomic_number, ["p"], {}, 1], + [atomic_number, ["P"], {}, 15], + [atomic_number, ["Alpha"], {}, 2], + [atomic_number, ["C-12"], {}, 6], + [atomic_number, ["Argon"], {}, 18], + [atomic_number, ["protium"], {}, 1], + [atomic_number, ["H-3"], {}, 1], + [atomic_number, ["p+"], {}, 1], + [atomic_number, ["Be-8"], {}, 4], + [atomic_number, ["N"], {}, 7], + [atomic_number, ["N 2+"], {}, 7], + [atomic_number, ["N +1"], {}, 7], + [atomic_number, ["N+++"], {}, 7], + [mass_number, ["helium-3"], {}, 3], + [mass_number, ["Au-197"], {}, 197], + [mass_number, ["deuterium"], {}, 2], + [mass_number, ["D"], {}, 2], + [mass_number, ["H-2"], {}, 2], + [mass_number, ["tritium"], {}, 3], + [mass_number, ["T"], {}, 3], + [mass_number, ["alpha"], {}, 4], + [mass_number, ["p"], {}, 1], + [mass_number, ["Be-8"], {}, 8], + [mass_number, ["N-13"], {}, 13], + [mass_number, ["N-13 2+"], {}, 13], + [mass_number, ["N-13 +2"], {}, 13], + [mass_number, ["N-13+++"], {}, 13], + [element_name, ["D"], {}, "hydrogen"], + [element_name, ["deuterium"], {}, "hydrogen"], + [element_name, ["Au"], {}, "gold"], + [element_name, ["alpha"], {}, "helium"], + [element_name, ["helium-4"], {}, "helium"], + [element_name, ["H-2"], {}, "hydrogen"], + [element_name, ["Deuterium"], {}, "hydrogen"], + [element_name, ["Hydrogen-3"], {}, "hydrogen"], + [element_name, ["hydrogen-3"], {}, "hydrogen"], + [element_name, ["H-3"], {}, "hydrogen"], + [element_name, ["tritium"], {}, "hydrogen"], + [element_name, ["Alpha"], {}, "helium"], + [element_name, ["alpha"], {}, "helium"], + [element_name, [1], {}, "hydrogen"], + [element_name, [26], {}, "iron"], + [element_name, [79], {}, "gold"], + [element_name, ["p"], {}, "hydrogen"], + [element_name, ["P"], {}, "phosphorus"], + [element_name, ["Be-8"], {}, "beryllium"], + [element_name, ["Li-7"], {}, "lithium"], + [element_name, ["N"], {}, "nitrogen"], + [element_name, ["N+++"], {}, "nitrogen"], + [element_name, ["D-"], {}, "hydrogen"], + [standard_atomic_weight, ["H"], {}, (1.008 * u.u).to(u.kg)], + [standard_atomic_weight, [1], {}, (1.008 * u.u).to(u.kg)], + [standard_atomic_weight, ["Hydrogen"], {}, (1.008 * u.u).to(u.kg)], + [standard_atomic_weight, ["Au"], {}, u.kg], + [particle_mass, ["proton"], {}, const.m_p], + [particle_mass, ["H-1+"], {}, const.m_p], + [particle_mass, ["H-1 +1"], {}, 
const.m_p], + [particle_mass, ["H-1 1+"], {}, const.m_p], + [particle_mass, ["H-1"], {"Z": 1}, const.m_p], + [particle_mass, ["hydrogen-1"], {"Z": 1}, const.m_p], + [particle_mass, ["p+"], {}, const.m_p], + [particle_mass, ["F-19"], {"Z": 3}, u.kg], + [particle_mass, ["H"], {}, standard_atomic_weight("H")], + [is_stable, ["H-1"], {}, True], + [is_stable, [1, 1], {}, True], + [is_stable, ["N-14"], {}, True], + [is_stable, ["N", 14], {}, True], + [is_stable, ["P-31"], {}, True], + [is_stable, ["P", 31], {}, True], + [is_stable, ["p"], {}, True], + [is_stable, ["alpha"], {}, True], + [is_stable, ["Xe-124"], {}, True], + [is_stable, ("Fe",), {"mass_numb": 56}, True], + [is_stable, ["Fe-56"], {}, True], + [is_stable, ["iron-56"], {}, True], + [is_stable, ["Iron-56"], {}, True], + [is_stable, [26, 56], {}, True], + [is_stable, ["Be-8"], {}, False], + [is_stable, ["U-235"], {}, False], + [is_stable, ["uranium-235"], {}, False], + [is_stable, ["T"], {}, False], + [is_stable, [4, 8], {}, False], + [is_stable, ["tritium"], {}, False], + [is_stable, ["Pb-209"], {}, False], + [is_stable, ["lead-209"], {}, False], + [is_stable, ["Lead-209"], {}, False], + [is_stable, ("Pb",), {"mass_numb": 209}, False], + [is_stable, [82, 209], {}, False], + [charge_number, ["H+"], {}, 1], + [charge_number, ["D +1"], {}, 1], + [charge_number, ["tritium 1+"], {}, 1], + [charge_number, ["H-"], {}, -1], + [charge_number, ["Fe -2"], {}, -2], + [charge_number, ["Fe 2-"], {}, -2], + [charge_number, ["N--"], {}, -2], + [charge_number, ["N++"], {}, 2], + [charge_number, ["alpha"], {}, 2], + [charge_number, ["proton"], {}, 1], + [charge_number, ["deuteron"], {}, 1], + [charge_number, ["triton"], {}, 1], + [charge_number, ["electron"], {}, -1], + [charge_number, ["e-"], {}, -1], + [charge_number, ["e+"], {}, 1], + [charge_number, ["positron"], {}, 1], + [charge_number, ["n"], {}, 0], + [charge_number, ["neutron"], {}, 0], + [charge_number, ["p-"], {}, -1], + [charge_number, ["antiproton"], {}, -1], + [electric_charge, ["p"], {}, u.C], + [electric_charge, ["p"], {}, 1.6021766208e-19 * u.C], + [electric_charge, ["e"], {}, -1.6021766208e-19 * u.C], + [electric_charge, ["alpha"], {}, 3.2043532416e-19 * u.C], + [electric_charge, ["n"], {}, 0 * u.C], + [half_life, ["H-1"], {}, u.s], + [half_life, ["tritium"], {}, u.s], + [half_life, ["H-1"], {}, np.inf * u.s], ] -electric_charge_table = [ - ["p", u.C], - ["p", 1.6021766208e-19 * u.C], - ["e", -1.6021766208e-19 * u.C], - ["alpha", 3.2043532416e-19 * u.C], - ["n", 0 * u.C], - ["badinput", InvalidParticleError], - ["h+", InvalidParticleError], - ["Au 81+", InvalidParticleError], - ["Au 81-", ParticleWarning], - ["H---", ParticleWarning], -] -half_life_table = [["H-1", u.s], ["tritium", u.s], ["H-1", np.inf * u.s]] [email protected]( + "tested_function, args, kwargs, expected_output", + table_functions_args_kwargs_output, +) +def test_functions_and_values(tested_function, args, kwargs, expected_output): + run_test(tested_function, args, kwargs, expected_output) class TestInvalidPeriodicElement: @@ -404,112 +267,6 @@ def test_periodic_table_group(self): periodic_table_group(("B", "Ti", "Ge")) -# The tables above do not include the function to be tested in order to -# avoid cluttering up the code. The following block of code prepends -# the correct function to each list containing args, kwargs, and the -# expected outcome prior to being passed through to run_test. 
- - -tables_and_functions = [ - (atomic_symbol, atomic_symbol_table), - (isotope_symbol, isotope_symbol_table), - (atomic_number, atomic_number_table), - (mass_number, mass_number_table), - (element_name, element_name_table), - (standard_atomic_weight, standard_atomic_weight_table), - (is_stable, is_stable_table), - (particle_mass, particle_mass_table), - (charge_number, charge_number_table), - (electric_charge, electric_charge_table), - (half_life, half_life_table), -] - -all_tests = [] - -for func, table in tables_and_functions: - for inputs in table: - inputs.insert(0, func) - if len(inputs) == 3: - inputs.insert(2, {}) - all_tests += table - -# Set up tests for a variety of atomic functions to make sure that bad -# inputs lead to the expected errors. - -atomic_TypeError_funcs_table = [ - atomic_symbol, - isotope_symbol, - atomic_number, - is_stable, - half_life, - mass_number, - element_name, - standard_atomic_weight, - nuclear_binding_energy, - nuclear_reaction_energy, -] - -atomic_TypeError_badargs = [1.1, {"cats": "bats"}, 1 + 1j] - -atomic_ParticleErrors_funcs_table = [ - atomic_symbol, - isotope_symbol, - atomic_number, - is_stable, - half_life, - mass_number, - element_name, - standard_atomic_weight, - particle_mass, - known_isotopes, - stable_isotopes, - common_isotopes, - isotopic_abundance, - charge_number, - electric_charge, -] - -atomic_ParticleError_badargs = [ - -1, - 119, - "grumblemuffins", - "H-0", - "Og-294b", - "H-9343610", - "Fe 2+4", - "Fe+24", - "Fe +59", - "C++++++++++++++++", - "C-++++", - "h", - "d", - "he", - "au", - "alpha 1+", - "alpha-4", -] - -metatable = [ - (atomic_TypeError_funcs_table, atomic_TypeError_badargs, TypeError), - ( - atomic_ParticleErrors_funcs_table, - atomic_ParticleError_badargs, - InvalidParticleError, - ), -] - -for funcs, badargs, error in metatable: - for func in funcs: - for badarg in badargs: - all_tests += [[func, badarg, error]] - - [email protected]("inputs", all_tests) -def test_atomic_functions(inputs): - print(inputs) - run_test(inputs) - - # Next we have tests that do not fall nicely into equality comparisons. 
diff --git a/plasmapy/particles/tests/test_exceptions.py b/plasmapy/particles/tests/test_exceptions.py new file mode 100644 index 0000000000..f2ff0cc82d --- /dev/null +++ b/plasmapy/particles/tests/test_exceptions.py @@ -0,0 +1,1077 @@ +import itertools +import numpy as np +import pytest + +from astropy import units as u + +from plasmapy.particles import ( + atomic_symbol, + IonizationState, + IonizationStateCollection, + nuclear_binding_energy, + nuclear_reaction_energy, +) +from plasmapy.particles.atomic import ( + atomic_number, + common_isotopes, + electric_charge, + half_life, + integer_charge, + is_stable, + isotopic_abundance, + known_isotopes, + mass_number, + particle_mass, + stable_isotopes, + standard_atomic_weight, +) +from plasmapy.particles.exceptions import ( + ChargeError, + InvalidElementError, + InvalidIsotopeError, + InvalidParticleError, + MissingParticleDataError, + ParticleError, + ParticleWarning, +) +from plasmapy.particles.nuclear import nuclear_binding_energy, nuclear_reaction_energy +from plasmapy.particles.symbols import atomic_symbol, element_name, isotope_symbol +from plasmapy.utils.exceptions import PlasmaPyFutureWarning + +tests_for_exceptions = { + "too few nstates": ( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [1.0]}, + ParticleError, + ), + "too many nstates": ( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [1, 0, 0, 0]}, + ParticleError, + ), + "ionic fraction < 0": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [-0.1, 0.1, 1]}, + ParticleError, + ), + "ionic fraction > 1": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [1.1, 0.0, 0.0]}, + ParticleError, + ), + "invalid ionic fraction": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [1.0, 0.0, "a"]}, + ParticleError, + ), + "bad n_elem units": ( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [0, 1], "n_elem": 3 * u.m ** 3}, + u.UnitTypeError, + ), + "bad T_e units": ( + IonizationState, + [], + {"particle": "H", "ionic_fractions": [0, 1], "T_e": 1 * u.m}, + u.UnitTypeError, + ), + "negative n_elem": ( + IonizationState, + [], + { + "particle": "He", + "ionic_fractions": [1.0, 0.0, 0.0], + "n_elem": -1 * u.m ** -3, + }, + ParticleError, + ), + "negative T_e": ( + IonizationState, + [], + {"particle": "He", "ionic_fractions": [1.0, 0.0, 0.0], "T_e": -1 * u.K}, + ParticleError, + ), + "redundant ndens": ( + IonizationState, + [], + { + "particle": "H", + "ionic_fractions": np.array([3, 4]) * u.m ** -3, + "n_elem": 4 * u.m ** -3, + }, + ParticleError, + ), + "wrong type": (IonizationStateCollection, [], {"inputs": None}, ParticleError), + "not normalized": ( + IonizationStateCollection, + [], + {"inputs": {"He": [0.4, 0.5, 0.0]}, "tol": 1e-9}, + ParticleError, + ), + "negative ionfrac": ( + IonizationStateCollection, + [], + {"inputs": {"H": [-0.1, 1.1]}}, + ParticleError, + ), + "ion": ( + IonizationStateCollection, + [], + {"inputs": {"H": [0.1, 0.9], "He+": [0.0, 0.9, 0.1]}}, + ParticleError, + ), + "repeat elements": ( + IonizationStateCollection, + [], + {"inputs": {"H": [0.1, 0.9], "hydrogen": [0.2, 0.8]}}, + ParticleError, + ), + "isotope of element": ( + IonizationStateCollection, + [], + {"inputs": {"H": [0.1, 0.9], "D": [0.2, 0.8]}}, + ParticleError, + ), + "negative abundance": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, + "abundances": {"H": 1, "He": -0.1}, + }, + ParticleError, + ), + "imaginary abundance": ( + 
IonizationStateCollection, + [], + { + "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, + "abundances": {"H": 1, "He": 0.1j}, + }, + ParticleError, + ), + "wrong density units": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -2}, + "abundances": {"H": 1, "He": 0.1}, + }, + ParticleError, + ), + "abundance redundance": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, + "abundances": {"H": 1, "He": 0.1}, + }, + ParticleError, + ), + "abundance contradiction": ( + IonizationStateCollection, + [], + { + "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, + "abundances": {"H": 1, "He": 0.11}, + }, + ParticleError, + ), + "kappa too small": ( + IonizationStateCollection, + [], + {"inputs": ["H"], "kappa": 1.499999}, + ParticleError, + ), + "negative n": ( + IonizationStateCollection, + [], + {"inputs": ["H"], "n0": -1 * u.cm ** -3}, + ParticleError, + ), + "negative T_e for collection": ( + IonizationStateCollection, + [], + {"inputs": ["H-1"], "T_e": -1 * u.K}, + ParticleError, + ), +} + + [email protected]( + ["tested_object", "args", "kwargs", "expected_exception"], + list(tests_for_exceptions.values()), + ids=list(tests_for_exceptions.keys()), +) +def test_named_tests_for_exceptions(tested_object, args, kwargs, expected_exception): + """ + Test that appropriate exceptions are raised for inappropriate inputs + to `IonizationState` or `IonizationStateCollection` + """ + with pytest.raises(expected_exception) as exc_info: + tested_object(*args, **kwargs) + + assert expected_exception == exc_info.type + + +tests_from_nuclear = [ + [ + nuclear_reaction_energy, + [], + {"reactants": ["n"], "products": 3}, + pytest.raises(TypeError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["n"], "products": ["He-4"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["h"], "products": ["H-1"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["e-", "n"], "products": ["p+"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["e+", "n"], "products": ["p-"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["ksdf"], "products": ["H-3"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["H"], "products": ["H-1"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + [], + {"reactants": ["p"], "products": ["n", "n", "e-"]}, + pytest.raises(ParticleError), + ], + [ + nuclear_reaction_energy, + ["p --> p"], + {"reactants": "p", "products": "p"}, + pytest.raises(ParticleError), + ], + [nuclear_binding_energy, ["H"], {}, pytest.raises(ParticleError)], + [nuclear_binding_energy, ["He-99"], {}, pytest.raises(InvalidParticleError)], + [ + nuclear_binding_energy, + ["He"], + {"mass_numb": 99}, + pytest.raises(InvalidParticleError), + ], + [nuclear_binding_energy, [3.1415926535j], {}, pytest.raises(TypeError)], +] + +tests_from_atomic = [ + [ + atomic_symbol, + [ + "H-0", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + 3.14159, + ], + {}, + pytest.raises(TypeError), + ], + [ + atomic_symbol, + [ + "Og-294b", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "H-934361079326356530741942970523610389", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + 
[ + "Fe 2+4", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "Fe+24", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "Fe +59", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "C++++++++++++++++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "C-++++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + "n-1", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + "h", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "d", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "he", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "au", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "p-", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_symbol, + [ + 0, + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + 119, + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_symbol, + [ + "antiproton", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "H-3934", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + "C-12b", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + -1.5, + ], + {}, + pytest.raises(TypeError), + ], + [ + atomic_number, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "n-1", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "Neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + atomic_number, + [ + "d", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + "t", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + atomic_number, + [ + "s-36", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + "H-359", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + "C-12b", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + -1.5, + ], + {}, + pytest.raises(TypeError), + ], + [ + mass_number, + [ + "N-13+-+-", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + "h-3", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + mass_number, + [ + "n", + ], + {}, + pytest.raises(InvalidIsotopeError), + ], + [ + mass_number, + [ + "n-1", + ], + {}, + pytest.raises(InvalidIsotopeError), + ], + [ + element_name, + [ + "vegancupcakes", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "C-+-", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + 1.24, + ], + {}, + pytest.raises(TypeError), + ], + [ + element_name, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + element_name, + [ + "neutron", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + element_name, + [ + 0, + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "H++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "t", + ], + {}, 
+ pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "pb", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "d", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "h-3", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "Pb-9", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + element_name, + [ + "H 2+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + standard_atomic_weight, + [ + "H-1", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "help i'm trapped in a unit test", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + standard_atomic_weight, + [ + 1.1, + ], + {}, + pytest.raises(TypeError), + ], + [ + standard_atomic_weight, + [ + "n", + ], + {}, + pytest.raises(InvalidElementError), + ], + [ + standard_atomic_weight, + [ + "p", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "alpha", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "deuteron", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "tritium", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "Au+", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "Fe -2", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "Og 2+", + ], + {}, + pytest.raises(ParticleError), + ], + [ + standard_atomic_weight, + [ + "h", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + standard_atomic_weight, + [ + "fe", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "badinput", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "h+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "Au 81+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + electric_charge, + [ + "Au 81-", + ], + {}, + pytest.warns(ParticleWarning), + ], + [ + electric_charge, + [ + "H---", + ], + {}, + pytest.warns(ParticleWarning), + ], + [ + integer_charge, + [ + "fads", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "H++", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "h+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "fe 1+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "d+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "Fe 29+", + ], + {}, + pytest.raises(InvalidParticleError), + ], + [ + integer_charge, + [ + "H-1", + ], + {}, + pytest.raises(ChargeError), + ], + [ + integer_charge, + [ + "H---", + ], + {}, + pytest.warns(PlasmaPyFutureWarning), + ], + [ + integer_charge, + [ + "Fe -26", + ], + {}, + pytest.warns(PlasmaPyFutureWarning), + ], + [ + integer_charge, + [ + "Og 10-", + ], + {}, + pytest.warns(PlasmaPyFutureWarning), + ], + [ + isotope_symbol, + ("Md-260",), + {"mass_numb": 261}, + pytest.raises(InvalidParticleError), + ], + [ + isotope_symbol, + ("protium",), + {"mass_numb": 2}, + pytest.raises(InvalidParticleError), + ], + [isotope_symbol, ("alpha",), {"mass_numb": 3}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("O-18",), {"mass_numb": 19}, pytest.raises(InvalidParticleError)], + [ + isotope_symbol, + ("lead-209",), + {"mass_numb": 511}, + 
pytest.raises(InvalidParticleError), + ], + [isotope_symbol, ("He-1",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, [24], {"mass_numb": 23}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("H",), {"mass_numb": 0}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("H-1",), {"mass_numb": 2}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("P",), {}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, [1], {}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, [4], {}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, ("hydrogen-444444",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("Fe",), {"mass_numb": 2.1}, pytest.raises(TypeError)], + [isotope_symbol, ("He",), {"mass_numb": "c"}, pytest.raises(TypeError)], + [isotope_symbol, ("He-3",), {"mass_numb": 4}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("D",), {"mass_numb": 3}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("T",), {"mass_numb": 2}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("Fe",), {"mass_numb": None}, pytest.raises(InvalidIsotopeError)], + [isotope_symbol, ("He",), {"mass_numb": 99}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("d",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("h-3",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("h",), {}, pytest.raises(InvalidParticleError)], + [isotope_symbol, ("d+",), {}, pytest.raises(InvalidParticleError)], + [particle_mass, ["Og 1+"], {}, pytest.raises(MissingParticleDataError)], + [particle_mass, ["Fe-56"], {"Z": 1.4}, pytest.raises(TypeError)], + [particle_mass, ["H-1 +1"], {"Z": 0}, pytest.raises(InvalidParticleError)], + [particle_mass, [26], {"Z": 1, "mass_numb": "a"}, pytest.raises(TypeError)], + [ + particle_mass, + [26], + {"Z": 27, "mass_numb": 56}, + pytest.raises(InvalidParticleError), + ], + [particle_mass, ["Og"], {"Z": 1}, pytest.raises(MissingParticleDataError)], + [ + particle_mass, + ["Og"], + {"mass_numb": 696, "Z": 1}, + pytest.raises(InvalidParticleError), + ], + [particle_mass, ["He 1+"], {"mass_numb": 99}, pytest.raises(InvalidParticleError)], + [particle_mass, ["fe-56 1+"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["hydrogen-444444"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["hydrogen", 0], {}, pytest.raises(InvalidParticleError)], + [is_stable, [""], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["pb-209"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["h"], {}, pytest.raises(InvalidParticleError)], + [is_stable, ["He"], {}, pytest.raises(InvalidIsotopeError)], + [is_stable, ["B"], {}, pytest.raises(InvalidIsotopeError)], + [particle_mass, ["H-1"], {"mass_numb": 1, "Z": 1}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("H-1",), {"mass_numb": 1}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("H-2",), {"mass_numb": 2}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("T",), {"mass_numb": 3}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("Li-6",), {"mass_numb": 6}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("lithium-6",), {"mass_numb": 6}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("alpha",), {"mass_numb": 4}, pytest.warns(ParticleWarning)], + [isotope_symbol, ("p",), {"mass_numb": 1}, pytest.warns(ParticleWarning)], +] + + +atomic_TypeError_funcs_table = [ + atomic_symbol, + isotope_symbol, + atomic_number, + is_stable, + half_life, + mass_number, + element_name, + standard_atomic_weight, + 
nuclear_binding_energy, + nuclear_reaction_energy, +] + +atomic_TypeError_badargs = [1.1, {"cats": "bats"}, 1 + 1j] + +atomic_ParticleErrors_funcs_table = [ + atomic_symbol, + isotope_symbol, + atomic_number, + is_stable, + half_life, + mass_number, + element_name, + standard_atomic_weight, + particle_mass, + known_isotopes, + stable_isotopes, + common_isotopes, + isotopic_abundance, + integer_charge, + electric_charge, +] + +atomic_ParticleError_badargs = [ + -1, + 119, + "grumblemuffins", + "H-0", + "Og-294b", + "H-9343610", + "Fe 2+4", + "Fe+24", + "Fe +59", + "C++++++++++++++++", + "C-++++", + "h", + "d", + "he", + "au", + "alpha 1+", + "alpha-4", +] + +particle_error_tests = [ + (function, [bad_argument], {}, pytest.raises(InvalidParticleError)) + for function, bad_argument in itertools.product( + atomic_ParticleErrors_funcs_table, atomic_ParticleError_badargs + ) +] +type_error_tests = [ + (function, [bad_argument], {}, pytest.raises(TypeError)) + for function, bad_argument in itertools.product( + atomic_TypeError_funcs_table, atomic_TypeError_badargs + ) +] + + [email protected]( + ["tested_object", "args", "kwargs", "expectation"], + tests_from_nuclear + tests_from_atomic + particle_error_tests + type_error_tests, +) +def test_unnamed_tests_exceptions(tested_object, args, kwargs, expectation): + """ + Test that appropriate exceptions are raised for inappropriate inputs + to `IonizationState`. + """ + with expectation as exc_info: + tested_object(*args, **kwargs) + + if hasattr(expectation, "expected_exception"): + assert type(expectation.expected_exception()) == exc_info.type + + # TODO tbh given how ugly this is I don't think we should even be doing this check + if hasattr(expectation, "expected_warning"): + for expected_warning, recorded_warning in zip( + exc_info.expected_warning, exc_info.list + ): + assert expected_warning == recorded_warning.category diff --git a/plasmapy/particles/tests/test_ionization_collection.py b/plasmapy/particles/tests/test_ionization_collection.py index 6a85784f02..0ffc217672 100644 --- a/plasmapy/particles/tests/test_ionization_collection.py +++ b/plasmapy/particles/tests/test_ionization_collection.py @@ -16,9 +16,9 @@ mass_number, Particle, particle_symbol, + ParticleList, ) from plasmapy.particles.exceptions import InvalidIsotopeError, ParticleError -from plasmapy.particles.particle_collections import ParticleList from plasmapy.utils.pytest_helpers import run_test @@ -743,75 +743,6 @@ def test_base_particles_equal_ionic_fraction_particles(self): ) -IE = collections.namedtuple("IE", ["inputs", "expected_exception"]) - -tests_for_exceptions = { - "wrong type": IE({"inputs": None}, ParticleError), - "not normalized": IE( - {"inputs": {"He": [0.4, 0.5, 0.0]}, "tol": 1e-9}, ParticleError - ), - "negative ionfrac": IE({"inputs": {"H": [-0.1, 1.1]}}, ParticleError), - "ion": IE({"inputs": {"H": [0.1, 0.9], "He+": [0.0, 0.9, 0.1]}}, ParticleError), - "repeat elements": IE( - {"inputs": {"H": [0.1, 0.9], "hydrogen": [0.2, 0.8]}}, ParticleError - ), - "isotope of element": IE( - {"inputs": {"H": [0.1, 0.9], "D": [0.2, 0.8]}}, ParticleError - ), - "negative abundance": IE( - { - "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, - "abundances": {"H": 1, "He": -0.1}, - }, - ParticleError, - ), - "imaginary abundance": IE( - { - "inputs": {"H": [0.1, 0.9], "He": [0.4, 0.5, 0.1]}, - "abundances": {"H": 1, "He": 0.1j}, - }, - ParticleError, - ), - "wrong density units": IE( - { - "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -2}, - 
"abundances": {"H": 1, "He": 0.1}, - }, - ParticleError, - ), - "abundance redundance": IE( - { - "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, - "abundances": {"H": 1, "He": 0.1}, - }, - ParticleError, - ), - "abundance contradiction": IE( - { - "inputs": {"H": [10, 90] * u.m ** -3, "He": [0.1, 0.9, 0] * u.m ** -3}, - "abundances": {"H": 1, "He": 0.11}, - }, - ParticleError, - ), - "kappa too small": IE({"inputs": ["H"], "kappa": 1.499999}, ParticleError), - "negative n": IE({"inputs": ["H"], "n0": -1 * u.cm ** -3}, ParticleError), - "negative T_e": IE({"inputs": ["H-1"], "T_e": -1 * u.K}, ParticleError), -} - - [email protected]("test_name", tests_for_exceptions.keys()) -def test_exceptions_upon_instantiation(test_name): - """ - Test that appropriate exceptions are raised for inappropriate inputs - to IonizationStateCollection when first instantiated. - """ - run_test( - IonizationStateCollection, - kwargs=tests_for_exceptions[test_name].inputs, - expected_outcome=tests_for_exceptions[test_name].expected_exception, - ) - - class TestIonizationStateCollectionDensityEqualities: """ Test that IonizationStateCollection instances are equal or not equal to each diff --git a/plasmapy/particles/tests/test_ionization_state.py b/plasmapy/particles/tests/test_ionization_state.py index 683518705a..2a79409f59 100644 --- a/plasmapy/particles/tests/test_ionization_state.py +++ b/plasmapy/particles/tests/test_ionization_state.py @@ -456,52 +456,6 @@ def test_State_equality_and_getitem(self): assert result_from_charge == result_from_symbol -IE = collections.namedtuple("IE", ["inputs", "expected_exception"]) - -tests_for_exceptions = { - "too few nstates": IE({"particle": "H", "ionic_fractions": [1.0]}, ParticleError), - "too many nstates": IE( - {"particle": "H", "ionic_fractions": [1, 0, 0, 0]}, ParticleError - ), - "ionic fraction < 0": IE( - {"particle": "He", "ionic_fractions": [-0.1, 0.1, 1]}, ParticleError - ), - "ionic fraction > 1": IE( - {"particle": "He", "ionic_fractions": [1.1, 0.0, 0.0]}, ParticleError - ), - "invalid ionic fraction": IE( - {"particle": "He", "ionic_fractions": [1.0, 0.0, "a"]}, ParticleError - ), - "bad n_elem units": IE( - {"particle": "H", "ionic_fractions": [0, 1], "n_elem": 3 * u.m ** 3}, - u.UnitTypeError, - ), - "bad T_e units": IE( - {"particle": "H", "ionic_fractions": [0, 1], "T_e": 1 * u.m}, u.UnitTypeError - ), - "negative n_elem": IE( - { - "particle": "He", - "ionic_fractions": [1.0, 0.0, 0.0], - "n_elem": -1 * u.m ** -3, - }, - ParticleError, - ), - "negative T_e": IE( - {"particle": "He", "ionic_fractions": [1.0, 0.0, 0.0], "T_e": -1 * u.K}, - ParticleError, - ), - "redundant ndens": IE( - { - "particle": "H", - "ionic_fractions": np.array([3, 4]) * u.m ** -3, - "n_elem": 4 * u.m ** -3, - }, - ParticleError, - ), -} - - ions = ["Fe 6+", "p", "He-4 0+", "triton", "alpha", "Ne +0"] @@ -515,12 +469,14 @@ def test_IonizationState_ionfracs_from_ion_input(ion): expected_ionic_fractions = np.zeros(ion_particle.atomic_number + 1) expected_ionic_fractions[ion_particle.charge_number] = 1.0 - if not np.allclose(expected_ionic_fractions, actual_ionic_fractions, atol=1e-16): - pytest.fail( - f"The returned ionic fraction for IonizationState({repr(ion)}) " - f"should have entirely been in the Z = {ion_particle.charge_number} " - f"level, but was instead: {ionization_state.ionic_fractions}." 
- ) + np.testing.assert_allclose( + expected_ionic_fractions, + actual_ionic_fractions, + atol=1e-16, + err_msg=f"The returned ionic fraction for IonizationState({repr(ion)}) " + f"should have entirely been in the Z = {ion_particle.integer_charge} " + f"level.", + ) @pytest.mark.parametrize("ion", ions) @@ -546,19 +502,6 @@ def test_IonizationState_base_particles_from_ion_input(ion): ) [email protected]("test", tests_for_exceptions.keys()) -def test_IonizationState_exceptions(test): - """ - Test that appropriate exceptions are raised for inappropriate inputs - to `IonizationState`. - """ - run_test( - IonizationState, - kwargs=tests_for_exceptions[test].inputs, - expected_outcome=tests_for_exceptions[test].expected_exception, - ) - - expected_properties = { "T_e": 5000.0 * u.K, "tol": 2e-14, diff --git a/plasmapy/particles/tests/test_nuclear.py b/plasmapy/particles/tests/test_nuclear.py index ca6f607e5b..f0bd68227c 100644 --- a/plasmapy/particles/tests/test_nuclear.py +++ b/plasmapy/particles/tests/test_nuclear.py @@ -3,6 +3,7 @@ from astropy import constants as const from astropy import units as u +from astropy.tests.helper import assert_quantity_allclose from plasmapy.particles.exceptions import InvalidParticleError, ParticleError from plasmapy.particles.nuclear import ( @@ -12,81 +13,6 @@ ) from plasmapy.utils.pytest_helpers import run_test, run_test_equivalent_calls -test_nuclear_table = [ - [nuclear_binding_energy, "p", {}, 0 * u.J], - [nuclear_binding_energy, "n", {}, 0 * u.J], - [nuclear_binding_energy, "p", {}, 0 * u.J], - [nuclear_binding_energy, "H", {}, ParticleError], - [nuclear_binding_energy, "He-99", {}, InvalidParticleError], - [nuclear_binding_energy, "He", {"mass_numb": 99}, InvalidParticleError], - [nuclear_binding_energy, 3.1415926535j, {}, TypeError], - [mass_energy, "e-", {}, (const.m_e * const.c ** 2).to(u.J)], - [mass_energy, "p+", {}, (const.m_p * const.c ** 2).to(u.J)], - [mass_energy, "H-1", {}, (const.m_p * const.c ** 2).to(u.J)], - [mass_energy, "H-1 0+", {}, (const.m_p * const.c ** 2).to(u.J)], - [mass_energy, "n", {}, (const.m_n * const.c ** 2).to(u.J)], - [nuclear_reaction_energy, (), {"reactants": ["n"], "products": 3}, TypeError], - [ - nuclear_reaction_energy, - (), - {"reactants": ["n"], "products": ["He-4"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["h"], "products": ["H-1"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["e-", "n"], "products": ["p+"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["e+", "n"], "products": ["p-"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["ksdf"], "products": ["H-3"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["H"], "products": ["H-1"]}, - ParticleError, - ], - [ - nuclear_reaction_energy, - (), - {"reactants": ["p"], "products": ["n", "n", "e-"]}, - ParticleError, - ], - [nuclear_reaction_energy, "H + H --> H", {}, ParticleError], - [nuclear_reaction_energy, "H + H", {}, ParticleError], - [nuclear_reaction_energy, 1, {}, TypeError], - [nuclear_reaction_energy, "H-1 + H-1 --> H-1", {}, ParticleError], - [nuclear_reaction_energy, "p --> n", {}, ParticleError], - [ - nuclear_reaction_energy, - "p --> p", - {"reactants": "p", "products": "p"}, - ParticleError, - ], -] - - [email protected]("test_inputs", test_nuclear_table) -def test_nuclear(test_inputs): - run_test(*test_inputs, rtol=1e-3) - - test_nuclear_equivalent_calls_table = [ [nuclear_binding_energy, 
["He-4", {}], ["alpha", {}], ["He", {"mass_numb": 4}]] ] @@ -178,3 +104,22 @@ def test_nuclear_reaction_energy(): expected = 17.6 * u.MeV actual = nuclear_reaction_energy(reactants=reactants, products=products) assert u.isclose(actual, expected, rtol=1e-3) + + +table_of_nuclear_tests = [ + [nuclear_binding_energy, ["p"], {}, 0 * u.J], + [nuclear_binding_energy, ["n"], {}, 0 * u.J], + [nuclear_binding_energy, ["p"], {}, 0 * u.J], + [mass_energy, ["e-"], {}, (const.m_e * const.c ** 2).to(u.J)], + [mass_energy, ["p+"], {}, (const.m_p * const.c ** 2).to(u.J)], + [mass_energy, ["H-1"], {}, (const.m_p * const.c ** 2).to(u.J)], + [mass_energy, ["H-1 0+"], {}, (const.m_p * const.c ** 2).to(u.J)], + [mass_energy, ["n"], {}, (const.m_n * const.c ** 2).to(u.J)], +] + + [email protected]( + ["tested_object", "args", "kwargs", "expected_value"], table_of_nuclear_tests +) +def test_nuclear_table(tested_object, args, kwargs, expected_value): + run_test(tested_object, args, kwargs, expected_value, rtol=1e-3) diff --git a/plasmapy/utils/pytest_helpers/pytest_helpers.py b/plasmapy/utils/pytest_helpers/pytest_helpers.py index 9c18930eb2..8eb2d75cb3 100644 --- a/plasmapy/utils/pytest_helpers/pytest_helpers.py +++ b/plasmapy/utils/pytest_helpers/pytest_helpers.py @@ -218,7 +218,7 @@ def test_func(inputs): if kwargs is None: kwargs = {} - if not isinstance(args, tuple): + if not type(args) in [tuple, list]: args = (args,) if not callable(func): diff --git a/plasmapy/utils/tests/test_roman.py b/plasmapy/utils/tests/test_roman.py index 963f6934f3..c661fd91f2 100644 --- a/plasmapy/utils/tests/test_roman.py +++ b/plasmapy/utils/tests/test_roman.py @@ -139,17 +139,16 @@ (np.int64(14), "XIV"), ] -toRoman_exceptions_table = [ - ("X", TypeError), - (-1, roman.OutOfRangeError), - (0, roman.OutOfRangeError), - (5000, roman.OutOfRangeError), -] - -fromRoman_exceptions_table = [ - ("asdfasd", roman.InvalidRomanNumeralError), - (1, TypeError), - ("xi", roman.InvalidRomanNumeralError), +exceptions_table = [ + (roman.to_roman, "X", TypeError), + (roman.to_roman, -1, roman.OutOfRangeError), + (roman.to_roman, 0, roman.OutOfRangeError), + (roman.to_roman, 5000, roman.OutOfRangeError), + (roman.from_roman, "asdfasd", roman.InvalidRomanNumeralError), + (roman.from_roman, 1, TypeError), + (roman.from_roman, "xi", roman.InvalidRomanNumeralError), + (roman.is_roman_numeral, 1, TypeError), # TODO: tbh I would just return False here? + (roman.is_roman_numeral, ("I", "II"), TypeError), ] @@ -157,50 +156,31 @@ def test_to_roman(integer, roman_numeral): """ Test that `~plasmapy.utils.roman.to_roman` correctly converts - integers to Roman numerals. + integers to Roman numerals, and that the inverse is true as well. """ run_test(func=roman.to_roman, args=integer, expected_outcome=roman_numeral) - - [email protected]("integer, roman_numeral", ints_and_roman_numerals) -def test_from_roman(integer, roman_numeral): - """ - Test that `~plasmapy.utils.roman.from_roman` correctly converts - Roman numerals to integers. - """ run_test(func=roman.from_roman, args=roman_numeral, expected_outcome=int(integer)) [email protected]("input, expected_exception", toRoman_exceptions_table) -def test_to_roman_exceptions(input, expected_exception): - """ - Test that `~plasmapy.utils.roman.to_roman` raises the correct - exceptions when necessary. 
- """ - run_test(func=roman.to_roman, args=input, expected_outcome=expected_exception) - - [email protected]("input, expected_exception", fromRoman_exceptions_table) -def test_from_roman_exceptions(input, expected_exception): [email protected]("function, argument, expected_exception", exceptions_table) +def test_to_roman_exceptions(function, argument, expected_exception): """ - Test that `~plasmapy.utils.roman.from_roman` raises the correct + Test that `~plasmapy.utils.roman` functions raise the correct exceptions when necessary. """ - run_test(func=roman.from_roman, args=input, expected_outcome=expected_exception) + run_test(func=function, args=argument, expected_outcome=expected_exception) test_is_roman_numeral_table = [ ("I", True), ("i", False), ("CLXXXVIII", True), - (1, TypeError), ("khjfda", False), ("VIIII", False), ("IXX", False), - (("I", "II"), TypeError), ] [email protected]("input, expected", test_is_roman_numeral_table) -def test_is_roman_numeral(input, expected): - run_test(func=roman.is_roman_numeral, args=input, expected_outcome=expected) [email protected]("argument, expected", test_is_roman_numeral_table) +def test_is_roman_numeral(argument, expected): + run_test(func=roman.is_roman_numeral, args=argument, expected_outcome=expected)
graspologic-org__graspologic-428
update requirements to scipy>=1.4
Scipy 1.4
- has a much faster linear assignment problem solver, making FAQ way faster
- has MGC, which we eventually want for the new nonpar and signal subgraph work
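For context, a minimal sketch of the SciPy linear-assignment API whose speedup the issue refers to (the cost matrix is a made-up placeholder, not graspologic code):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment  # the solver scipy 1.4 made much faster

cost = np.random.rand(100, 100)                 # hypothetical cost matrix
row_ind, col_ind = linear_sum_assignment(cost)  # optimal row-to-column assignment
print(cost[row_ind, col_ind].sum())             # total cost of the optimal assignment
```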
[ { "content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.3\",\n]\n\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py" } ]
[ { "content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.4.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.3\",\n]\n\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py" } ]
diff --git a/requirements.txt b/requirements.txt index 55d70431f..68b2d9af3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ networkx>=2.1 numpy>=1.8.1 scikit-learn>=0.19.1 -scipy>=1.1.0 +scipy>=1.4.0 seaborn>=0.9.0 matplotlib>=3.0.0,<=3.3.0 hyppo>=0.1.2 diff --git a/setup.py b/setup.py index 7378a02c1..8f7696ef3 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ "networkx>=2.1", "numpy>=1.8.1", "scikit-learn>=0.19.1", - "scipy>=1.1.0", + "scipy>=1.4.0", "seaborn>=0.9.0", "matplotlib>=3.0.0", "hyppo>=0.1.3",
quantopian__zipline-1625
conflicting CLI flags for `clean`
Dear Zipline Maintainers,

Before I tell you about my issue, let me describe my environment:

# Environment

* Operating System: `OS X 10.12.1`
* Python Version: `2.7`
* Python Bitness: `64`
* How did you install Zipline: `conda`
* Python packages: _default_

Now that you know a little about me, let me tell you about the issue I am having:

# Description of Issue

The CLI command `zipline clean` has conflicting flags for `-b, --bundle` and `-b, --before`.

```
(zipline-venv) pgeez$ zipline clean --help
Usage: zipline clean [OPTIONS]

  Clean up data downloaded with the ingest command.

Options:
  -b, --bundle BUNDLE-NAME  The data bundle to clean.  [default: quantopian-
                            quandl]
  -b, --before TIMESTAMP    Clear all data before TIMESTAMP. This may not be
                            passed with -k / --keep-last
  -a, --after TIMESTAMP     Clear all data after TIMESTAMP This may not be
                            passed with -k / --keep-last
  -k, --keep-last N         Clear all but the last N downloads. This may not
                            be passed with -b / --before or -a / --after
  --help                    Show this message and exit.
```

* What happened instead?
Because of the conflict, `-b` defaults to `--before`.

Sincerely,
pgeez
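For illustration, a stripped-down sketch (not zipline's actual option definitions) of the clash and of the shape of the fix, which gives `--before` a short opt that no longer collides with `--bundle`:

```python
import click

@click.command()
@click.option("-b", "--bundle", default="quantopian-quandl", help="The data bundle to clean.")
@click.option("-e", "--before", help="Clear all data before TIMESTAMP.")  # was "-b", clashing with --bundle
@click.option("-a", "--after", help="Clear all data after TIMESTAMP.")
@click.option("-k", "--keep-last", type=int, help="Clear all but the last N downloads.")
def clean(bundle, before, after, keep_last):
    # With both options registered under "-b", the reported behaviour was that
    # "-b" resolved to --before, silently shadowing --bundle; distinct short
    # opts remove the ambiguity.
    click.echo(f"bundle={bundle!r} before={before!r} after={after!r} keep_last={keep_last!r}")

if __name__ == "__main__":
    clean()
```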
[ { "content": "import errno\nimport os\nfrom functools import wraps\n\nimport click\nimport logbook\nimport pandas as pd\nfrom six import text_type\n\nfrom zipline.data import bundles as bundles_module\nfrom zipline.utils.cli import Date, Timestamp\nfrom zipline.utils.run_algo import _run, load_extensions\n\ntry:\n __IPYTHON__\nexcept NameError:\n __IPYTHON__ = False\n\n\[email protected]()\[email protected](\n '-e',\n '--extension',\n multiple=True,\n help='File or module path to a zipline extension to load.',\n)\[email protected](\n '--strict-extensions/--non-strict-extensions',\n is_flag=True,\n help='If --strict-extensions is passed then zipline will not run if it'\n ' cannot load all of the specified extensions. If this is not passed or'\n ' --non-strict-extensions is passed then the failure will be logged but'\n ' execution will continue.',\n)\[email protected](\n '--default-extension/--no-default-extension',\n is_flag=True,\n default=True,\n help=\"Don't load the default zipline extension.py file in $ZIPLINE_HOME.\",\n)\ndef main(extension, strict_extensions, default_extension):\n \"\"\"Top level zipline entry point.\n \"\"\"\n # install a logbook handler before performing any other operations\n logbook.StderrHandler().push_application()\n load_extensions(\n default_extension,\n extension,\n strict_extensions,\n os.environ,\n )\n\n\ndef extract_option_object(option):\n \"\"\"Convert a click.option call into a click.Option object.\n\n Parameters\n ----------\n option : decorator\n A click.option decorator.\n\n Returns\n -------\n option_object : click.Option\n The option object that this decorator will create.\n \"\"\"\n @option\n def opt():\n pass\n\n return opt.__click_params__[0]\n\n\ndef ipython_only(option):\n \"\"\"Mark that an option should only be exposed in IPython.\n\n Parameters\n ----------\n option : decorator\n A click.option decorator.\n\n Returns\n -------\n ipython_only_dec : decorator\n A decorator that correctly applies the argument even when not\n using IPython mode.\n \"\"\"\n if __IPYTHON__:\n return option\n\n argname = extract_option_object(option).name\n\n def d(f):\n @wraps(f)\n def _(*args, **kwargs):\n kwargs[argname] = None\n return f(*args, **kwargs)\n return _\n return d\n\n\[email protected]()\[email protected](\n '-f',\n '--algofile',\n default=None,\n type=click.File('r'),\n help='The file that contains the algorithm to run.',\n)\[email protected](\n '-t',\n '--algotext',\n help='The algorithm script to run.',\n)\[email protected](\n '-D',\n '--define',\n multiple=True,\n help=\"Define a name to be bound in the namespace before executing\"\n \" the algotext. For example '-Dname=value'. The value may be any python\"\n \" expression. 
These are evaluated in order so they may refer to previously\"\n \" defined names.\",\n)\[email protected](\n '--data-frequency',\n type=click.Choice({'daily', 'minute'}),\n default='daily',\n show_default=True,\n help='The data frequency of the simulation.',\n)\[email protected](\n '--capital-base',\n type=float,\n default=10e6,\n show_default=True,\n help='The starting capital for the simulation.',\n)\[email protected](\n '-b',\n '--bundle',\n default='quantopian-quandl',\n metavar='BUNDLE-NAME',\n show_default=True,\n help='The data bundle to use for the simulation.',\n)\[email protected](\n '--bundle-timestamp',\n type=Timestamp(),\n default=pd.Timestamp.utcnow(),\n show_default=False,\n help='The date to lookup data on or before.\\n'\n '[default: <current-time>]'\n)\[email protected](\n '-s',\n '--start',\n type=Date(tz='utc', as_timestamp=True),\n help='The start date of the simulation.',\n)\[email protected](\n '-e',\n '--end',\n type=Date(tz='utc', as_timestamp=True),\n help='The end date of the simulation.',\n)\[email protected](\n '-o',\n '--output',\n default='-',\n metavar='FILENAME',\n show_default=True,\n help=\"The location to write the perf data. If this is '-' the perf will\"\n \" be written to stdout.\",\n)\[email protected](\n '--print-algo/--no-print-algo',\n is_flag=True,\n default=False,\n help='Print the algorithm to stdout.',\n)\n@ipython_only(click.option(\n '--local-namespace/--no-local-namespace',\n is_flag=True,\n default=None,\n help='Should the algorithm methods be resolved in the local namespace.'\n))\[email protected]_context\ndef run(ctx,\n algofile,\n algotext,\n define,\n data_frequency,\n capital_base,\n bundle,\n bundle_timestamp,\n start,\n end,\n output,\n print_algo,\n local_namespace):\n \"\"\"Run a backtest for the given algorithm.\n \"\"\"\n # check that the start and end dates are passed correctly\n if start is None and end is None:\n # check both at the same time to avoid the case where a user\n # does not pass either of these and then passes the first only\n # to be told they need to pass the second argument also\n ctx.fail(\n \"must specify dates with '-s' / '--start' and '-e' / '--end'\",\n )\n if start is None:\n ctx.fail(\"must specify a start date with '-s' / '--start'\")\n if end is None:\n ctx.fail(\"must specify an end date with '-e' / '--end'\")\n\n if (algotext is not None) == (algofile is not None):\n ctx.fail(\n \"must specify exactly one of '-f' / '--algofile' or\"\n \" '-t' / '--algotext'\",\n )\n\n perf = _run(\n initialize=None,\n handle_data=None,\n before_trading_start=None,\n analyze=None,\n algofile=algofile,\n algotext=algotext,\n defines=define,\n data_frequency=data_frequency,\n capital_base=capital_base,\n data=None,\n bundle=bundle,\n bundle_timestamp=bundle_timestamp,\n start=start,\n end=end,\n output=output,\n print_algo=print_algo,\n local_namespace=local_namespace,\n environ=os.environ,\n )\n\n if output == '-':\n click.echo(str(perf))\n elif output != os.devnull: # make the zipline magic not write any data\n perf.to_pickle(output)\n\n return perf\n\n\ndef zipline_magic(line, cell=None):\n \"\"\"The zipline IPython cell magic.\n \"\"\"\n load_extensions(\n default=True,\n extensions=[],\n strict=True,\n environ=os.environ,\n )\n try:\n return run.main(\n # put our overrides at the start of the parameter list so that\n # users may pass values with higher precedence\n [\n '--algotext', cell,\n '--output', os.devnull, # don't write the results by default\n ] + ([\n # these options are set when running in line magic 
mode\n # set a non None algo text to use the ipython user_ns\n '--algotext', '',\n '--local-namespace',\n ] if cell is None else []) + line.split(),\n '%s%%zipline' % ((cell or '') and '%'),\n # don't use system exit and propogate errors to the caller\n standalone_mode=False,\n )\n except SystemExit as e:\n # https://github.com/mitsuhiko/click/pull/533\n # even in standalone_mode=False `--help` really wants to kill us ;_;\n if e.code:\n raise ValueError('main returned non-zero status code: %d' % e.code)\n\n\[email protected]()\[email protected](\n '-b',\n '--bundle',\n default='quantopian-quandl',\n metavar='BUNDLE-NAME',\n show_default=True,\n help='The data bundle to ingest.',\n)\[email protected](\n '--assets-version',\n type=int,\n multiple=True,\n help='Version of the assets db to which to downgrade.',\n)\[email protected](\n '--show-progress/--no-show-progress',\n default=True,\n help='Print progress information to the terminal.'\n)\ndef ingest(bundle, assets_version, show_progress):\n \"\"\"Ingest the data for the given bundle.\n \"\"\"\n bundles_module.ingest(\n bundle,\n os.environ,\n pd.Timestamp.utcnow(),\n assets_version,\n show_progress,\n )\n\n\[email protected]()\[email protected](\n '-b',\n '--bundle',\n default='quantopian-quandl',\n metavar='BUNDLE-NAME',\n show_default=True,\n help='The data bundle to clean.',\n)\[email protected](\n '-b',\n '--before',\n type=Timestamp(),\n help='Clear all data before TIMESTAMP.'\n ' This may not be passed with -k / --keep-last',\n)\[email protected](\n '-a',\n '--after',\n type=Timestamp(),\n help='Clear all data after TIMESTAMP'\n ' This may not be passed with -k / --keep-last',\n)\[email protected](\n '-k',\n '--keep-last',\n type=int,\n metavar='N',\n help='Clear all but the last N downloads.'\n ' This may not be passed with -b / --before or -a / --after',\n)\ndef clean(bundle, before, after, keep_last):\n \"\"\"Clean up data downloaded with the ingest command.\n \"\"\"\n bundles_module.clean(\n bundle,\n before,\n after,\n keep_last,\n )\n\n\[email protected]()\ndef bundles():\n \"\"\"List all of the available data bundles.\n \"\"\"\n for bundle in sorted(bundles_module.bundles.keys()):\n if bundle.startswith('.'):\n # hide the test data\n continue\n try:\n ingestions = list(\n map(text_type, bundles_module.ingestions_for_bundle(bundle))\n )\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n ingestions = []\n\n # If we got no ingestions, either because the directory didn't exist or\n # because there were no entries, print a single message indicating that\n # no ingestions have yet been made.\n for timestamp in ingestions or [\"<no ingestions>\"]:\n click.echo(\"%s %s\" % (bundle, timestamp))\n\n\nif __name__ == '__main__':\n main()\n", "path": "zipline/__main__.py" } ]
[ { "content": "import errno\nimport os\nfrom functools import wraps\n\nimport click\nimport logbook\nimport pandas as pd\nfrom six import text_type\n\nfrom zipline.data import bundles as bundles_module\nfrom zipline.utils.cli import Date, Timestamp\nfrom zipline.utils.run_algo import _run, load_extensions\n\ntry:\n __IPYTHON__\nexcept NameError:\n __IPYTHON__ = False\n\n\[email protected]()\[email protected](\n '-e',\n '--extension',\n multiple=True,\n help='File or module path to a zipline extension to load.',\n)\[email protected](\n '--strict-extensions/--non-strict-extensions',\n is_flag=True,\n help='If --strict-extensions is passed then zipline will not run if it'\n ' cannot load all of the specified extensions. If this is not passed or'\n ' --non-strict-extensions is passed then the failure will be logged but'\n ' execution will continue.',\n)\[email protected](\n '--default-extension/--no-default-extension',\n is_flag=True,\n default=True,\n help=\"Don't load the default zipline extension.py file in $ZIPLINE_HOME.\",\n)\ndef main(extension, strict_extensions, default_extension):\n \"\"\"Top level zipline entry point.\n \"\"\"\n # install a logbook handler before performing any other operations\n logbook.StderrHandler().push_application()\n load_extensions(\n default_extension,\n extension,\n strict_extensions,\n os.environ,\n )\n\n\ndef extract_option_object(option):\n \"\"\"Convert a click.option call into a click.Option object.\n\n Parameters\n ----------\n option : decorator\n A click.option decorator.\n\n Returns\n -------\n option_object : click.Option\n The option object that this decorator will create.\n \"\"\"\n @option\n def opt():\n pass\n\n return opt.__click_params__[0]\n\n\ndef ipython_only(option):\n \"\"\"Mark that an option should only be exposed in IPython.\n\n Parameters\n ----------\n option : decorator\n A click.option decorator.\n\n Returns\n -------\n ipython_only_dec : decorator\n A decorator that correctly applies the argument even when not\n using IPython mode.\n \"\"\"\n if __IPYTHON__:\n return option\n\n argname = extract_option_object(option).name\n\n def d(f):\n @wraps(f)\n def _(*args, **kwargs):\n kwargs[argname] = None\n return f(*args, **kwargs)\n return _\n return d\n\n\[email protected]()\[email protected](\n '-f',\n '--algofile',\n default=None,\n type=click.File('r'),\n help='The file that contains the algorithm to run.',\n)\[email protected](\n '-t',\n '--algotext',\n help='The algorithm script to run.',\n)\[email protected](\n '-D',\n '--define',\n multiple=True,\n help=\"Define a name to be bound in the namespace before executing\"\n \" the algotext. For example '-Dname=value'. The value may be any python\"\n \" expression. 
These are evaluated in order so they may refer to previously\"\n \" defined names.\",\n)\[email protected](\n '--data-frequency',\n type=click.Choice({'daily', 'minute'}),\n default='daily',\n show_default=True,\n help='The data frequency of the simulation.',\n)\[email protected](\n '--capital-base',\n type=float,\n default=10e6,\n show_default=True,\n help='The starting capital for the simulation.',\n)\[email protected](\n '-b',\n '--bundle',\n default='quantopian-quandl',\n metavar='BUNDLE-NAME',\n show_default=True,\n help='The data bundle to use for the simulation.',\n)\[email protected](\n '--bundle-timestamp',\n type=Timestamp(),\n default=pd.Timestamp.utcnow(),\n show_default=False,\n help='The date to lookup data on or before.\\n'\n '[default: <current-time>]'\n)\[email protected](\n '-s',\n '--start',\n type=Date(tz='utc', as_timestamp=True),\n help='The start date of the simulation.',\n)\[email protected](\n '-e',\n '--end',\n type=Date(tz='utc', as_timestamp=True),\n help='The end date of the simulation.',\n)\[email protected](\n '-o',\n '--output',\n default='-',\n metavar='FILENAME',\n show_default=True,\n help=\"The location to write the perf data. If this is '-' the perf will\"\n \" be written to stdout.\",\n)\[email protected](\n '--print-algo/--no-print-algo',\n is_flag=True,\n default=False,\n help='Print the algorithm to stdout.',\n)\n@ipython_only(click.option(\n '--local-namespace/--no-local-namespace',\n is_flag=True,\n default=None,\n help='Should the algorithm methods be resolved in the local namespace.'\n))\[email protected]_context\ndef run(ctx,\n algofile,\n algotext,\n define,\n data_frequency,\n capital_base,\n bundle,\n bundle_timestamp,\n start,\n end,\n output,\n print_algo,\n local_namespace):\n \"\"\"Run a backtest for the given algorithm.\n \"\"\"\n # check that the start and end dates are passed correctly\n if start is None and end is None:\n # check both at the same time to avoid the case where a user\n # does not pass either of these and then passes the first only\n # to be told they need to pass the second argument also\n ctx.fail(\n \"must specify dates with '-s' / '--start' and '-e' / '--end'\",\n )\n if start is None:\n ctx.fail(\"must specify a start date with '-s' / '--start'\")\n if end is None:\n ctx.fail(\"must specify an end date with '-e' / '--end'\")\n\n if (algotext is not None) == (algofile is not None):\n ctx.fail(\n \"must specify exactly one of '-f' / '--algofile' or\"\n \" '-t' / '--algotext'\",\n )\n\n perf = _run(\n initialize=None,\n handle_data=None,\n before_trading_start=None,\n analyze=None,\n algofile=algofile,\n algotext=algotext,\n defines=define,\n data_frequency=data_frequency,\n capital_base=capital_base,\n data=None,\n bundle=bundle,\n bundle_timestamp=bundle_timestamp,\n start=start,\n end=end,\n output=output,\n print_algo=print_algo,\n local_namespace=local_namespace,\n environ=os.environ,\n )\n\n if output == '-':\n click.echo(str(perf))\n elif output != os.devnull: # make the zipline magic not write any data\n perf.to_pickle(output)\n\n return perf\n\n\ndef zipline_magic(line, cell=None):\n \"\"\"The zipline IPython cell magic.\n \"\"\"\n load_extensions(\n default=True,\n extensions=[],\n strict=True,\n environ=os.environ,\n )\n try:\n return run.main(\n # put our overrides at the start of the parameter list so that\n # users may pass values with higher precedence\n [\n '--algotext', cell,\n '--output', os.devnull, # don't write the results by default\n ] + ([\n # these options are set when running in line magic 
mode\n # set a non None algo text to use the ipython user_ns\n '--algotext', '',\n '--local-namespace',\n ] if cell is None else []) + line.split(),\n '%s%%zipline' % ((cell or '') and '%'),\n # don't use system exit and propogate errors to the caller\n standalone_mode=False,\n )\n except SystemExit as e:\n # https://github.com/mitsuhiko/click/pull/533\n # even in standalone_mode=False `--help` really wants to kill us ;_;\n if e.code:\n raise ValueError('main returned non-zero status code: %d' % e.code)\n\n\[email protected]()\[email protected](\n '-b',\n '--bundle',\n default='quantopian-quandl',\n metavar='BUNDLE-NAME',\n show_default=True,\n help='The data bundle to ingest.',\n)\[email protected](\n '--assets-version',\n type=int,\n multiple=True,\n help='Version of the assets db to which to downgrade.',\n)\[email protected](\n '--show-progress/--no-show-progress',\n default=True,\n help='Print progress information to the terminal.'\n)\ndef ingest(bundle, assets_version, show_progress):\n \"\"\"Ingest the data for the given bundle.\n \"\"\"\n bundles_module.ingest(\n bundle,\n os.environ,\n pd.Timestamp.utcnow(),\n assets_version,\n show_progress,\n )\n\n\[email protected]()\[email protected](\n '-b',\n '--bundle',\n default='quantopian-quandl',\n metavar='BUNDLE-NAME',\n show_default=True,\n help='The data bundle to clean.',\n)\[email protected](\n '-e',\n '--before',\n type=Timestamp(),\n help='Clear all data before TIMESTAMP.'\n ' This may not be passed with -k / --keep-last',\n)\[email protected](\n '-a',\n '--after',\n type=Timestamp(),\n help='Clear all data after TIMESTAMP'\n ' This may not be passed with -k / --keep-last',\n)\[email protected](\n '-k',\n '--keep-last',\n type=int,\n metavar='N',\n help='Clear all but the last N downloads.'\n ' This may not be passed with -b / --before or -a / --after',\n)\ndef clean(bundle, before, after, keep_last):\n \"\"\"Clean up data downloaded with the ingest command.\n \"\"\"\n bundles_module.clean(\n bundle,\n before,\n after,\n keep_last,\n )\n\n\[email protected]()\ndef bundles():\n \"\"\"List all of the available data bundles.\n \"\"\"\n for bundle in sorted(bundles_module.bundles.keys()):\n if bundle.startswith('.'):\n # hide the test data\n continue\n try:\n ingestions = list(\n map(text_type, bundles_module.ingestions_for_bundle(bundle))\n )\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n ingestions = []\n\n # If we got no ingestions, either because the directory didn't exist or\n # because there were no entries, print a single message indicating that\n # no ingestions have yet been made.\n for timestamp in ingestions or [\"<no ingestions>\"]:\n click.echo(\"%s %s\" % (bundle, timestamp))\n\n\nif __name__ == '__main__':\n main()\n", "path": "zipline/__main__.py" } ]
diff --git a/docs/source/whatsnew/1.0.3.txt b/docs/source/whatsnew/1.0.3.txt index fb0cac6f05..e21f02a7ac 100644 --- a/docs/source/whatsnew/1.0.3.txt +++ b/docs/source/whatsnew/1.0.3.txt @@ -56,4 +56,6 @@ None Miscellaneous ~~~~~~~~~~~~~ -None +* Changed the short-opt for ``--before`` in the ``zipline clean`` + entrypoint. The new argument is ``-e``. The old argument, ``-b``, conflicted + with the ``--bundle`` short-opt (:issue:`1625`). diff --git a/zipline/__main__.py b/zipline/__main__.py index 8e719690dc..b42a90ce52 100644 --- a/zipline/__main__.py +++ b/zipline/__main__.py @@ -323,7 +323,7 @@ def ingest(bundle, assets_version, show_progress): help='The data bundle to clean.', ) @click.option( - '-b', + '-e', '--before', type=Timestamp(), help='Clear all data before TIMESTAMP.'
safe-global__safe-config-service-76
Serve static files with Nginx
When running the application with Nginx as a reverse proxy, static files (such as the Django admin CSS) are not correctly collected and served.
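The recorded fix adds a `STATIC_ROOT` so `collectstatic` has somewhere to gather the files, and the container entrypoint copies them into the volume that Nginx serves; a minimal sketch of the Django side (the matching Nginx `location /static/` block is assumed and is not part of this record):

```python
# src/config/settings.py -- sketch mirroring the recorded fix
STATIC_URL = "/static/"
STATIC_ROOT = "staticfiles"  # target directory for `manage.py collectstatic`

# At container start (see docker-entrypoint.sh in the diff for this record):
#   python src/manage.py collectstatic --noinput
#   cp -r staticfiles/ ${DOCKER_NGINX_VOLUME_ROOT}/   # volume mounted into Nginx
```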
[ { "content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n}\n\nINSTALLED_APPS = [\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# 
https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n", "path": "src/config/settings.py" } ]
[ { "content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n}\n\nINSTALLED_APPS = [\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# 
https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATIC_ROOT = \"staticfiles\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n", "path": "src/config/settings.py" } ]
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index b551593e..bacaa58b 100644 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -2,6 +2,11 @@ set -euo pipefail +echo "==> $(date +%H:%M:%S) ==> Collecting static files..." +python src/manage.py collectstatic --noinput +rm -rf ${DOCKER_NGINX_VOLUME_ROOT}/* +cp -r staticfiles/ ${DOCKER_NGINX_VOLUME_ROOT}/ + echo "==> $(date +%H:%M:%S) ==> Migrating Django models..." python src/manage.py migrate --noinput diff --git a/src/config/settings.py b/src/config/settings.py index aa3da790..ece8cb88 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -171,6 +171,8 @@ STATIC_URL = "/static/" +STATIC_ROOT = "staticfiles" + # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
modin-project__modin-3440
Remove inheritance of Modin DMatrix from xgb.DMatrix
Inheriting Modin's DMatrix from xgb.DMatrix provides no benefit, and it results in misleading documentation being shown to the user by the `help(modin.experimental.xgboost.DMatrix)` command.
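A toy sketch (illustrative names only, not Modin's code) of why the inheritance hurts `help()` output, next to the standalone-class shape the fix moves to:

```python
class ThirdPartyDMatrix:                      # stand-in for xgb.DMatrix
    """Large third-party class with many methods and parameters."""

    def save_binary(self, fname):
        """Save the matrix to a binary file."""

class WrapperInheriting(ThirdPartyDMatrix):
    # help(WrapperInheriting) also lists every inherited method (e.g.
    # save_binary), documenting behaviour the thin wrapper never supports.
    def __init__(self, data, label):
        self.data, self.label = data, label

class WrapperStandalone:                      # the shape of the recorded fix
    """Documents only what the wrapper actually provides."""

    def __init__(self, data, label):
        self.data, self.label = data, label
```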
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module holds public interfaces for work Modin XGBoost.\"\"\"\n\nimport logging\nfrom typing import Dict, Optional\n\nimport xgboost as xgb\n\nfrom modin.config import Engine\nfrom modin.distributed.dataframe.pandas import unwrap_partitions\nimport modin.pandas as pd\n\nLOGGER = logging.getLogger(\"[modin.xgboost]\")\n\n\nclass DMatrix(xgb.DMatrix):\n \"\"\"\n DMatrix holds references to partitions of Modin DataFrame.\n\n On init stage unwrapping partitions of Modin DataFrame is started.\n\n Parameters\n ----------\n data : modin.pandas.DataFrame\n Data source of DMatrix.\n label : modin.pandas.DataFrame or modin.pandas.Series\n Labels used for training.\n\n Notes\n -----\n Currently DMatrix supports only `data` and `label` parameters.\n \"\"\"\n\n def __init__(self, data, label):\n assert isinstance(\n data, pd.DataFrame\n ), f\"Type of `data` is {type(data)}, but expected {pd.DataFrame}.\"\n assert isinstance(\n label, (pd.DataFrame, pd.Series)\n ), f\"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}.\"\n\n self.data = unwrap_partitions(data, axis=0, get_ip=True)\n self.label = unwrap_partitions(label, axis=0)\n\n self.metadata = (\n data.index,\n data.columns,\n data._query_compiler._modin_frame._row_lengths,\n )\n\n def __iter__(self):\n \"\"\"\n Return unwrapped `self.data` and `self.label`.\n\n Yields\n ------\n list\n List of `self.data` with pairs of references to IP of row partition\n and row partition [(IP_ref0, partition_ref0), ..].\n list\n List of `self.label` with references to row partitions\n [partition_ref0, ..].\n \"\"\"\n yield self.data\n yield self.label\n\n\nclass Booster(xgb.Booster):\n \"\"\"\n A Modin Booster of XGBoost.\n\n Booster is the model of XGBoost, that contains low level routines for\n training, prediction and evaluation.\n\n Parameters\n ----------\n params : dict, optional\n Parameters for boosters.\n cache : list, default: empty\n List of cache items.\n model_file : string/os.PathLike/xgb.Booster/bytearray, optional\n Path to the model file if it's string or PathLike or xgb.Booster.\n \"\"\"\n\n def __init__(self, params=None, cache=(), model_file=None): # noqa: MD01\n super(Booster, self).__init__(params=params, cache=cache, model_file=model_file)\n\n def predict(\n self,\n data: DMatrix,\n **kwargs,\n ):\n \"\"\"\n Run distributed prediction with a trained booster.\n\n During execution it runs ``xgb.predict`` on each worker for subset of `data`\n and creates Modin DataFrame with prediction results.\n\n Parameters\n ----------\n data : modin.experimental.xgboost.DMatrix\n Input data used for prediction.\n **kwargs : dict\n Other parameters are the same as for ``xgboost.Booster.predict``.\n\n Returns\n -------\n 
modin.pandas.DataFrame\n Modin DataFrame with prediction results.\n \"\"\"\n LOGGER.info(\"Prediction started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _predict\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n data, DMatrix\n ), f\"Type of `data` is {type(data)}, but expected {DMatrix}.\"\n\n result = _predict(self.copy(), data, **kwargs)\n LOGGER.info(\"Prediction finished\")\n\n return result\n\n\ndef train(\n params: Dict,\n dtrain: DMatrix,\n *args,\n evals=(),\n num_actors: Optional[int] = None,\n evals_result: Optional[Dict] = None,\n **kwargs,\n):\n \"\"\"\n Run distributed training of XGBoost model.\n\n During work it evenly distributes `dtrain` between workers according\n to IP addresses partitions (in case of not even distribution of `dtrain`\n over nodes, some partitions will be re-distributed between nodes),\n runs xgb.train on each worker for subset of `dtrain` and reduces training results\n of each worker using Rabit Context.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : modin.experimental.xgboost.DMatrix\n Data to be trained against.\n *args : iterable\n Other parameters for `xgboost.train`.\n evals : list of pairs (modin.experimental.xgboost.DMatrix, str), default: empty\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n num_actors : int, optional\n Number of actors for training. If unspecified, this value will be\n computed automatically.\n evals_result : dict, optional\n Dict to store evaluation results in.\n **kwargs : dict\n Other parameters are the same as `xgboost.train`.\n\n Returns\n -------\n modin.experimental.xgboost.Booster\n A trained booster.\n \"\"\"\n LOGGER.info(\"Training started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _train\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n dtrain, DMatrix\n ), f\"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}.\"\n result = _train(dtrain, params, *args, num_actors=num_actors, evals=evals, **kwargs)\n if isinstance(evals_result, dict):\n evals_result.update(result[\"history\"])\n\n LOGGER.info(\"Training finished\")\n return Booster(model_file=result[\"booster\"])\n", "path": "modin/experimental/xgboost/xgboost.py" } ]
[ { "content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module holds public interfaces for work Modin XGBoost.\"\"\"\n\nimport logging\nfrom typing import Dict, Optional\n\nimport xgboost as xgb\n\nfrom modin.config import Engine\nfrom modin.distributed.dataframe.pandas import unwrap_partitions\nimport modin.pandas as pd\n\nLOGGER = logging.getLogger(\"[modin.xgboost]\")\n\n\nclass DMatrix:\n \"\"\"\n DMatrix holds references to partitions of Modin DataFrame.\n\n On init stage unwrapping partitions of Modin DataFrame is started.\n\n Parameters\n ----------\n data : modin.pandas.DataFrame\n Data source of DMatrix.\n label : modin.pandas.DataFrame or modin.pandas.Series\n Labels used for training.\n\n Notes\n -----\n Currently DMatrix supports only `data` and `label` parameters.\n \"\"\"\n\n def __init__(self, data, label):\n assert isinstance(\n data, pd.DataFrame\n ), f\"Type of `data` is {type(data)}, but expected {pd.DataFrame}.\"\n assert isinstance(\n label, (pd.DataFrame, pd.Series)\n ), f\"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}.\"\n\n self.data = unwrap_partitions(data, axis=0, get_ip=True)\n self.label = unwrap_partitions(label, axis=0)\n\n self.metadata = (\n data.index,\n data.columns,\n data._query_compiler._modin_frame._row_lengths,\n )\n\n def __iter__(self):\n \"\"\"\n Return unwrapped `self.data` and `self.label`.\n\n Yields\n ------\n list\n List of `self.data` with pairs of references to IP of row partition\n and row partition [(IP_ref0, partition_ref0), ..].\n list\n List of `self.label` with references to row partitions\n [partition_ref0, ..].\n \"\"\"\n yield self.data\n yield self.label\n\n\nclass Booster(xgb.Booster):\n \"\"\"\n A Modin Booster of XGBoost.\n\n Booster is the model of XGBoost, that contains low level routines for\n training, prediction and evaluation.\n\n Parameters\n ----------\n params : dict, optional\n Parameters for boosters.\n cache : list, default: empty\n List of cache items.\n model_file : string/os.PathLike/xgb.Booster/bytearray, optional\n Path to the model file if it's string or PathLike or xgb.Booster.\n \"\"\"\n\n def __init__(self, params=None, cache=(), model_file=None): # noqa: MD01\n super(Booster, self).__init__(params=params, cache=cache, model_file=model_file)\n\n def predict(\n self,\n data: DMatrix,\n **kwargs,\n ):\n \"\"\"\n Run distributed prediction with a trained booster.\n\n During execution it runs ``xgb.predict`` on each worker for subset of `data`\n and creates Modin DataFrame with prediction results.\n\n Parameters\n ----------\n data : modin.experimental.xgboost.DMatrix\n Input data used for prediction.\n **kwargs : dict\n Other parameters are the same as for ``xgboost.Booster.predict``.\n\n Returns\n -------\n modin.pandas.DataFrame\n 
Modin DataFrame with prediction results.\n \"\"\"\n LOGGER.info(\"Prediction started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _predict\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n data, DMatrix\n ), f\"Type of `data` is {type(data)}, but expected {DMatrix}.\"\n\n result = _predict(self.copy(), data, **kwargs)\n LOGGER.info(\"Prediction finished\")\n\n return result\n\n\ndef train(\n params: Dict,\n dtrain: DMatrix,\n *args,\n evals=(),\n num_actors: Optional[int] = None,\n evals_result: Optional[Dict] = None,\n **kwargs,\n):\n \"\"\"\n Run distributed training of XGBoost model.\n\n During work it evenly distributes `dtrain` between workers according\n to IP addresses partitions (in case of not even distribution of `dtrain`\n over nodes, some partitions will be re-distributed between nodes),\n runs xgb.train on each worker for subset of `dtrain` and reduces training results\n of each worker using Rabit Context.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : modin.experimental.xgboost.DMatrix\n Data to be trained against.\n *args : iterable\n Other parameters for `xgboost.train`.\n evals : list of pairs (modin.experimental.xgboost.DMatrix, str), default: empty\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n num_actors : int, optional\n Number of actors for training. If unspecified, this value will be\n computed automatically.\n evals_result : dict, optional\n Dict to store evaluation results in.\n **kwargs : dict\n Other parameters are the same as `xgboost.train`.\n\n Returns\n -------\n modin.experimental.xgboost.Booster\n A trained booster.\n \"\"\"\n LOGGER.info(\"Training started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _train\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n dtrain, DMatrix\n ), f\"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}.\"\n result = _train(dtrain, params, *args, num_actors=num_actors, evals=evals, **kwargs)\n if isinstance(evals_result, dict):\n evals_result.update(result[\"history\"])\n\n LOGGER.info(\"Training finished\")\n return Booster(model_file=result[\"booster\"])\n", "path": "modin/experimental/xgboost/xgboost.py" } ]
diff --git a/modin/experimental/xgboost/xgboost.py b/modin/experimental/xgboost/xgboost.py index 4d569ce5158..769c68b1b39 100644 --- a/modin/experimental/xgboost/xgboost.py +++ b/modin/experimental/xgboost/xgboost.py @@ -25,7 +25,7 @@ LOGGER = logging.getLogger("[modin.xgboost]") -class DMatrix(xgb.DMatrix): +class DMatrix: """ DMatrix holds references to partitions of Modin DataFrame.
facebookresearch__habitat-lab-347
DD-PPO does not all reduce gradients ## 🐛 Bug DD-PPO does not all reduce gradients during the backward call, because `reducer.prepare_for_backward` is not being called during training process. The problem is in this line: https://github.com/facebookresearch/habitat-api/blob/v0.1.4/habitat_baselines/rl/ddppo/algo/ddppo.py#L96 ``` class DecentralizedDistributedMixin: ... def before_backward(self, loss): # ... self.reducer.prepare_for_backward(..) # Mixin goes second that way the PPO __init__ will still be called class DDPPO(PPO, DecentralizedDistributedMixin): # Here PPO and Mixin both have "before_backward" method, # DDPPO will call PPO's not the Mixin's. pass ``` And here is a quick fix: ``` class DecentralizedDistributedMixin: ... # Mixin goes second that way the PPO __init__ will still be called class DDPPO(PPO, DecentralizedDistributedMixin): # Move before_backward to DDPPO def before_backward(self, loss): # ... self.reducer.prepare_for_backward(..) ```
[ { "content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Tuple\n\nimport torch\nimport torch.distributed as distrib\n\nfrom habitat_baselines.common.rollout_storage import RolloutStorage\nfrom habitat_baselines.rl.ppo import PPO\n\nEPS_PPO = 1e-5\n\n\ndef distributed_mean_and_var(\n values: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"Computes the mean and variances of a tensor over multiple workers.\n\n This method is equivalent to first collecting all versions of values and\n then computing the mean and variance locally over that\n\n :param values: (*,) shaped tensors to compute mean and variance over. Assumed\n to be solely the workers local copy of this tensor,\n the resultant mean and variance will be computed\n over _all_ workers version of this tensor.\n \"\"\"\n assert distrib.is_initialized(), \"Distributed must be initialized\"\n\n world_size = distrib.get_world_size()\n mean = values.mean()\n distrib.all_reduce(mean)\n mean /= world_size\n\n sq_diff = (values - mean).pow(2).mean()\n distrib.all_reduce(sq_diff)\n var = sq_diff / world_size\n\n return mean, var\n\n\nclass DecentralizedDistributedMixin:\n def _get_advantages_distributed(\n self, rollouts: RolloutStorage\n ) -> torch.Tensor:\n advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]\n if not self.use_normalized_advantage:\n return advantages\n\n mean, var = distributed_mean_and_var(advantages)\n\n return (advantages - mean) / (var.sqrt() + EPS_PPO)\n\n def init_distributed(self, find_unused_params: bool = True) -> None:\n r\"\"\"Initializes distributed training for the model\n\n 1. Broadcasts the model weights from world_rank 0 to all other workers\n 2. Adds gradient hooks to the model\n\n :param find_unused_params: Whether or not to filter out unused parameters\n before gradient reduction. This *must* be True if\n there are any parameters in the model that where unused in the\n forward pass, otherwise the gradient reduction\n will not work correctly.\n \"\"\"\n # NB: Used to hide the hooks from the nn.Module,\n # so they don't show up in the state_dict\n class Guard:\n def __init__(self, model, device):\n if torch.cuda.is_available():\n self.ddp = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[device], output_device=device\n )\n else:\n self.ddp = torch.nn.parallel.DistributedDataParallel(model)\n\n self._ddp_hooks = Guard(self.actor_critic, self.device)\n self.get_advantages = self._get_advantages_distributed\n\n self.reducer = self._ddp_hooks.ddp.reducer\n self.find_unused_params = find_unused_params\n\n def before_backward(self, loss):\n super().before_backward(loss)\n\n if self.find_unused_params:\n self.reducer.prepare_for_backward([loss])\n else:\n self.reducer.prepare_for_backward([])\n\n\n# Mixin goes second that way the PPO __init__ will still be called\nclass DDPPO(PPO, DecentralizedDistributedMixin):\n pass\n", "path": "habitat_baselines/rl/ddppo/algo/ddppo.py" } ]
[ { "content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Tuple\n\nimport torch\nimport torch.distributed as distrib\n\nfrom habitat_baselines.common.rollout_storage import RolloutStorage\nfrom habitat_baselines.rl.ppo import PPO\n\nEPS_PPO = 1e-5\n\n\ndef distributed_mean_and_var(\n values: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"Computes the mean and variances of a tensor over multiple workers.\n\n This method is equivalent to first collecting all versions of values and\n then computing the mean and variance locally over that\n\n :param values: (*,) shaped tensors to compute mean and variance over. Assumed\n to be solely the workers local copy of this tensor,\n the resultant mean and variance will be computed\n over _all_ workers version of this tensor.\n \"\"\"\n assert distrib.is_initialized(), \"Distributed must be initialized\"\n\n world_size = distrib.get_world_size()\n mean = values.mean()\n distrib.all_reduce(mean)\n mean /= world_size\n\n sq_diff = (values - mean).pow(2).mean()\n distrib.all_reduce(sq_diff)\n var = sq_diff / world_size\n\n return mean, var\n\n\nclass DecentralizedDistributedMixin:\n def _get_advantages_distributed(\n self, rollouts: RolloutStorage\n ) -> torch.Tensor:\n advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]\n if not self.use_normalized_advantage:\n return advantages\n\n mean, var = distributed_mean_and_var(advantages)\n\n return (advantages - mean) / (var.sqrt() + EPS_PPO)\n\n def init_distributed(self, find_unused_params: bool = True) -> None:\n r\"\"\"Initializes distributed training for the model\n\n 1. Broadcasts the model weights from world_rank 0 to all other workers\n 2. Adds gradient hooks to the model\n\n :param find_unused_params: Whether or not to filter out unused parameters\n before gradient reduction. This *must* be True if\n there are any parameters in the model that where unused in the\n forward pass, otherwise the gradient reduction\n will not work correctly.\n \"\"\"\n # NB: Used to hide the hooks from the nn.Module,\n # so they don't show up in the state_dict\n class Guard:\n def __init__(self, model, device):\n if torch.cuda.is_available():\n self.ddp = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[device], output_device=device\n )\n else:\n self.ddp = torch.nn.parallel.DistributedDataParallel(model)\n\n self._ddp_hooks = Guard(self.actor_critic, self.device)\n self.get_advantages = self._get_advantages_distributed\n\n self.reducer = self._ddp_hooks.ddp.reducer\n self.find_unused_params = find_unused_params\n\n def before_backward(self, loss):\n super().before_backward(loss)\n\n if self.find_unused_params:\n self.reducer.prepare_for_backward([loss])\n else:\n self.reducer.prepare_for_backward([])\n\n\nclass DDPPO(DecentralizedDistributedMixin, PPO):\n pass\n", "path": "habitat_baselines/rl/ddppo/algo/ddppo.py" } ]
diff --git a/habitat_baselines/rl/ddppo/algo/ddppo.py b/habitat_baselines/rl/ddppo/algo/ddppo.py index 4da9e8d6c3..9eb0c581be 100644 --- a/habitat_baselines/rl/ddppo/algo/ddppo.py +++ b/habitat_baselines/rl/ddppo/algo/ddppo.py @@ -92,6 +92,5 @@ def before_backward(self, loss): self.reducer.prepare_for_backward([]) -# Mixin goes second that way the PPO __init__ will still be called -class DDPPO(PPO, DecentralizedDistributedMixin): +class DDPPO(DecentralizedDistributedMixin, PPO): pass
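The habitat-lab fix above turns entirely on Python's method resolution order (MRO): with `class DDPPO(DecentralizedDistributedMixin, PPO)`, a lookup such as `before_backward` finds the mixin's implementation first, and the mixin can still delegate to `PPO` through `super()`. A minimal, self-contained sketch of that behaviour (class and method names are illustrative, not taken from habitat-lab):

```python
class Base:
    def before_backward(self, loss):
        print("Base.before_backward")


class Mixin:
    def before_backward(self, loss):
        print("Mixin.before_backward (e.g. prepare_for_backward)")
        super().before_backward(loss)  # continue down the MRO to Base


class Wrong(Base, Mixin):   # Base wins the lookup; the mixin's hook never runs
    pass


class Right(Mixin, Base):   # Mixin wins, then delegates to Base via super()
    pass


Wrong().before_backward(loss=0.0)   # prints only "Base.before_backward"
Right().before_backward(loss=0.0)   # prints both lines, mixin first
```

This is why swapping the base-class order in the diff is sufficient: `PPO.__init__` is still reached through the MRO, but the gradient-reduction hook is no longer shadowed.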
translate__pootle-5666
Silence dev checks in the admin UI as well There's the following section in the Pootle dev config: ``` Python # Silence the DEBUG check on dev servers SILENCED_SYSTEM_CHECKS = [ 'pootle.W004', # python-levenstein not installed 'pootle.W005', # DEBUG = True 'pootle.W010', # DEFAULT_FROM_EMAIL has default setting 'pootle.W011', # POOTLE_CONTACT_EMAIL has default setting ] ``` When running Pootle, I still get these notifications in the admin UI among others: > `/!\` DEBUG mode is on. Do not do this in production! Set DEBUG = False in Pootle settings > > `/!\` POOTLE_CONTACT_EMAIL is using the following default setting 'info@YOUR_DOMAIN.com'. POOTLE_CONTACT_EMAIL is the address that will receive messages sent by the contact form. > > `/!\` DEFAULT_FROM_EMAIL is using the following default setting 'webmaster@localhost'. DEFAULT_FROM_EMAIL is used in all outgoing Pootle email. Don't forget to review your mail server settings. I think it might make sense to silence them consistently not only in the console, but in the admin UI as well.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale\nimport os\n\nfrom redis.exceptions import ConnectionError\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.shortcuts import render\n\nfrom django_rq.queues import get_failed_queue, get_queue\nfrom django_rq.workers import Worker\n\nfrom pootle.core.decorators import admin_required\nfrom pootle.i18n.gettext import ugettext as _, ungettext\nfrom pootle_statistics.models import Submission\nfrom pootle_store.models import Suggestion\n\n\ndef _format_numbers(numbers):\n for k in numbers.keys():\n formatted_number = locale.format(\"%d\", numbers[k], grouping=True)\n # Under Windows, formatted number must be converted to Unicode\n if os.name == 'nt':\n formatted_number = formatted_number.decode(\n locale.getpreferredencoding()\n )\n numbers[k] = formatted_number\n\n\ndef server_stats():\n User = get_user_model()\n result = cache.get(\"server_stats\")\n if result is None:\n result = {}\n result['user_count'] = max(User.objects.filter(\n is_active=True).count()-2, 0)\n # 'default' and 'nobody' might be counted\n # FIXME: the special users should not be retuned with is_active\n result['submission_count'] = Submission.objects.count()\n result['pending_count'] = Suggestion.objects.pending().count()\n cache.set(\"server_stats\", result, 86400)\n _format_numbers(result)\n return result\n\n\ndef rq_stats():\n queue = get_queue()\n failed_queue = get_failed_queue()\n try:\n workers = Worker.all(queue.connection)\n except ConnectionError:\n return None\n\n num_workers = len(workers)\n is_running = len(queue.connection.smembers(Worker.redis_workers_keys)) > 0\n if is_running:\n # Translators: this refers to the status of the background job worker\n status_msg = ungettext('Running (%d worker)', 'Running (%d workers)',\n num_workers) % num_workers\n else:\n # Translators: this refers to the status of the background job worker\n status_msg = _('Stopped')\n\n result = {\n 'job_count': queue.count,\n 'failed_job_count': failed_queue.count,\n 'is_running': is_running,\n 'status_msg': status_msg,\n }\n\n return result\n\n\ndef checks():\n from django.core.checks.registry import registry\n\n return registry.run_checks()\n\n\n@admin_required\ndef view(request):\n ctx = {\n 'page': 'admin-dashboard',\n 'server_stats': server_stats(),\n 'rq_stats': rq_stats(),\n 'checks': checks(),\n }\n return render(request, \"admin/dashboard.html\", ctx)\n", "path": "pootle/apps/pootle_app/views/admin/dashboard.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale\nimport os\n\nfrom redis.exceptions import ConnectionError\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.shortcuts import render\n\nfrom django_rq.queues import get_failed_queue, get_queue\nfrom django_rq.workers import Worker\n\nfrom pootle.core.decorators import admin_required\nfrom pootle.i18n.gettext import ugettext as _, ungettext\nfrom pootle_statistics.models import Submission\nfrom pootle_store.models import Suggestion\n\n\ndef _format_numbers(numbers):\n for k in numbers.keys():\n formatted_number = locale.format(\"%d\", numbers[k], grouping=True)\n # Under Windows, formatted number must be converted to Unicode\n if os.name == 'nt':\n formatted_number = formatted_number.decode(\n locale.getpreferredencoding()\n )\n numbers[k] = formatted_number\n\n\ndef server_stats():\n User = get_user_model()\n result = cache.get(\"server_stats\")\n if result is None:\n result = {}\n result['user_count'] = max(User.objects.filter(\n is_active=True).count()-2, 0)\n # 'default' and 'nobody' might be counted\n # FIXME: the special users should not be retuned with is_active\n result['submission_count'] = Submission.objects.count()\n result['pending_count'] = Suggestion.objects.pending().count()\n cache.set(\"server_stats\", result, 86400)\n _format_numbers(result)\n return result\n\n\ndef rq_stats():\n queue = get_queue()\n failed_queue = get_failed_queue()\n try:\n workers = Worker.all(queue.connection)\n except ConnectionError:\n return None\n\n num_workers = len(workers)\n is_running = len(queue.connection.smembers(Worker.redis_workers_keys)) > 0\n if is_running:\n # Translators: this refers to the status of the background job worker\n status_msg = ungettext('Running (%d worker)', 'Running (%d workers)',\n num_workers) % num_workers\n else:\n # Translators: this refers to the status of the background job worker\n status_msg = _('Stopped')\n\n result = {\n 'job_count': queue.count,\n 'failed_job_count': failed_queue.count,\n 'is_running': is_running,\n 'status_msg': status_msg,\n }\n\n return result\n\n\ndef checks():\n from django.core.checks.registry import registry\n\n return [e for e in registry.run_checks() if not e.is_silenced()]\n\n\n@admin_required\ndef view(request):\n ctx = {\n 'page': 'admin-dashboard',\n 'server_stats': server_stats(),\n 'rq_stats': rq_stats(),\n 'checks': checks(),\n }\n return render(request, \"admin/dashboard.html\", ctx)\n", "path": "pootle/apps/pootle_app/views/admin/dashboard.py" } ]
diff --git a/pootle/apps/pootle_app/views/admin/dashboard.py b/pootle/apps/pootle_app/views/admin/dashboard.py index e555c96df0d..7e501c6b23e 100644 --- a/pootle/apps/pootle_app/views/admin/dashboard.py +++ b/pootle/apps/pootle_app/views/admin/dashboard.py @@ -82,7 +82,7 @@ def rq_stats(): def checks(): from django.core.checks.registry import registry - return registry.run_checks() + return [e for e in registry.run_checks() if not e.is_silenced()] @admin_required
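The mechanism behind the Pootle fix above is that every Django `CheckMessage` carries an `id` (for example `pootle.W005`) and `is_silenced()` simply tests that id against `settings.SILENCED_SYSTEM_CHECKS`. A rough standalone equivalent of the filtering, with hypothetical ids and without Django's settings machinery:

```python
from dataclasses import dataclass

SILENCED_SYSTEM_CHECKS = {"pootle.W005", "pootle.W010", "pootle.W011"}


@dataclass
class CheckMessage:
    id: str
    msg: str

    def is_silenced(self) -> bool:
        # Django's CheckMessage.is_silenced() performs the same membership
        # test against settings.SILENCED_SYSTEM_CHECKS.
        return self.id in SILENCED_SYSTEM_CHECKS


def visible_checks(messages):
    return [m for m in messages if not m.is_silenced()]


msgs = [
    CheckMessage("pootle.W005", "DEBUG mode is on."),
    CheckMessage("pootle.W004", "python-levenshtein is not installed."),
]
print([m.id for m in visible_checks(msgs)])  # ['pootle.W004']
```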
nilearn__nilearn-4306
Tests failing on main with pytest 8.1.0 See https://github.com/nilearn/nilearn/actions/runs/8136733065/job/22233621361 ``` test_plotting: install_deps> python -I -m pip install kaleido 'kaleido; platform_system != "Windows"' 'kaleido==0.1.0.post1; platform_system == "Windows"' 'matplotlib>=3.3.0' plotly .pkg: install_requires> python -I -m pip install hatch-vcs hatchling .pkg: _optional_hooks> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build .pkg: get_requires_for_build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build .pkg: freeze> python -m pip freeze --all .pkg: editables==0.5,hatch-vcs==0.4.0,hatchling==1.21.1,packaging==23.2,pathspec==0.12.1,pip==24.0,pluggy==1.4.0,setuptools==69.1.1,setuptools-scm==8.0.4,trove-classifiers==2024.3.3,typing_extensions==4.10.0 .pkg: build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build test_plotting: install_package_deps> python -I -m pip install coverage 'joblib>=1.0.0' lxml 'nibabel>=4.0.0' 'numpy>=1.19.0' packaging 'pandas>=1.1.5' pytest-cov 'pytest>=6.0.0' 'requests>=2.25.0' 'scikit-learn>=1.0.0' 'scipy>=1.8.0' test_plotting: install_package> python -I -m pip install --force-reinstall --no-deps /Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1+gf91de22.tar.gz test_plotting: freeze> python -m pip freeze --all test_plotting: certifi==2024.2.2,charset-normalizer==3.3.2,contourpy==1.2.0,coverage==7.4.3,cycler==0.12.1,fonttools==4.49.0,idna==3.6,iniconfig==2.0.0,joblib==1.3.2,kaleido==0.2.1,kiwisolver==1.4.5,lxml==5.1.0,matplotlib==3.8.3,nibabel==5.2.1,nilearn @ file:///Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1%2Bgf91de22.tar.gz#sha256=b29f617bbb5d9aa3a94aa7518f1006aea9f52d58d945a8b82ed6951bffccb22e,numpy==1.26.4,packaging==23.2,pandas==2.2.1,pillow==10.2.0,pip==24.0,plotly==5.19.0,pluggy==1.4.0,pyparsing==3.1.1,pytest==8.1.0,pytest-cov==4.1.0,python-dateutil==2.9.0.post0,pytz==2024.1,requests==2.31.0,scikit-learn==1.4.1.post1,scipy==1.12.0,six==1.16.0,tenacity==8.2.3,threadpoolctl==3.3.0,tzdata==2024.1,urllib3==2.2.1 test_plotting: commands[0]> pytest --cov=nilearn --cov-report=xml nilearn ============================= test session starts ============================== platform darwin -- Python 3.12.1, pytest-8.1.0, pluggy-1.4.0 -- /Users/runner/work/nilearn/nilearn/.tox/test_plotting/bin/python cachedir: .tox/test_plotting/.pytest_cache rootdir: /Users/runner/work/nilearn/nilearn configfile: pyproject.toml plugins: cov-4.1.0 collecting ... 
collected 3118 items / 1 error ==================================== ERRORS ==================================== ______________________ ERROR collecting nilearn/externals ______________________ .tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:167: in register self._verify_hook(hook, hookimpl) hook = <HookCaller 'pytest_ignore_collect'> hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>> hookimpl_opts = {'hookwrapper': False, 'optionalhook': False, 'specname': None, 'tryfirst': False, 'trylast': False, 'wrapper': False} method = <function pytest_ignore_collect at 0x12f044680> name = 'pytest_ignore_collect' plugin = <module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'> plugin_name = '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py' self = <_pytest.config.PytestPluginManager object at 0x10c395130> .tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:342: in _verify_hook raise PluginValidationError( E pluggy._manager.PluginValidationError: Plugin '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py' for hook 'pytest_ignore_collect' E hookimpl definition: pytest_ignore_collect(path, config) E Argument(s) {'path'} are declared in the hookimpl but can not be found in the hookspec hook = <HookCaller 'pytest_ignore_collect'> hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>> notinspec = {'path'} self = <_pytest.config.PytestPluginManager object at 0x10c395130> =============================== warnings summary =============================== nilearn/input_data/__init__.py:23 /Users/runner/work/nilearn/nilearn/nilearn/input_data/__init__.py:23: DeprecationWarning: The import path 'nilearn.input_data' is deprecated in version 0.9. Importing from 'nilearn.input_data' will be possible at least until release 0.13.0. Please import from 'nilearn.maskers' instead. warnings.warn(message, DeprecationWarning) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html ---------- coverage: platform darwin, python 3.12.1-final-0 ---------- Coverage XML written to file coverage.xml =========================== short test summary info ============================ ERROR nilearn/externals - pluggy._manager.PluginValidationError: Plugin '/Use... !!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!! ========================= 1 warning, 1 error in 17.43s ========================= test_plotting: exit 2 (22.26 seconds) /Users/runner/work/nilearn/nilearn> pytest --cov=nilearn --cov-report=xml nilearn pid=7328 .pkg: _exit> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build test_plotting: FAIL code 2 (102.15=setup[79.89]+cmd[22.26] seconds) evaluation failed :( (102.25 seconds) ``` The tests run fine locally: - with pytest<8.1.0 - OR removing the conftest in nilearn/nilearn/externals/conftest.py (which suggest to ignore collecting tests, though there are no tests in that subfolder - EDIT: tests are in the __init__.py file).
[ { "content": "# Do not collect any tests in externals. This is more robust than using\n# --ignore because --ignore needs a path and it is not convenient to pass in\n# the externals path (very long install-dependent path in site-packages) when\n# using --pyargs\n\n\ndef pytest_ignore_collect(path, config):\n return True\n", "path": "nilearn/externals/conftest.py" } ]
[ { "content": null, "path": "nilearn/externals/conftest.py" } ]
diff --git a/nilearn/externals/conftest.py b/nilearn/externals/conftest.py deleted file mode 100644 index f3bb9d9e9a..0000000000 --- a/nilearn/externals/conftest.py +++ /dev/null @@ -1,8 +0,0 @@ -# Do not collect any tests in externals. This is more robust than using -# --ignore because --ignore needs a path and it is not convenient to pass in -# the externals path (very long install-dependent path in site-packages) when -# using --pyargs - - -def pytest_ignore_collect(path, config): - return True diff --git a/pyproject.toml b/pyproject.toml index b29619fe8d..3259956486 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -159,4 +159,5 @@ addopts = "-ra --strict-config --strict-markers --doctest-modules --showlocals - doctest_optionflags = "NORMALIZE_WHITESPACE ELLIPSIS" junit_family = "xunit2" minversion = "6.0" +norecursedirs = "tempita" xfail_strict = true
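The nilearn failure above is a hook-signature problem: pytest 7 introduced a `collection_path` (`pathlib.Path`) argument for `pytest_ignore_collect`, and the pytest 8.1.0 release in the traceback no longer accepts the legacy `path` argument, so old-style implementations fail plugin validation. The project resolved it by deleting the conftest and relying on `norecursedirs`, but for reference, a conftest written against the newer hookspec would look roughly like this (a sketch, assuming pytest >= 7.0):

```python
# conftest.py -- skip collection of everything under this directory.
from pathlib import Path


def pytest_ignore_collect(collection_path: Path, config):
    # Returning True tells pytest not to collect this path;
    # returning None would defer the decision to other hook implementations.
    return True
```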
translate__pootle-5820
Adding a new language forces you to specify special characters If you add a new language you are blocked with the UI insisting that you add special characters. ![screen shot 2017-01-16 at 16 01 52](https://cloud.githubusercontent.com/assets/647438/21990281/17dee6e4-dc06-11e6-910a-4b58c6c45192.png) The part in red is Afrikaans and says "This field is required" Not all languages require special characters. Also the model allows `blank=True`. So somewhere we're blocking.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\nimport urlparse\nfrom collections import OrderedDict\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\n\nfrom pootle.i18n.gettext import ugettext_lazy as _\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom pootle_store.models import Store\n\n\nLANGCODE_RE = re.compile(\"^[a-z]{2,}([_-]([a-z]{2,}|[0-9]{3}))*(@[a-z0-9]+)?$\",\n re.IGNORECASE)\n\n\nclass LanguageForm(forms.ModelForm):\n\n specialchars = forms.CharField(strip=False)\n\n class Meta(object):\n model = Language\n fields = ('id', 'code', 'fullname', 'specialchars', 'nplurals',\n 'pluralequation',)\n\n def clean_code(self):\n if (not self.cleaned_data['code'] == 'templates' and\n not LANGCODE_RE.match(self.cleaned_data['code'])):\n raise forms.ValidationError(\n _('Language code does not follow the ISO convention')\n )\n\n return self.cleaned_data[\"code\"]\n\n def clean_specialchars(self):\n \"\"\"Ensures inputted characters are unique.\"\"\"\n chars = self.cleaned_data['specialchars']\n return u''.join(\n OrderedDict((char, None) for char in list(chars)).keys()\n )\n\n\nclass ProjectForm(forms.ModelForm):\n\n source_language = forms.ModelChoiceField(label=_('Source Language'),\n queryset=Language.objects.none())\n\n class Meta(object):\n model = Project\n fields = ('id', 'code', 'fullname', 'checkstyle',\n 'filetypes', 'treestyle', 'source_language', 'ignoredfiles',\n 'report_email', 'screenshot_search_prefix', 'disabled',)\n\n def __init__(self, *args, **kwargs):\n super(ProjectForm, self).__init__(*args, **kwargs)\n\n queryset = Language.objects.exclude(code='templates')\n self.fields['source_language'].queryset = queryset\n\n self.fields[\"filetypes\"].initial = [\n self.fields[\"filetypes\"].queryset.get(name=\"po\")]\n\n if self.instance.id:\n if (self.instance.treestyle != 'auto' and\n self.instance.translationproject_set.count() and\n self.instance.treestyle == self.instance._detect_treestyle()):\n self.fields['treestyle'].required = False\n\n def clean_filetypes(self):\n value = self.cleaned_data.get('filetypes', [])\n if not self.instance.pk:\n return value\n for filetype in self.instance.filetypes.all():\n if filetype not in value:\n has_stores = Store.objects.filter(\n translation_project__project=self.instance, filetype=filetype)\n if has_stores.exists():\n raise forms.ValidationError(\n _(\"You cannot remove a file type from a Project, \"\n \"if there are Stores of that file type ('%s')\"\n % filetype))\n return value\n\n def clean_fullname(self):\n return self.cleaned_data['fullname'].strip()\n\n def clean_treestyle(self):\n value = self.cleaned_data.get('treestyle', None)\n if not value:\n value = self.instance.treestyle\n return value\n\n def clean_code(self):\n return self.cleaned_data['code'].strip()\n\n\nclass UserForm(forms.ModelForm):\n\n password = forms.CharField(label=_('Password'), required=False,\n widget=forms.PasswordInput)\n\n class Meta(object):\n model = get_user_model()\n fields = ('id', 'username', 'is_active', 'full_name', 'email',\n 'is_superuser', 'twitter', 'linkedin', 'website', 'bio')\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n\n # Require setting the password for new 
users\n if self.instance.pk is None:\n self.fields['password'].required = True\n\n def save(self, commit=True):\n password = self.cleaned_data['password']\n\n if password != '':\n user = super(UserForm, self).save(commit=False)\n user.set_password(password)\n\n if commit:\n user.save()\n else:\n user = super(UserForm, self).save(commit=commit)\n\n return user\n\n def clean_linkedin(self):\n url = self.cleaned_data['linkedin']\n if url != '':\n parsed = urlparse.urlparse(url)\n if 'linkedin.com' not in parsed.netloc or parsed.path == '/':\n raise forms.ValidationError(\n _('Please enter a valid LinkedIn user profile URL.')\n )\n\n return url\n", "path": "pootle/apps/pootle_app/forms.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\nimport urlparse\nfrom collections import OrderedDict\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\n\nfrom pootle.i18n.gettext import ugettext_lazy as _\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom pootle_store.models import Store\n\n\nLANGCODE_RE = re.compile(\"^[a-z]{2,}([_-]([a-z]{2,}|[0-9]{3}))*(@[a-z0-9]+)?$\",\n re.IGNORECASE)\n\n\nclass LanguageForm(forms.ModelForm):\n\n specialchars = forms.CharField(strip=False, required=False)\n\n class Meta(object):\n model = Language\n fields = ('id', 'code', 'fullname', 'specialchars', 'nplurals',\n 'pluralequation',)\n\n def clean_code(self):\n if (not self.cleaned_data['code'] == 'templates' and\n not LANGCODE_RE.match(self.cleaned_data['code'])):\n raise forms.ValidationError(\n _('Language code does not follow the ISO convention')\n )\n\n return self.cleaned_data[\"code\"]\n\n def clean_specialchars(self):\n \"\"\"Ensures inputted characters are unique.\"\"\"\n chars = self.cleaned_data['specialchars']\n return u''.join(\n OrderedDict((char, None) for char in list(chars)).keys()\n )\n\n\nclass ProjectForm(forms.ModelForm):\n\n source_language = forms.ModelChoiceField(label=_('Source Language'),\n queryset=Language.objects.none())\n\n class Meta(object):\n model = Project\n fields = ('id', 'code', 'fullname', 'checkstyle',\n 'filetypes', 'treestyle', 'source_language', 'ignoredfiles',\n 'report_email', 'screenshot_search_prefix', 'disabled',)\n\n def __init__(self, *args, **kwargs):\n super(ProjectForm, self).__init__(*args, **kwargs)\n\n queryset = Language.objects.exclude(code='templates')\n self.fields['source_language'].queryset = queryset\n\n self.fields[\"filetypes\"].initial = [\n self.fields[\"filetypes\"].queryset.get(name=\"po\")]\n\n if self.instance.id:\n if (self.instance.treestyle != 'auto' and\n self.instance.translationproject_set.count() and\n self.instance.treestyle == self.instance._detect_treestyle()):\n self.fields['treestyle'].required = False\n\n def clean_filetypes(self):\n value = self.cleaned_data.get('filetypes', [])\n if not self.instance.pk:\n return value\n for filetype in self.instance.filetypes.all():\n if filetype not in value:\n has_stores = Store.objects.filter(\n translation_project__project=self.instance, filetype=filetype)\n if has_stores.exists():\n raise forms.ValidationError(\n _(\"You cannot remove a file type from a Project, \"\n \"if there are Stores of that file type ('%s')\"\n % filetype))\n return value\n\n def clean_fullname(self):\n return self.cleaned_data['fullname'].strip()\n\n def clean_treestyle(self):\n value = self.cleaned_data.get('treestyle', None)\n if not value:\n value = self.instance.treestyle\n return value\n\n def clean_code(self):\n return self.cleaned_data['code'].strip()\n\n\nclass UserForm(forms.ModelForm):\n\n password = forms.CharField(label=_('Password'), required=False,\n widget=forms.PasswordInput)\n\n class Meta(object):\n model = get_user_model()\n fields = ('id', 'username', 'is_active', 'full_name', 'email',\n 'is_superuser', 'twitter', 'linkedin', 'website', 'bio')\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n\n # Require setting the 
password for new users\n if self.instance.pk is None:\n self.fields['password'].required = True\n\n def save(self, commit=True):\n password = self.cleaned_data['password']\n\n if password != '':\n user = super(UserForm, self).save(commit=False)\n user.set_password(password)\n\n if commit:\n user.save()\n else:\n user = super(UserForm, self).save(commit=commit)\n\n return user\n\n def clean_linkedin(self):\n url = self.cleaned_data['linkedin']\n if url != '':\n parsed = urlparse.urlparse(url)\n if 'linkedin.com' not in parsed.netloc or parsed.path == '/':\n raise forms.ValidationError(\n _('Please enter a valid LinkedIn user profile URL.')\n )\n\n return url\n", "path": "pootle/apps/pootle_app/forms.py" } ]
diff --git a/pootle/apps/pootle_app/forms.py b/pootle/apps/pootle_app/forms.py index 87f94710d57..f538ea19836 100644 --- a/pootle/apps/pootle_app/forms.py +++ b/pootle/apps/pootle_app/forms.py @@ -25,7 +25,7 @@ class LanguageForm(forms.ModelForm): - specialchars = forms.CharField(strip=False) + specialchars = forms.CharField(strip=False, required=False) class Meta(object): model = Language diff --git a/tests/forms/language.py b/tests/forms/language.py index ebc47795b27..5dcbd5dc4af 100644 --- a/tests/forms/language.py +++ b/tests/forms/language.py @@ -12,6 +12,7 @@ @pytest.mark.parametrize('specialchars', [ + ' ', ' abcde ', ' ab cd', ' abcde', @@ -52,3 +53,18 @@ def test_clean_specialchars_unique(specialchars, count_char): form = LanguageForm(form_data) assert form.is_valid() assert form.cleaned_data['specialchars'].count(count_char) == 1 + + [email protected]_db +def test_specialchars_can_be_blank(): + """Test that a blank special character field is valid.""" + form_data = { + 'code': 'foo', + 'fullname': 'Foo', + 'checkstyle': 'foo', + 'nplurals': '2', + 'specialchars': '', + } + form = LanguageForm(form_data) + assert form.is_valid() + assert form.cleaned_data['specialchars'] == ''
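The one-line Pootle fix above works because a field declared explicitly on a (Model)Form replaces the field Django would otherwise derive from the model, and `forms.CharField` defaults to `required=True` regardless of the model's `blank=True`. A standalone sketch of the behaviour (a plain `forms.Form` and `settings.configure()` are used here only so the snippet runs outside a project):

```python
import django
from django.conf import settings

settings.configure()  # minimal configuration so forms work in a bare script
django.setup()

from django import forms


class LanguageForm(forms.Form):
    code = forms.CharField()
    # An explicitly declared field overrides any model-derived default, and
    # CharField is required unless told otherwise -- hence the original bug.
    specialchars = forms.CharField(strip=False, required=False)


form = LanguageForm({"code": "af", "specialchars": ""})
print(form.is_valid())  # True: a blank specialchars value is now accepted
```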
getmoto__moto-1801
Botocore sub-dependency mismatch Running `pipenv install moto` results in: ``` Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies. You can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation. Hint: try $ pipenv lock --pre if it is a pre-release dependency. Could not find a version that matches botocore<1.11,<1.12.0,<2.0.0,==1.10.84,>=1.11.3,>=1.3.0,>=1.9.16 Tried: 0.4.1, 0.4.2, 0.5.0, 0.5.1, 0.5.2, 0.5.3, 0.5.4, 0.6.0, 0.7.0, 0.8.0, 0.8.1, 0.8.2, 0.8.3, 0.9.0, 0.9.1, 0.9.2, 0.10.0, 0.11.0, 0.12.0, 0.13.0, 0.13.1, 0.14.0, 0.15.0, 0.15.1, 0.16.0, 0.17.0, 0.18.0, 0.19.0, 0.20.0, 0.21.0, 0.22.0, 0.23.0, 0.24.0, 0.25.0, 0.26.0, 0.27.0, 0.28.0, 0.29.0, 0.30.0, 0.31.0, 0.32.0, 0.33.0, 0.34.0, 0.35.0, 0.36.0, 0.37.0, 0.38.0, 0.39.0, 0.40.0, 0.41.0, 0.42.0, 0.43.0, 0.44.0, 0.45.0, 0.46.0, 0.47.0, 0.48.0, 0.49.0, 0.50.0, 0.51.0, 0.52.0, 0.53.0, 0.54.0, 0.55.0, 0.56.0, 0.57.0, 0.58.0, 0.59.0, 0.60.0, 0.61.0, 0.62.0, 0.63.0, 0.64.0, 0.65.0, 0.66.0, 0.67.0, 0.68.0, 0.69.0, 0.70.0, 0.71.0, 0.72.0, 0.73.0, 0.74.0, 0.75.0, 0.76.0, 0.77.0, 0.78.0, 0.79.0, 0.80.0, 0.81.0, 0.82.0, 0.83.0, 0.84.0, 0.85.0, 0.86.0, 0.87.0, 0.88.0, 0.89.0, 0.90.0, 0.91.0, 0.92.0, 0.93.0, 0.94.0, 0.95.0, 0.96.0, 0.97.0, 0.98.0, 0.99.0, 0.100.0, 0.101.0, 0.102.0, 0.103.0, 0.104.0, 0.105.0, 0.106.0, 0.107.0, 0.108.0, 0.109.0, 1.0.0, 1.0.0, 1.0.1, 1.0.1, 1.1.0, 1.1.0, 1.1.1, 1.1.1, 1.1.2, 1.1.2, 1.1.3, 1.1.3, 1.1.4, 1.1.4, 1.1.5, 1.1.5, 1.1.6, 1.1.6, 1.1.7, 1.1.7, 1.1.8, 1.1.8, 1.1.9, 1.1.9, 1.1.10, 1.1.10, 1.1.11, 1.1.11, 1.1.12, 1.1.12, 1.2.0, 1.2.0, 1.2.1, 1.2.1, 1.2.2, 1.2.2, 1.2.3, 1.2.3, 1.2.4, 1.2.4, 1.2.5, 1.2.5, 1.2.6, 1.2.6, 1.2.7, 1.2.7, 1.2.8, 1.2.8, 1.2.9, 1.2.9, 1.2.10, 1.2.10, 1.2.11, 1.2.11, 1.3.0, 1.3.0, 1.3.1, 1.3.1, 1.3.2, 1.3.2, 1.3.3, 1.3.3, 1.3.4, 1.3.4, 1.3.5, 1.3.5, 1.3.6, 1.3.6, 1.3.7, 1.3.7, 1.3.8, 1.3.8, 1.3.9, 1.3.9, 1.3.10, 1.3.10, 1.3.11, 1.3.11, 1.3.12, 1.3.12, 1.3.13, 1.3.13, 1.3.14, 1.3.14, 1.3.15, 1.3.15, 1.3.16, 1.3.16, 1.3.17, 1.3.17, 1.3.18, 1.3.18, 1.3.19, 1.3.19, 1.3.20, 1.3.20, 1.3.21, 1.3.21, 1.3.22, 1.3.22, 1.3.23, 1.3.23, 1.3.24, 1.3.24, 1.3.25, 1.3.25, 1.3.26, 1.3.26, 1.3.27, 1.3.27, 1.3.28, 1.3.28, 1.3.29, 1.3.29, 1.3.30, 1.3.30, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.4.2, 1.4.2, 1.4.3, 1.4.3, 1.4.4, 1.4.4, 1.4.5, 1.4.5, 1.4.6, 1.4.6, 1.4.7, 1.4.7, 1.4.8, 1.4.8, 1.4.9, 1.4.9, 1.4.10, 1.4.10, 1.4.11, 1.4.11, 1.4.12, 1.4.12, 1.4.13, 1.4.13, 1.4.14, 1.4.14, 1.4.15, 1.4.15, 1.4.16, 1.4.16, 1.4.17, 1.4.17, 1.4.18, 1.4.18, 1.4.19, 1.4.19, 1.4.20, 1.4.20, 1.4.21, 1.4.21, 1.4.22, 1.4.22, 1.4.23, 1.4.23, 1.4.24, 1.4.24, 1.4.25, 1.4.25, 1.4.26, 1.4.26, 1.4.27, 1.4.27, 1.4.28, 1.4.28, 1.4.29, 1.4.29, 1.4.30, 1.4.30, 1.4.31, 1.4.31, 1.4.32, 1.4.32, 1.4.33, 1.4.33, 1.4.34, 1.4.34, 1.4.35, 1.4.35, 1.4.36, 1.4.36, 1.4.37, 1.4.37, 1.4.38, 1.4.38, 1.4.39, 1.4.39, 1.4.40, 1.4.40, 1.4.41, 1.4.41, 1.4.42, 1.4.42, 1.4.43, 1.4.43, 1.4.44, 1.4.44, 1.4.46, 1.4.46, 1.4.47, 1.4.47, 1.4.48, 1.4.48, 1.4.49, 1.4.49, 1.4.50, 1.4.50, 1.4.51, 1.4.51, 1.4.52, 1.4.52, 1.4.53, 1.4.53, 1.4.54, 1.4.54, 1.4.55, 1.4.55, 1.4.56, 1.4.56, 1.4.57, 1.4.57, 1.4.58, 1.4.58, 1.4.59, 1.4.59, 1.4.60, 1.4.60, 1.4.61, 1.4.61, 1.4.62, 1.4.62, 1.4.63, 1.4.63, 1.4.64, 1.4.64, 1.4.65, 1.4.65, 1.4.66, 1.4.66, 1.4.67, 1.4.67, 1.4.68, 1.4.68, 1.4.69, 1.4.69, 1.4.70, 1.4.70, 1.4.71, 1.4.71, 1.4.72, 1.4.72, 1.4.73, 1.4.73, 1.4.74, 1.4.74, 1.4.75, 1.4.75, 1.4.76, 1.4.76, 1.4.77, 1.4.77, 1.4.78, 1.4.78, 1.4.79, 1.4.79, 1.4.80, 1.4.80, 1.4.81, 1.4.81, 
1.4.82, 1.4.82, 1.4.83, 1.4.83, 1.4.84, 1.4.84, 1.4.85, 1.4.85, 1.4.86, 1.4.86, 1.4.87, 1.4.87, 1.4.88, 1.4.88, 1.4.89, 1.4.89, 1.4.90, 1.4.90, 1.4.91, 1.4.91, 1.4.92, 1.4.92, 1.4.93, 1.4.93, 1.5.0, 1.5.0, 1.5.1, 1.5.1, 1.5.2, 1.5.2, 1.5.3, 1.5.3, 1.5.4, 1.5.4, 1.5.5, 1.5.5, 1.5.6, 1.5.6, 1.5.7, 1.5.7, 1.5.8, 1.5.8, 1.5.9, 1.5.9, 1.5.10, 1.5.10, 1.5.11, 1.5.11, 1.5.12, 1.5.12, 1.5.13, 1.5.13, 1.5.14, 1.5.14, 1.5.15, 1.5.15, 1.5.16, 1.5.16, 1.5.17, 1.5.17, 1.5.18, 1.5.18, 1.5.19, 1.5.19, 1.5.20, 1.5.20, 1.5.21, 1.5.21, 1.5.22, 1.5.22, 1.5.23, 1.5.23, 1.5.24, 1.5.24, 1.5.25, 1.5.25, 1.5.26, 1.5.26, 1.5.27, 1.5.27, 1.5.28, 1.5.28, 1.5.29, 1.5.29, 1.5.30, 1.5.30, 1.5.31, 1.5.31, 1.5.32, 1.5.32, 1.5.33, 1.5.33, 1.5.34, 1.5.34, 1.5.35, 1.5.35, 1.5.36, 1.5.36, 1.5.37, 1.5.37, 1.5.38, 1.5.38, 1.5.39, 1.5.39, 1.5.40, 1.5.40, 1.5.41, 1.5.41, 1.5.42, 1.5.42, 1.5.43, 1.5.43, 1.5.44, 1.5.44, 1.5.45, 1.5.45, 1.5.46, 1.5.46, 1.5.47, 1.5.47, 1.5.48, 1.5.48, 1.5.49, 1.5.49, 1.5.50, 1.5.50, 1.5.51, 1.5.51, 1.5.52, 1.5.52, 1.5.53, 1.5.53, 1.5.54, 1.5.54, 1.5.55, 1.5.55, 1.5.56, 1.5.56, 1.5.57, 1.5.57, 1.5.58, 1.5.58, 1.5.59, 1.5.59, 1.5.60, 1.5.60, 1.5.61, 1.5.61, 1.5.62, 1.5.62, 1.5.63, 1.5.63, 1.5.64, 1.5.64, 1.5.65, 1.5.65, 1.5.66, 1.5.66, 1.5.67, 1.5.67, 1.5.68, 1.5.68, 1.5.69, 1.5.69, 1.5.70, 1.5.70, 1.5.71, 1.5.71, 1.5.72, 1.5.72, 1.5.73, 1.5.73, 1.5.74, 1.5.74, 1.5.75, 1.5.75, 1.5.76, 1.5.76, 1.5.77, 1.5.77, 1.5.78, 1.5.78, 1.5.79, 1.5.79, 1.5.80, 1.5.80, 1.5.81, 1.5.81, 1.5.82, 1.5.82, 1.5.83, 1.5.83, 1.5.84, 1.5.84, 1.5.85, 1.5.85, 1.5.86, 1.5.86, 1.5.87, 1.5.87, 1.5.88, 1.5.88, 1.5.89, 1.5.89, 1.5.90, 1.5.90, 1.5.91, 1.5.91, 1.5.92, 1.5.92, 1.5.93, 1.5.93, 1.5.94, 1.5.94, 1.5.95, 1.5.95, 1.6.0, 1.6.0, 1.6.1, 1.6.1, 1.6.2, 1.6.2, 1.6.3, 1.6.3, 1.6.4, 1.6.4, 1.6.5, 1.6.5, 1.6.6, 1.6.6, 1.6.7, 1.6.7, 1.6.8, 1.6.8, 1.7.0, 1.7.0, 1.7.1, 1.7.1, 1.7.2, 1.7.2, 1.7.3, 1.7.3, 1.7.4, 1.7.4, 1.7.5, 1.7.5, 1.7.6, 1.7.6, 1.7.7, 1.7.7, 1.7.8, 1.7.8, 1.7.9, 1.7.9, 1.7.10, 1.7.10, 1.7.11, 1.7.11, 1.7.12, 1.7.12, 1.7.13, 1.7.13, 1.7.14, 1.7.14, 1.7.15, 1.7.15, 1.7.16, 1.7.16, 1.7.17, 1.7.17, 1.7.18, 1.7.18, 1.7.19, 1.7.19, 1.7.20, 1.7.20, 1.7.21, 1.7.21, 1.7.22, 1.7.22, 1.7.23, 1.7.23, 1.7.24, 1.7.24, 1.7.25, 1.7.25, 1.7.26, 1.7.26, 1.7.27, 1.7.27, 1.7.28, 1.7.28, 1.7.29, 1.7.29, 1.7.30, 1.7.30, 1.7.31, 1.7.31, 1.7.32, 1.7.32, 1.7.33, 1.7.33, 1.7.34, 1.7.34, 1.7.35, 1.7.35, 1.7.36, 1.7.36, 1.7.37, 1.7.37, 1.7.38, 1.7.38, 1.7.39, 1.7.39, 1.7.40, 1.7.40, 1.7.41, 1.7.41, 1.7.42, 1.7.42, 1.7.43, 1.7.43, 1.7.44, 1.7.44, 1.7.45, 1.7.45, 1.7.46, 1.7.46, 1.7.47, 1.7.47, 1.7.48, 1.7.48, 1.8.0, 1.8.0, 1.8.1, 1.8.1, 1.8.2, 1.8.2, 1.8.3, 1.8.3, 1.8.4, 1.8.4, 1.8.5, 1.8.5, 1.8.6, 1.8.6, 1.8.7, 1.8.7, 1.8.8, 1.8.8, 1.8.9, 1.8.9, 1.8.10, 1.8.10, 1.8.11, 1.8.11, 1.8.12, 1.8.12, 1.8.13, 1.8.13, 1.8.14, 1.8.14, 1.8.15, 1.8.15, 1.8.16, 1.8.16, 1.8.17, 1.8.17, 1.8.18, 1.8.18, 1.8.19, 1.8.19, 1.8.20, 1.8.20, 1.8.21, 1.8.21, 1.8.22, 1.8.22, 1.8.23, 1.8.23, 1.8.24, 1.8.24, 1.8.25, 1.8.25, 1.8.26, 1.8.26, 1.8.27, 1.8.27, 1.8.28, 1.8.28, 1.8.29, 1.8.29, 1.8.30, 1.8.30, 1.8.31, 1.8.31, 1.8.32, 1.8.32, 1.8.33, 1.8.33, 1.8.34, 1.8.34, 1.8.35, 1.8.35, 1.8.36, 1.8.36, 1.8.37, 1.8.37, 1.8.38, 1.8.38, 1.8.39, 1.8.39, 1.8.40, 1.8.40, 1.8.41, 1.8.41, 1.8.42, 1.8.42, 1.8.43, 1.8.43, 1.8.44, 1.8.44, 1.8.45, 1.8.45, 1.8.46, 1.8.46, 1.8.47, 1.8.47, 1.8.48, 1.8.48, 1.8.49, 1.8.49, 1.8.50, 1.8.50, 1.9.0, 1.9.0, 1.9.1, 1.9.1, 1.9.2, 1.9.2, 1.9.3, 1.9.3, 1.9.4, 1.9.4, 1.9.5, 1.9.5, 1.9.6, 1.9.6, 1.9.7, 1.9.7, 1.9.8, 1.9.8, 1.9.9, 1.9.9, 1.9.10, 1.9.10, 
1.9.11, 1.9.11, 1.9.12, 1.9.12, 1.9.13, 1.9.13, 1.9.14, 1.9.14, 1.9.15, 1.9.15, 1.9.16, 1.9.16, 1.9.17, 1.9.17, 1.9.18, 1.9.18, 1.9.19, 1.9.19, 1.9.20, 1.9.20, 1.9.21, 1.9.21, 1.9.22, 1.9.22, 1.9.23, 1.9.23, 1.10.0, 1.10.0, 1.10.1, 1.10.1, 1.10.2, 1.10.2, 1.10.3, 1.10.3, 1.10.4, 1.10.4, 1.10.5, 1.10.5, 1.10.6, 1.10.6, 1.10.7, 1.10.7, 1.10.8, 1.10.8, 1.10.9, 1.10.9, 1.10.10, 1.10.10, 1.10.11, 1.10.11, 1.10.12, 1.10.12, 1.10.13, 1.10.13, 1.10.14, 1.10.14, 1.10.15, 1.10.15, 1.10.16, 1.10.16, 1.10.17, 1.10.17, 1.10.18, 1.10.18, 1.10.19, 1.10.19, 1.10.20, 1.10.20, 1.10.21, 1.10.21, 1.10.22, 1.10.22, 1.10.23, 1.10.23, 1.10.24, 1.10.24, 1.10.25, 1.10.25, 1.10.26, 1.10.26, 1.10.27, 1.10.27, 1.10.28, 1.10.28, 1.10.29, 1.10.29, 1.10.30, 1.10.30, 1.10.31, 1.10.31, 1.10.32, 1.10.32, 1.10.33, 1.10.33, 1.10.34, 1.10.34, 1.10.35, 1.10.35, 1.10.36, 1.10.36, 1.10.37, 1.10.37, 1.10.38, 1.10.38, 1.10.39, 1.10.39, 1.10.40, 1.10.40, 1.10.41, 1.10.41, 1.10.42, 1.10.42, 1.10.43, 1.10.43, 1.10.44, 1.10.44, 1.10.45, 1.10.45, 1.10.46, 1.10.46, 1.10.47, 1.10.47, 1.10.48, 1.10.48, 1.10.49, 1.10.49, 1.10.50, 1.10.50, 1.10.51, 1.10.51, 1.10.52, 1.10.52, 1.10.53, 1.10.53, 1.10.54, 1.10.54, 1.10.55, 1.10.55, 1.10.56, 1.10.56, 1.10.57, 1.10.57, 1.10.58, 1.10.58, 1.10.59, 1.10.59, 1.10.60, 1.10.60, 1.10.61, 1.10.61, 1.10.62, 1.10.62, 1.10.63, 1.10.63, 1.10.64, 1.10.64, 1.10.65, 1.10.65, 1.10.66, 1.10.66, 1.10.67, 1.10.67, 1.10.68, 1.10.68, 1.10.69, 1.10.69, 1.10.70, 1.10.70, 1.10.71, 1.10.71, 1.10.72, 1.10.72, 1.10.73, 1.10.73, 1.10.74, 1.10.74, 1.10.75, 1.10.75, 1.10.76, 1.10.76, 1.10.77, 1.10.77, 1.10.78, 1.10.78, 1.10.79, 1.10.79, 1.10.80, 1.10.80, 1.10.81, 1.10.81, 1.10.82, 1.10.82, 1.10.83, 1.10.83, 1.10.84, 1.10.84, 1.11.0, 1.11.0, 1.11.1, 1.11.1, 1.11.2, 1.11.2, 1.11.3, 1.11.3 There are incompatible versions in the resolved dependencies. ``` This is due to the fact that [this line in moto](https://github.com/spulec/moto/blob/master/setup.py#L12) requires less than version 1.11 of botocore, and [this line in boto3](https://github.com/boto/boto3/blob/develop/setup.py#L17) requires greater than or equal to 1.11.3. This is installing moto 1.3.5, the expectation is no warning for mismatches in dependencies.
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.9.16,<1.11\",\n \"cookies\",\n \"cryptography>=2.0.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk<0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.5',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cookies\",\n \"cryptography>=2.0.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk<0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.5',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 16aaf145294f..bcb48a967142 100755 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ install_requires = [ "Jinja2>=2.7.3", "boto>=2.36.0", - "boto3>=1.6.16", + "boto3>=1.6.16,<1.8", "botocore>=1.9.16,<1.11", "cookies", "cryptography>=2.0.0",
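The moto record above is a constraint-intersection problem: moto 1.3.5 pins `botocore<1.11` while contemporary boto3 requires `botocore>=1.11.3`, so no botocore release can satisfy both. The emptiness of that intersection is easy to demonstrate with the third-party `packaging` library (version numbers taken from the issue; this is only an illustration, not part of the fix):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

moto_pin = SpecifierSet(">=1.9.16,<1.11")   # botocore constraint from moto 1.3.5
boto3_pin = SpecifierSet(">=1.11.3")        # botocore constraint from boto3 at the time

combined = moto_pin & boto3_pin             # merged constraint set
print(combined)

print(Version("1.10.84") in combined)       # False: rejected by >=1.11.3
print(Version("1.11.3") in combined)        # False: rejected by <1.11
```

Pinning `boto3>=1.6.16,<1.8` in the diff keeps moto on a boto3 series whose botocore requirement still overlaps with `<1.11`.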
geopandas__geopandas-372
bbox filter from read_file doesn't take advantage of fiona filtering In line: https://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L28 The function goes through the trouble of checking if `bbox` is not null, but just calls `f` in `from_features` just the same. Line 28 just needs to be changed to the intended `f_filt` to return filtered results or non-filtered if no bbox is passed in.
[ { "content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n in the fiona library when opening the file. For more information on \n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox)==4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f, crs=crs)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as c:\n for feature in df.iterfeatures():\n c.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py" } ]
[ { "content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n in the fiona library when opening the file. For more information on \n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox)==4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as c:\n for feature in df.iterfeatures():\n c.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py" } ]
diff --git a/geopandas/io/file.py b/geopandas/io/file.py index d407615347..1ac2f88635 100644 --- a/geopandas/io/file.py +++ b/geopandas/io/file.py @@ -25,7 +25,7 @@ def read_file(filename, **kwargs): f_filt = f.filter(bbox=bbox) else: f_filt = f - gdf = GeoDataFrame.from_features(f, crs=crs) + gdf = GeoDataFrame.from_features(f_filt, crs=crs) return gdf diff --git a/geopandas/io/tests/test_io.py b/geopandas/io/tests/test_io.py index e8e87f9d73..c6a1cf5e94 100644 --- a/geopandas/io/tests/test_io.py +++ b/geopandas/io/tests/test_io.py @@ -54,3 +54,15 @@ def test_read_file(self): df = self.df.rename(columns=lambda x: x.lower()) validate_boro_df(self, df) self.assert_(df.crs == self.crs) + + def test_filtered_read_file(self): + full_df_shape = self.df.shape + nybb_filename, nybb_zip_path = download_nybb() + vfs = 'zip://' + nybb_filename + bbox = (1031051.7879884212, 224272.49231459625, 1047224.3104931959, 244317.30894023244) + filtered_df = read_file(nybb_zip_path, vfs=vfs, bbox=bbox) + filtered_df_shape = filtered_df.shape + assert(full_df_shape != filtered_df_shape) + assert(filtered_df_shape == (2, 5)) + +
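The caller-visible effect of swapping `f` for `f_filt` is worth spelling out: before the patch, a `bbox` passed to `read_file` was used to build `f_filt` but then discarded, so the full layer always came back. A minimal sketch of the expected post-fix behaviour — the file name is hypothetical and the bounding box is borrowed from the PR's test:

```python
import geopandas as gpd  # assumes a geopandas build containing this patch

full = gpd.read_file("boroughs.shp")  # hypothetical layer
subset = gpd.read_file(
    "boroughs.shp",
    bbox=(1031051.79, 224272.49, 1047224.31, 244317.31),  # (minx, miny, maxx, maxy)
)

# With from_features(f_filt, ...), the filtered frame is a subset of the layer;
# before the fix both reads produced identical shapes.
assert len(subset) <= len(full)
```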
ibis-project__ibis-8364
bug: `Scalar.isin(Column)` returns a Column, not a Scalar ### What happened? ```python import ibis needle = ibis.literal(2) haystack = ibis.memtable({"x": [1, 2, 3]}).x type(needle.isin(haystack)) # ibis.expr.types.logical.BooleanColumn ``` ### What version of ibis are you using? main ### What backend(s) are you using, if any? _No response_ ### Relevant log output _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "from __future__ import annotations\n\nfrom public import public\n\nimport ibis.expr.datashape as ds\nimport ibis.expr.datatypes as dt\nimport ibis.expr.rules as rlz\nfrom ibis.common.annotations import attribute\nfrom ibis.common.exceptions import IntegrityError\nfrom ibis.expr.operations.core import Value\nfrom ibis.expr.operations.relations import Relation # noqa: TCH001\n\n\n@public\nclass Subquery(Value):\n rel: Relation\n\n @attribute\n def relations(self):\n return frozenset()\n\n\n@public\nclass ExistsSubquery(Subquery):\n dtype = dt.boolean\n shape = ds.columnar\n\n\n@public\nclass ScalarSubquery(Subquery):\n shape = ds.scalar\n\n def __init__(self, rel):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to ScalarSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n super().__init__(rel=rel)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def dtype(self):\n return self.value.dtype\n\n\n@public\nclass InSubquery(Subquery):\n needle: Value\n\n dtype = dt.boolean\n shape = ds.columnar\n\n def __init__(self, rel, needle):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to InSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n (value,) = rel.values.values()\n if not rlz.comparable(value, needle):\n raise IntegrityError(f\"{needle!r} is not comparable to {value!r}\")\n super().__init__(rel=rel, needle=needle)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def relations(self):\n return self.needle.relations\n", "path": "ibis/expr/operations/subqueries.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom public import public\n\nimport ibis.expr.datashape as ds\nimport ibis.expr.datatypes as dt\nimport ibis.expr.rules as rlz\nfrom ibis.common.annotations import attribute\nfrom ibis.common.exceptions import IntegrityError\nfrom ibis.expr.operations.core import Value\nfrom ibis.expr.operations.relations import Relation # noqa: TCH001\n\n\n@public\nclass Subquery(Value):\n rel: Relation\n\n @attribute\n def relations(self):\n return frozenset()\n\n\n@public\nclass ExistsSubquery(Subquery):\n dtype = dt.boolean\n shape = ds.columnar\n\n\n@public\nclass ScalarSubquery(Subquery):\n shape = ds.scalar\n\n def __init__(self, rel):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to ScalarSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n super().__init__(rel=rel)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def dtype(self):\n return self.value.dtype\n\n\n@public\nclass InSubquery(Subquery):\n needle: Value\n\n dtype = dt.boolean\n shape = rlz.shape_like(\"needle\")\n\n def __init__(self, rel, needle):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to InSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n (value,) = rel.values.values()\n if not rlz.comparable(value, needle):\n raise IntegrityError(f\"{needle!r} is not comparable to {value!r}\")\n super().__init__(rel=rel, needle=needle)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def relations(self):\n return self.needle.relations\n", "path": "ibis/expr/operations/subqueries.py" } ]
diff --git a/ibis/expr/operations/subqueries.py b/ibis/expr/operations/subqueries.py index 76f3cd8a9cc6..229c5bb56c61 100644 --- a/ibis/expr/operations/subqueries.py +++ b/ibis/expr/operations/subqueries.py @@ -53,7 +53,7 @@ class InSubquery(Subquery): needle: Value dtype = dt.boolean - shape = ds.columnar + shape = rlz.shape_like("needle") def __init__(self, rel, needle): if len(rel.schema) != 1: diff --git a/ibis/tests/expr/test_value_exprs.py b/ibis/tests/expr/test_value_exprs.py index ebb86c7a4d11..aec143083ff5 100644 --- a/ibis/tests/expr/test_value_exprs.py +++ b/ibis/tests/expr/test_value_exprs.py @@ -1713,3 +1713,13 @@ def test_deferred_doesnt_convert_callables(): b=t.b.split(",").filter(lambda pp: ~pp.isin(("word1", "word2"))) ) assert expr.equals(expected) + + +def test_in_subquery_shape(): + t = ibis.table([("a", "int64"), ("b", "string")]) + + expr = t.a.cast("string").isin(t.b) + assert expr.op().shape.is_columnar() + + expr = ibis.literal(2).isin(t.a) + assert expr.op().shape.is_scalar()
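Swapping the hard-coded `ds.columnar` for `rlz.shape_like("needle")` means the expression's shape now follows the needle. Re-running the reproduction from the report, plus the columnar case from the added test:

```python
import ibis

haystack = ibis.memtable({"x": [1, 2, 3]}).x

# Scalar needle -> scalar result (previously a BooleanColumn).
assert ibis.literal(2).isin(haystack).op().shape.is_scalar()

# Column needle -> columnar result, unchanged.
t = ibis.table([("a", "int64"), ("b", "string")])
assert t.a.cast("string").isin(t.b).op().shape.is_columnar()
```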
mlcommons__GaNDLF-753
All training is failing with a `timm` error **Describe the bug** Unable to train on current master. **To Reproduce** Steps to reproduce the behavior: 1. Try to start any segmentation training. 2. See error: ```python-traceback Traceback (most recent call last): File "/software/gandlf_personal/gandlf_run", line 11, in <module> from GANDLF.cli import main_run, copyrightMessage File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/__init__.py", line 2, in <module> from .main_run import main_run File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/main_run.py", line 4, in <module> from GANDLF.training_manager import TrainingManager, TrainingManager_split File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/training_manager.py", line 6, in <module> from GANDLF.compute import training_loop File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/__init__.py", line 1, in <module> from .training_loop import training_loop File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/training_loop.py", line 30, in <module> from .generic import create_pytorch_objects File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/generic.py", line 3, in <module> from GANDLF.models import get_model File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/__init__.py", line 32, in <module> from .imagenet_unet import imagenet_unet_wrapper File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/imagenet_unet.py", line 7, in <module> from segmentation_models_pytorch.base import ( File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/__init__.py", line 2, in <module> from . import encoders File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/encoders/__init__.py", line 1, in <module> import timm File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/__init__.py", line 2, in <module> from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/__init__.py", line 28, in <module> from .maxxvit import * File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/maxxvit.py", line 225, in <module> @dataclass ^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 1230, in dataclass return wrap(cls) ^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 1220, in wrap return _process_class(cls, init, repr, eq, order, unsafe_hash, ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 958, in _process_class cls_fields.append(_get_field(cls, name, type, kw_only)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 815, in _get_field raise ValueError(f'mutable default {type(f.default)} for field ' ValueError: mutable default <class 'timm.models.maxxvit.MaxxVitConvCfg'> for field conv_cfg is not allowed: use default_factory ``` **Expected behavior** It should work. **Screenshots** N.A. **GaNDLF Version** <!-- Put the output of the following command: python -c 'import GANDLF as g;print(g.__version__)' --> 0.0.18-dev **Desktop (please complete the following information):** N.A. **Additional context** N.A.
[ { "content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.3\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.2\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n 
\"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.3\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n 
\"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 46323e207..464f7a603 100644 --- a/setup.py +++ b/setup.py @@ -110,7 +110,7 @@ def run(self): "pydicom", "onnx", "torchinfo==1.7.0", - "segmentation-models-pytorch==0.3.2", + "segmentation-models-pytorch==0.3.3", "ACSConv==0.1.1", "docker", "dicom-anonymizer",
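The pin bump matters because the 0.3.2 release of `segmentation-models-pytorch` resolves to a timm whose `maxxvit` config dataclasses use mutable class defaults — exactly the `ValueError` in the traceback under Python 3.11 — while 0.3.3 admits a newer timm without that problem. A quick sanity check of the failing import chain, assuming the updated requirements are installed:

```python
# Previously this chain crashed inside timm.models.maxxvit under Python 3.11.
import segmentation_models_pytorch  # noqa: F401
from GANDLF.models import get_model  # noqa: F401  # the call site in the traceback

print("import chain OK")
```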
Lightning-Universe__lightning-flash-1667
`ObjectDetectionData.from_images` raise an error ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> after pass the datamodule into finetune, at the end of the epoch it throws an error message: ``` /opt/conda/lib/python3.7/site-packages/flash/core/data/utilities/classification.py in _strip(x) 44 45 def _strip(x: str) -> str: ---> 46 return x.strip(", ") 47 48 AttributeError: 'int' object has no attribute 'strip' ``` ### To Reproduce #### Code sample datamodule snipset code was from documentation ```py from flash.image import ObjectDetectionData, ObjectDetector from PIL import Image import numpy as np from flash import Trainer datamodule = ObjectDetectionData.from_images( train_images=[ Image.fromarray(np.random.randint(0, 255, (512, 512, 3), dtype="uint8")), Image.fromarray(np.random.randint(0, 255,(512, 512, 3), dtype="uint8")), Image.fromarray(np.random.randint(0, 255, (512, 512, 3), dtype="uint8")), ], train_targets=[["cat"], ["cat"], ["cat"]], train_bboxes=[ [{"xmin": 10, "ymin": 20, "width": 5, "height": 10}], [{"xmin": 20, "ymin": 30, "width": 10, "height": 10}], [{"xmin": 10, "ymin": 20, "width": 5, "height": 25}], ], predict_images=[Image.fromarray(np.random.randint(0, 255, (512, 512, 3), dtype="uint8"))], transform_kwargs=dict(image_size=(512, 512)), batch_size=2, ) model=ObjectDetector( head="efficientdet", backbone="d0", image_size=512, labels=datamodule.labels, ) trainer = Trainer(max_epochs=20,accelerator="gpu") trainer.finetune(model, datamodule=datamodule, strategy="freeze") ``` ### Expected behavior It should not throw an error after the end of an epoch ### Environment - OS (e.g., Linux): Ubuntu - Python version: 3.7.111 - PyTorch/Lightning/Flash Version : pytorch-lightning==1.7.0, lightning-flash==0.8.0 - GPU models and configuration: Quadro T2000, Cuda Version 11.4
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, Union, cast\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nfrom flash.core.data.utilities.sort import sorted_alphanumeric\nfrom flash.core.utilities.imports import _TOPIC_CORE_AVAILABLE\n\n# Skip doctests if requirements aren't available\nif not _TOPIC_CORE_AVAILABLE:\n __doctest_skip__ = [\"*\"]\n\n\ndef _is_list_like(x: Any) -> bool:\n try:\n _ = x[0]\n _ = len(x)\n return True\n except (TypeError, IndexError, KeyError):\n return False\n\n\ndef _as_list(x: Union[List, Tensor, np.ndarray]) -> List:\n if torch.is_tensor(x) or isinstance(x, np.ndarray):\n return cast(List, x.tolist())\n return x\n\n\ndef _strip(x: str) -> str:\n return x.strip(\", \")\n\n\n@dataclass\nclass TargetFormatter:\n \"\"\"A ``TargetFormatter`` is used to convert targets of a given type to a standard format required by the loss\n function. To implement a custom ``TargetFormatter``, simply override the ``format`` method with your own logic.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from dataclasses import dataclass\n >>> from typing import ClassVar, Optional\n >>> from flash.core.data.utilities.classification import TargetFormatter\n >>>\n >>> @dataclass\n ... class CustomStringTargetFormatter(TargetFormatter):\n ... \"A ``TargetFormatter`` which converts strings of the format '#<index>' to integers.\"\n ... multi_label: ClassVar[Optional[bool]] = False\n ... def format(self, target: str) -> int:\n ... return int(target.strip(\"#\"))\n ...\n >>> formatter = CustomStringTargetFormatter()\n >>> formatter(\"#1\")\n 1\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = None\n numeric: ClassVar[Optional[bool]] = None\n binary: ClassVar[Optional[bool]] = None\n labels: Optional[List[str]] = None\n num_classes: Optional[int] = None\n\n def __post_init__(self):\n self.num_classes = len(self.labels) if self.labels is not None else self.num_classes\n\n def __call__(self, target: Any) -> Any:\n return self.format(target)\n\n def format(self, target: Any) -> Any:\n raise NotImplementedError\n\n\n@dataclass\nclass SingleNumericTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a single numeric value (the class index).\n\n Examples\n ________\n\n .. 
doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import SingleNumericTargetFormatter\n >>> formatter = SingleNumericTargetFormatter(num_classes=10)\n >>> formatter(5)\n 5\n >>> formatter([5])\n 5\n >>> formatter(torch.tensor(5))\n 5\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = False\n numeric: ClassVar[Optional[bool]] = True\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n result = _as_list(target)\n if _is_list_like(result):\n result = result[0]\n return result\n\n\n@dataclass\nclass SingleLabelTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a single string label.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from flash.core.data.utilities.classification import SingleLabelTargetFormatter\n >>> formatter = SingleLabelTargetFormatter(labels=[\"cat\", \"dog\"], num_classes=2)\n >>> formatter(\"cat\")\n 0\n >>> formatter([\"dog\"])\n 1\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = False\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def __post_init__(self):\n super().__post_init__()\n self.label_to_idx = {label: idx for idx, label in enumerate(self.labels)}\n\n def format(self, target: Any) -> Any:\n return self.label_to_idx[_strip(target[0] if _is_list_like(target) and not isinstance(target, str) else target)]\n\n\n@dataclass\nclass SingleBinaryTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that are one-hot encoded binaries.\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import SingleBinaryTargetFormatter\n >>> formatter = SingleBinaryTargetFormatter(num_classes=2)\n >>> formatter([1, 0])\n 0\n >>> formatter(torch.tensor([0, 1]))\n 1\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = False\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = True\n\n def format(self, target: Any) -> Any:\n for idx, t in enumerate(target):\n if t == 1:\n return idx\n return 0\n\n\n@dataclass\nclass MultiNumericTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain multiple numeric values (the class indices).\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import MultiNumericTargetFormatter\n >>> formatter = MultiNumericTargetFormatter(num_classes=10)\n >>> formatter([2, 5])\n [0, 0, 1, 0, 0, 1, 0, 0, 0, 0]\n >>> formatter(torch.tensor([2, 5]))\n [0, 0, 1, 0, 0, 1, 0, 0, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = True\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n result = [0] * self.num_classes\n for idx in target:\n result[idx] = 1\n return result\n\n\n@dataclass\nclass MultiLabelTargetFormatter(SingleLabelTargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain multiple string labels in a list.\n\n Examples\n ________\n\n .. 
doctest::\n\n >>> from flash.core.data.utilities.classification import MultiLabelTargetFormatter\n >>> formatter = MultiLabelTargetFormatter(labels=[\"bird\", \"cat\", \"dog\"], num_classes=3)\n >>> formatter([\"cat\", \"dog\"])\n [0, 1, 1]\n >>> formatter([\"bird\"])\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n result = [0] * self.num_classes\n for t in target:\n idx = super().format(t)\n result[idx] = 1\n return result\n\n\n@dataclass\nclass CommaDelimitedMultiLabelTargetFormatter(MultiLabelTargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a string with multiple comma-delimited labels.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from flash.core.data.utilities.classification import CommaDelimitedMultiLabelTargetFormatter\n >>> formatter = CommaDelimitedMultiLabelTargetFormatter(labels=[\"bird\", \"cat\", \"dog\"], num_classes=3)\n >>> formatter(\"cat,dog\")\n [0, 1, 1]\n >>> formatter(\"bird\")\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n return super().format(target.split(\",\"))\n\n\n@dataclass\nclass SpaceDelimitedTargetFormatter(MultiLabelTargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a string with multiple space-delimited labels.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from flash.core.data.utilities.classification import SpaceDelimitedTargetFormatter\n >>> formatter = SpaceDelimitedTargetFormatter(labels=[\"bird\", \"cat\", \"dog\"], num_classes=3)\n >>> formatter(\"cat dog\")\n [0, 1, 1]\n >>> formatter(\"bird\")\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n return super().format(target.split(\" \"))\n\n\n@dataclass\nclass MultiBinaryTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that are multi-hot binary.\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import MultiBinaryTargetFormatter\n >>> formatter = MultiBinaryTargetFormatter(num_classes=3)\n >>> formatter([0, 1, 1])\n [0, 1, 1]\n >>> formatter(torch.tensor([1, 0, 0]))\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = True\n\n def format(self, target: Any) -> Any:\n return _as_list(target)\n\n\n@dataclass\nclass MultiSoftTargetFormatter(MultiBinaryTargetFormatter):\n \"\"\"A ``TargetFormatter`` for mutli-label soft targets.\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import MultiSoftTargetFormatter\n >>> formatter = MultiSoftTargetFormatter(num_classes=3)\n >>> formatter([0.1, 0.9, 0.6])\n [0.1, 0.9, 0.6]\n >>> formatter(torch.tensor([0.9, 0.6, 0.7])) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n [0..., 0..., 0...]\n \"\"\"\n\n binary: ClassVar[Optional[bool]] = False\n\n\ndef _get_target_formatter_type(target: Any) -> Type[TargetFormatter]:\n \"\"\"Determine the ``TargetFormatter`` type for a given target.\n\n Multi-label targets can be:\n * Comma delimited string - ``CommaDelimitedMultiLabelTargetFormatter`` (e.g. 
[\"blue,green\", \"red\"])\n * Space delimited string - ``SpaceDelimitedMultiLabelTargetFormatter`` (e.g. [\"blue green\", \"red\"])\n * List of strings - ``MultiLabelTargetFormatter`` (e.g. [[\"blue\", \"green\"], [\"red\"]])\n * List of numbers - ``MultiNumericTargetFormatter`` (e.g. [[0, 1], [2]])\n * Binary list - ``MultiBinaryTargetFormatter`` (e.g. [[1, 1, 0], [0, 0, 1]])\n * Soft target - ``MultiSoftTargetFormatter`` (e.g. [[0.1, 0, 0], [0.9, 0.7, 0]])\n\n Single-label targets can be:\n * Single string - ``SingleLabelTargetFormatter`` (e.g. [\"blue\", \"green\", \"red\"])\n * Single number - ``SingleNumericTargetFormatter`` (e.g. [0, 1, 2])\n * One-hot binary list - ``SingleBinaryTargetFormatter`` (e.g. [[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n\n Args:\n target: A target that is one of: a single target, a list of targets, a comma delimited string.\n \"\"\"\n if isinstance(target, str):\n target = _strip(target)\n # TODO: This could be a dangerous assumption if people happen to have a label that contains a comma or space\n if \",\" in target:\n return CommaDelimitedMultiLabelTargetFormatter\n if \" \" in target:\n return SpaceDelimitedTargetFormatter\n return SingleLabelTargetFormatter\n if _is_list_like(target):\n if isinstance(target[0], str):\n return MultiLabelTargetFormatter\n target = _as_list(target)\n if len(target) > 1:\n if all(t == 0 or t == 1 for t in target):\n if sum(target) == 1:\n return SingleBinaryTargetFormatter\n return MultiBinaryTargetFormatter\n if any(isinstance(t, float) for t in target):\n return MultiSoftTargetFormatter\n return MultiNumericTargetFormatter\n return SingleNumericTargetFormatter\n\n\n_RESOLUTION_MAPPING: Dict[Type[TargetFormatter], List[Type[TargetFormatter]]] = {\n MultiBinaryTargetFormatter: [MultiNumericTargetFormatter, MultiSoftTargetFormatter],\n SingleBinaryTargetFormatter: [MultiBinaryTargetFormatter, MultiNumericTargetFormatter, MultiSoftTargetFormatter],\n SingleLabelTargetFormatter: [CommaDelimitedMultiLabelTargetFormatter, SpaceDelimitedTargetFormatter],\n SingleNumericTargetFormatter: [SingleBinaryTargetFormatter, MultiNumericTargetFormatter],\n}\n\n\ndef _resolve_target_formatter(a: Type[TargetFormatter], b: Type[TargetFormatter]) -> Type[TargetFormatter]:\n \"\"\"The purpose of this resolution function is to enable reduction of the ``TargetFormatter`` type over multiple\n targets. For example, if one target formatter type is ``CommaDelimitedMultiLabelTargetFormatter`` and the other type\n is ``SingleLabelTargetFormatter``then their reduction will be ``CommaDelimitedMultiLabelTargetFormatter``.\n\n Raises:\n ValueError: If the two target formatters could not be resolved.\n \"\"\"\n if a is b:\n return a\n if a in _RESOLUTION_MAPPING and b in _RESOLUTION_MAPPING[a]:\n return b\n if b in _RESOLUTION_MAPPING and a in _RESOLUTION_MAPPING[b]:\n return a\n raise ValueError(\n \"Found inconsistent target formats. All targets should be either: single values, lists of values, or \"\n \"comma-delimited strings.\"\n )\n\n\ndef _get_target_details(\n targets: List[Any],\n target_formatter_type: Type[TargetFormatter],\n) -> Tuple[Optional[List[Any]], int]:\n \"\"\"Given a list of targets and their ``TargetFormatter`` type, this function determines the ``labels`` and\n ``num_classes``. 
Targets can be:\n\n * Token-based: ``labels`` is the unique tokens, ``num_classes`` is the number of unique tokens.\n * Numeric: ``labels`` is ``None`` and ``num_classes`` is the maximum value plus one.\n * Binary: ``labels`` is ``None`` and ``num_classes`` is the length of the binary target.\n\n Args:\n targets: A list of targets.\n target_formatter_type: The ``TargetFormatter`` type.\n\n Returns:\n (labels, num_classes): Tuple containing the inferred ``labels`` (or ``None`` if no labels could be inferred)\n and ``num_classes``.\n \"\"\"\n targets = _as_list(targets)\n if target_formatter_type.numeric:\n # Take a max over all values\n if target_formatter_type is MultiNumericTargetFormatter:\n values = []\n for target in targets:\n values.extend(target)\n else:\n values = targets\n num_classes = _as_list(max(values))\n if _is_list_like(num_classes):\n num_classes = num_classes[0]\n num_classes = num_classes + 1\n labels = None\n elif target_formatter_type.binary or (target_formatter_type is MultiSoftTargetFormatter):\n # Take a length\n # TODO: Add a check here and error if target lengths are not all equal\n num_classes = len(targets[0])\n labels = None\n else:\n # Compute tokens\n tokens = []\n if target_formatter_type is CommaDelimitedMultiLabelTargetFormatter:\n for target in targets:\n tokens.extend(target.split(\",\"))\n elif target_formatter_type is SpaceDelimitedTargetFormatter:\n for target in targets:\n tokens.extend(target.split(\" \"))\n elif target_formatter_type is MultiLabelTargetFormatter:\n for target in targets:\n tokens.extend(target)\n else:\n tokens = targets\n\n tokens = [_strip(token) for token in tokens]\n labels = list(sorted_alphanumeric(set(tokens)))\n num_classes = None\n return labels, num_classes\n\n\ndef get_target_formatter(\n targets: List[Any],\n labels: Optional[List[str]] = None,\n num_classes: Optional[int] = None,\n add_background: bool = False,\n) -> TargetFormatter:\n \"\"\"Get the ``TargetFormatter`` object to use for the given targets.\n\n Args:\n targets: The list of targets to format.\n labels: Optionally provide ``labels`` / ``num_classes`` instead of inferring them.\n num_classes: Optionally provide ``labels`` / ``num_classes`` instead of inferring them.\n add_background: If ``True``, a background class will be inserted as class zero if ``labels`` and\n ``num_classes`` are being inferred.\n\n Returns:\n The target formatter to use when formatting targets.\n \"\"\"\n targets = _as_list(targets)\n target_formatter_type: Type[TargetFormatter] = reduce(\n _resolve_target_formatter, [_get_target_formatter_type(target) for target in targets]\n )\n if labels is None and num_classes is None:\n labels, num_classes = _get_target_details(targets, target_formatter_type)\n if add_background:\n labels = [\"background\"] + labels if labels is not None else labels\n num_classes = num_classes + 1 if num_classes is not None else num_classes\n return target_formatter_type(labels=labels, num_classes=num_classes)\n", "path": "src/flash/core/data/utilities/classification.py" } ]
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, Union, cast\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nfrom flash.core.data.utilities.sort import sorted_alphanumeric\nfrom flash.core.utilities.imports import _TOPIC_CORE_AVAILABLE\n\n# Skip doctests if requirements aren't available\nif not _TOPIC_CORE_AVAILABLE:\n __doctest_skip__ = [\"*\"]\n\n\ndef _is_list_like(x: Any) -> bool:\n try:\n _ = x[0]\n _ = len(x)\n return True\n except (TypeError, IndexError, KeyError):\n return False\n\n\ndef _as_list(x: Union[List, Tensor, np.ndarray]) -> List:\n if torch.is_tensor(x) or isinstance(x, np.ndarray):\n return cast(List, x.tolist())\n return x\n\n\ndef _strip(x: Union[str, int]) -> str:\n \"\"\"Replace both ` ` and `,` from str.\"\"\"\n if isinstance(x, str):\n return x.strip(\", \")\n return str(x)\n\n\n@dataclass\nclass TargetFormatter:\n \"\"\"A ``TargetFormatter`` is used to convert targets of a given type to a standard format required by the loss\n function. To implement a custom ``TargetFormatter``, simply override the ``format`` method with your own logic.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from dataclasses import dataclass\n >>> from typing import ClassVar, Optional\n >>> from flash.core.data.utilities.classification import TargetFormatter\n >>>\n >>> @dataclass\n ... class CustomStringTargetFormatter(TargetFormatter):\n ... \"A ``TargetFormatter`` which converts strings of the format '#<index>' to integers.\"\n ... multi_label: ClassVar[Optional[bool]] = False\n ... def format(self, target: str) -> int:\n ... return int(target.strip(\"#\"))\n ...\n >>> formatter = CustomStringTargetFormatter()\n >>> formatter(\"#1\")\n 1\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = None\n numeric: ClassVar[Optional[bool]] = None\n binary: ClassVar[Optional[bool]] = None\n labels: Optional[List[str]] = None\n num_classes: Optional[int] = None\n\n def __post_init__(self):\n self.num_classes = len(self.labels) if self.labels is not None else self.num_classes\n\n def __call__(self, target: Any) -> Any:\n return self.format(target)\n\n def format(self, target: Any) -> Any:\n raise NotImplementedError\n\n\n@dataclass\nclass SingleNumericTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a single numeric value (the class index).\n\n Examples\n ________\n\n .. 
doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import SingleNumericTargetFormatter\n >>> formatter = SingleNumericTargetFormatter(num_classes=10)\n >>> formatter(5)\n 5\n >>> formatter([5])\n 5\n >>> formatter(torch.tensor(5))\n 5\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = False\n numeric: ClassVar[Optional[bool]] = True\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n result = _as_list(target)\n if _is_list_like(result):\n result = result[0]\n return result\n\n\n@dataclass\nclass SingleLabelTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a single string label.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from flash.core.data.utilities.classification import SingleLabelTargetFormatter\n >>> formatter = SingleLabelTargetFormatter(labels=[\"cat\", \"dog\"], num_classes=2)\n >>> formatter(\"cat\")\n 0\n >>> formatter([\"dog\"])\n 1\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = False\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def __post_init__(self):\n super().__post_init__()\n self.label_to_idx = {label: idx for idx, label in enumerate(self.labels)}\n\n def format(self, target: Any) -> Any:\n return self.label_to_idx[_strip(target[0] if _is_list_like(target) and not isinstance(target, str) else target)]\n\n\n@dataclass\nclass SingleBinaryTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that are one-hot encoded binaries.\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import SingleBinaryTargetFormatter\n >>> formatter = SingleBinaryTargetFormatter(num_classes=2)\n >>> formatter([1, 0])\n 0\n >>> formatter(torch.tensor([0, 1]))\n 1\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = False\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = True\n\n def format(self, target: Any) -> Any:\n for idx, t in enumerate(target):\n if t == 1:\n return idx\n return 0\n\n\n@dataclass\nclass MultiNumericTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain multiple numeric values (the class indices).\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import MultiNumericTargetFormatter\n >>> formatter = MultiNumericTargetFormatter(num_classes=10)\n >>> formatter([2, 5])\n [0, 0, 1, 0, 0, 1, 0, 0, 0, 0]\n >>> formatter(torch.tensor([2, 5]))\n [0, 0, 1, 0, 0, 1, 0, 0, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = True\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n result = [0] * self.num_classes\n for idx in target:\n result[idx] = 1\n return result\n\n\n@dataclass\nclass MultiLabelTargetFormatter(SingleLabelTargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain multiple string labels in a list.\n\n Examples\n ________\n\n .. 
doctest::\n\n >>> from flash.core.data.utilities.classification import MultiLabelTargetFormatter\n >>> formatter = MultiLabelTargetFormatter(labels=[\"bird\", \"cat\", \"dog\"], num_classes=3)\n >>> formatter([\"cat\", \"dog\"])\n [0, 1, 1]\n >>> formatter([\"bird\"])\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n result = [0] * self.num_classes\n for t in target:\n idx = super().format(t)\n result[idx] = 1\n return result\n\n\n@dataclass\nclass CommaDelimitedMultiLabelTargetFormatter(MultiLabelTargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a string with multiple comma-delimited labels.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from flash.core.data.utilities.classification import CommaDelimitedMultiLabelTargetFormatter\n >>> formatter = CommaDelimitedMultiLabelTargetFormatter(labels=[\"bird\", \"cat\", \"dog\"], num_classes=3)\n >>> formatter(\"cat,dog\")\n [0, 1, 1]\n >>> formatter(\"bird\")\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n return super().format(target.split(\",\"))\n\n\n@dataclass\nclass SpaceDelimitedTargetFormatter(MultiLabelTargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that contain a string with multiple space-delimited labels.\n\n Examples\n ________\n\n .. doctest::\n\n >>> from flash.core.data.utilities.classification import SpaceDelimitedTargetFormatter\n >>> formatter = SpaceDelimitedTargetFormatter(labels=[\"bird\", \"cat\", \"dog\"], num_classes=3)\n >>> formatter(\"cat dog\")\n [0, 1, 1]\n >>> formatter(\"bird\")\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = False\n\n def format(self, target: Any) -> Any:\n return super().format(target.split(\" \"))\n\n\n@dataclass\nclass MultiBinaryTargetFormatter(TargetFormatter):\n \"\"\"A ``TargetFormatter`` for targets that are multi-hot binary.\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import MultiBinaryTargetFormatter\n >>> formatter = MultiBinaryTargetFormatter(num_classes=3)\n >>> formatter([0, 1, 1])\n [0, 1, 1]\n >>> formatter(torch.tensor([1, 0, 0]))\n [1, 0, 0]\n \"\"\"\n\n multi_label: ClassVar[Optional[bool]] = True\n numeric: ClassVar[Optional[bool]] = False\n binary: ClassVar[Optional[bool]] = True\n\n def format(self, target: Any) -> Any:\n return _as_list(target)\n\n\n@dataclass\nclass MultiSoftTargetFormatter(MultiBinaryTargetFormatter):\n \"\"\"A ``TargetFormatter`` for mutli-label soft targets.\n\n Examples\n ________\n\n .. doctest::\n\n >>> import torch\n >>> from flash.core.data.utilities.classification import MultiSoftTargetFormatter\n >>> formatter = MultiSoftTargetFormatter(num_classes=3)\n >>> formatter([0.1, 0.9, 0.6])\n [0.1, 0.9, 0.6]\n >>> formatter(torch.tensor([0.9, 0.6, 0.7])) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n [0..., 0..., 0...]\n \"\"\"\n\n binary: ClassVar[Optional[bool]] = False\n\n\ndef _get_target_formatter_type(target: Any) -> Type[TargetFormatter]:\n \"\"\"Determine the ``TargetFormatter`` type for a given target.\n\n Multi-label targets can be:\n * Comma delimited string - ``CommaDelimitedMultiLabelTargetFormatter`` (e.g. 
[\"blue,green\", \"red\"])\n * Space delimited string - ``SpaceDelimitedMultiLabelTargetFormatter`` (e.g. [\"blue green\", \"red\"])\n * List of strings - ``MultiLabelTargetFormatter`` (e.g. [[\"blue\", \"green\"], [\"red\"]])\n * List of numbers - ``MultiNumericTargetFormatter`` (e.g. [[0, 1], [2]])\n * Binary list - ``MultiBinaryTargetFormatter`` (e.g. [[1, 1, 0], [0, 0, 1]])\n * Soft target - ``MultiSoftTargetFormatter`` (e.g. [[0.1, 0, 0], [0.9, 0.7, 0]])\n\n Single-label targets can be:\n * Single string - ``SingleLabelTargetFormatter`` (e.g. [\"blue\", \"green\", \"red\"])\n * Single number - ``SingleNumericTargetFormatter`` (e.g. [0, 1, 2])\n * One-hot binary list - ``SingleBinaryTargetFormatter`` (e.g. [[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n\n Args:\n target: A target that is one of: a single target, a list of targets, a comma delimited string.\n \"\"\"\n if isinstance(target, str):\n target = _strip(target)\n # TODO: This could be a dangerous assumption if people happen to have a label that contains a comma or space\n if \",\" in target:\n return CommaDelimitedMultiLabelTargetFormatter\n if \" \" in target:\n return SpaceDelimitedTargetFormatter\n return SingleLabelTargetFormatter\n if _is_list_like(target):\n if isinstance(target[0], str):\n return MultiLabelTargetFormatter\n target = _as_list(target)\n if len(target) > 1:\n if all(t == 0 or t == 1 for t in target):\n if sum(target) == 1:\n return SingleBinaryTargetFormatter\n return MultiBinaryTargetFormatter\n if any(isinstance(t, float) for t in target):\n return MultiSoftTargetFormatter\n return MultiNumericTargetFormatter\n return SingleNumericTargetFormatter\n\n\n_RESOLUTION_MAPPING: Dict[Type[TargetFormatter], List[Type[TargetFormatter]]] = {\n MultiBinaryTargetFormatter: [MultiNumericTargetFormatter, MultiSoftTargetFormatter],\n SingleBinaryTargetFormatter: [MultiBinaryTargetFormatter, MultiNumericTargetFormatter, MultiSoftTargetFormatter],\n SingleLabelTargetFormatter: [CommaDelimitedMultiLabelTargetFormatter, SpaceDelimitedTargetFormatter],\n SingleNumericTargetFormatter: [SingleBinaryTargetFormatter, MultiNumericTargetFormatter],\n}\n\n\ndef _resolve_target_formatter(a: Type[TargetFormatter], b: Type[TargetFormatter]) -> Type[TargetFormatter]:\n \"\"\"The purpose of this resolution function is to enable reduction of the ``TargetFormatter`` type over multiple\n targets. For example, if one target formatter type is ``CommaDelimitedMultiLabelTargetFormatter`` and the other type\n is ``SingleLabelTargetFormatter``then their reduction will be ``CommaDelimitedMultiLabelTargetFormatter``.\n\n Raises:\n ValueError: If the two target formatters could not be resolved.\n \"\"\"\n if a is b:\n return a\n if a in _RESOLUTION_MAPPING and b in _RESOLUTION_MAPPING[a]:\n return b\n if b in _RESOLUTION_MAPPING and a in _RESOLUTION_MAPPING[b]:\n return a\n raise ValueError(\n \"Found inconsistent target formats. All targets should be either: single values, lists of values, or \"\n \"comma-delimited strings.\"\n )\n\n\ndef _get_target_details(\n targets: List[Any],\n target_formatter_type: Type[TargetFormatter],\n) -> Tuple[Optional[List[Any]], int]:\n \"\"\"Given a list of targets and their ``TargetFormatter`` type, this function determines the ``labels`` and\n ``num_classes``. 
Targets can be:\n\n * Token-based: ``labels`` is the unique tokens, ``num_classes`` is the number of unique tokens.\n * Numeric: ``labels`` is ``None`` and ``num_classes`` is the maximum value plus one.\n * Binary: ``labels`` is ``None`` and ``num_classes`` is the length of the binary target.\n\n Args:\n targets: A list of targets.\n target_formatter_type: The ``TargetFormatter`` type.\n\n Returns:\n (labels, num_classes): Tuple containing the inferred ``labels`` (or ``None`` if no labels could be inferred)\n and ``num_classes``.\n \"\"\"\n targets = _as_list(targets)\n if target_formatter_type.numeric:\n # Take a max over all values\n if target_formatter_type is MultiNumericTargetFormatter:\n values = []\n for target in targets:\n values.extend(target)\n else:\n values = targets\n num_classes = _as_list(max(values))\n if _is_list_like(num_classes):\n num_classes = num_classes[0]\n num_classes = num_classes + 1\n labels = None\n elif target_formatter_type.binary or (target_formatter_type is MultiSoftTargetFormatter):\n # Take a length\n # TODO: Add a check here and error if target lengths are not all equal\n num_classes = len(targets[0])\n labels = None\n else:\n # Compute tokens\n tokens = []\n if target_formatter_type is CommaDelimitedMultiLabelTargetFormatter:\n for target in targets:\n tokens.extend(target.split(\",\"))\n elif target_formatter_type is SpaceDelimitedTargetFormatter:\n for target in targets:\n tokens.extend(target.split(\" \"))\n elif target_formatter_type is MultiLabelTargetFormatter:\n for target in targets:\n tokens.extend(target)\n else:\n tokens = targets\n\n tokens = [_strip(token) for token in tokens]\n labels = list(sorted_alphanumeric(set(tokens)))\n num_classes = None\n return labels, num_classes\n\n\ndef get_target_formatter(\n targets: List[Any],\n labels: Optional[List[str]] = None,\n num_classes: Optional[int] = None,\n add_background: bool = False,\n) -> TargetFormatter:\n \"\"\"Get the ``TargetFormatter`` object to use for the given targets.\n\n Args:\n targets: The list of targets to format.\n labels: Optionally provide ``labels`` / ``num_classes`` instead of inferring them.\n num_classes: Optionally provide ``labels`` / ``num_classes`` instead of inferring them.\n add_background: If ``True``, a background class will be inserted as class zero if ``labels`` and\n ``num_classes`` are being inferred.\n\n Returns:\n The target formatter to use when formatting targets.\n \"\"\"\n targets = _as_list(targets)\n target_formatter_type: Type[TargetFormatter] = reduce(\n _resolve_target_formatter, [_get_target_formatter_type(target) for target in targets]\n )\n if labels is None and num_classes is None:\n labels, num_classes = _get_target_details(targets, target_formatter_type)\n if add_background:\n labels = [\"background\"] + labels if labels is not None else labels\n num_classes = num_classes + 1 if num_classes is not None else num_classes\n return target_formatter_type(labels=labels, num_classes=num_classes)\n", "path": "src/flash/core/data/utilities/classification.py" } ]
diff --git a/src/flash/core/data/utilities/classification.py b/src/flash/core/data/utilities/classification.py index 19a40e0449..6bd6992a5a 100644 --- a/src/flash/core/data/utilities/classification.py +++ b/src/flash/core/data/utilities/classification.py @@ -42,8 +42,11 @@ def _as_list(x: Union[List, Tensor, np.ndarray]) -> List: return x -def _strip(x: str) -> str: - return x.strip(", ") +def _strip(x: Union[str, int]) -> str: + """Replace both ` ` and `,` from str.""" + if isinstance(x, str): + return x.strip(", ") + return str(x) @dataclass
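The patched `_strip` simply tolerates the integer class indices that object-detection targets can carry instead of assuming strings. A tiny check against the helper named in the traceback (the sample inputs are made up):

```python
from flash.core.data.utilities.classification import _strip

assert _strip(" cat, ") == "cat"  # string labels are stripped as before
assert _strip(3) == "3"           # int targets no longer raise AttributeError
```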
scrapy__scrapy-742
Sort spider names on 'scrapy list' command Hey there, I think the spider names from the `scrapy list` command should be ordered by name in the console output.
[ { "content": "from __future__ import print_function\nfrom scrapy.command import ScrapyCommand\n\nclass Command(ScrapyCommand):\n\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def short_desc(self):\n return \"List available spiders\"\n\n def run(self, args, opts):\n crawler = self.crawler_process.create_crawler()\n for s in crawler.spiders.list():\n print(s)\n", "path": "scrapy/commands/list.py" } ]
[ { "content": "from __future__ import print_function\nfrom scrapy.command import ScrapyCommand\n\nclass Command(ScrapyCommand):\n\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def short_desc(self):\n return \"List available spiders\"\n\n def run(self, args, opts):\n crawler = self.crawler_process.create_crawler()\n for s in sorted(crawler.spiders.list()):\n print(s)\n", "path": "scrapy/commands/list.py" } ]
diff --git a/scrapy/commands/list.py b/scrapy/commands/list.py index f08d57f6f8a..0ea9c2313e4 100644 --- a/scrapy/commands/list.py +++ b/scrapy/commands/list.py @@ -11,5 +11,5 @@ def short_desc(self): def run(self, args, opts): crawler = self.crawler_process.create_crawler() - for s in crawler.spiders.list(): + for s in sorted(crawler.spiders.list()): print(s)
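The change only wraps the spider-name iterable in `sorted()`, so the console effect is plain alphabetical ordering; with some hypothetical spider names:

```python
names = ["toscrape-css", "alpha", "quotes"]  # hypothetical spider names

for s in sorted(names):
    print(s)
# alpha
# quotes
# toscrape-css
```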
scikit-image__scikit-image-3650
tifffile: try to use the one in the user's install first Should we try importing tifffile before using the one we versioned?
[ { "content": "from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n # parse_kwargs will extract keyword arguments intended for the TiffFile \n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n\n # read and return tiff as numpy array\n with TiffFile(fname, **kwargs_tiff) as tif:\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py" } ]
[ { "content": "try:\n from tifffile import TiffFile, imsave, parse_kwargs\nexcept ImportError:\n from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n # parse_kwargs will extract keyword arguments intended for the TiffFile \n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n\n # read and return tiff as numpy array\n with TiffFile(fname, **kwargs_tiff) as tif:\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py" } ]
diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py index 21499ffb12d..92752d43c4e 100644 --- a/skimage/io/_plugins/tifffile_plugin.py +++ b/skimage/io/_plugins/tifffile_plugin.py @@ -1,4 +1,7 @@ -from ...external.tifffile import TiffFile, imsave, parse_kwargs +try: + from tifffile import TiffFile, imsave, parse_kwargs +except ImportError: + from ...external.tifffile import TiffFile, imsave, parse_kwargs def imread(fname, dtype=None, **kwargs):
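The patch above follows the common "prefer the user's install, fall back to the vendored copy" import pattern. A minimal standalone sketch of that pattern is below; the module names come from the diff (the absolute form `skimage.external.tifffile` is used here in place of the in-package relative import so the snippet can live outside the package), and which branch succeeds obviously depends on what is installed in the running environment.

```python
# Prefer a tifffile the user installed themselves; otherwise fall back to
# the copy vendored inside scikit-image (names as used in the patch above).
try:
    from tifffile import TiffFile, imsave, parse_kwargs
except ImportError:
    from skimage.external.tifffile import TiffFile, imsave, parse_kwargs
```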
Parsl__parsl-140
Do not import `parsl` before requirements are set up ``` [annawoodard@midway001 parsl]$ python setup.py install Traceback (most recent call last): File "setup.py", line 2, in <module> from parsl.version import VERSION File "/home/annawoodard/parsl/parsl/__init__.py", line 35, in <module> from parsl.executors.ipp import IPyParallelExecutor File "/home/annawoodard/parsl/parsl/executors/ipp.py", line 4, in <module> from ipyparallel import Client ModuleNotFoundError: No module named 'ipyparallel' ``` Setuptools is supposed to take care of dependencies for us, but importing parsl in `setup.py` breaks that (because we require the dependencies by importing the parsl version from `version.py` before they can be installed). We should avoid this.
[ { "content": "from setuptools import setup, find_packages\nfrom parsl.version import VERSION\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\n# tests_require = parse_requirements('test-requirements.txt')\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple and easy parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='Yadu Nand Babuji',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\n# tests_require = parse_requirements('test-requirements.txt')\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple and easy parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='Yadu Nand Babuji',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index c8f4f73db1..3127db154e 100755 --- a/setup.py +++ b/setup.py @@ -1,5 +1,7 @@ from setuptools import setup, find_packages -from parsl.version import VERSION + +with open('parsl/version.py') as f: + exec(f.read()) with open('requirements.txt') as f: install_requires = f.readlines()
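The patch above swaps the package import for an exec of `parsl/version.py`, which avoids importing `parsl` (and therefore its not-yet-installed dependencies) at build time. As an illustration only, a sketch of an alternative that reads the version with a regular expression instead of executing the file is shown below; it assumes `version.py` assigns `VERSION` as a plain string literal.

```python
import re


def read_version(path="parsl/version.py"):
    # Pull VERSION out of the file without importing (or exec'ing) the package.
    # Assumes a simple assignment such as: VERSION = '0.5.0'
    with open(path) as f:
        source = f.read()
    match = re.search(r"""^VERSION\s*=\s*['"]([^'"]+)['"]""", source, re.MULTILINE)
    if match is None:
        raise RuntimeError("VERSION not found in %s" % path)
    return match.group(1)
```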
pex-tool__pex-1905
AtomicDirectory masks UUID4 collisions Right now, either with an exclusive lock where file locking fails or with a non-exclusive lock, two attempts at creating an atomic directory can end up sharing the same work_dir if UUID4 produces a collision: https://github.com/pantsbuild/pex/blob/9901a05d0ec8aee9b8a6e05c6f2a00999df3bab6/pex/common.py#L331-L335 If that ever happened, it would happen silently and the two processes would race each other filling out the work_dir, leading to a final os.rename of the work_dir to the target_dir with unexpected contents: https://github.com/pantsbuild/pex/blob/9901a05d0ec8aee9b8a6e05c6f2a00999df3bab6/pex/common.py#L469-L471 Even though this scenario is highly unlikely, it would be better to get an Exception raised than to get directory corruption.
[ { "content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport atexit\nimport contextlib\nimport errno\nimport fcntl\nimport itertools\nimport os\nimport re\nimport shutil\nimport stat\nimport sys\nimport tempfile\nimport threading\nimport time\nimport zipfile\nfrom collections import defaultdict, namedtuple\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom uuid import uuid4\n\nfrom pex.enum import Enum\nfrom pex.typing import TYPE_CHECKING, cast\n\nif TYPE_CHECKING:\n from typing import (\n Any,\n Callable,\n DefaultDict,\n Iterable,\n Iterator,\n NoReturn,\n Optional,\n Set,\n Sized,\n Tuple,\n Union,\n )\n\n# We use the start of MS-DOS time, which is what zipfiles use (see section 4.4.6 of\n# https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT).\nDETERMINISTIC_DATETIME = datetime(\n year=1980, month=1, day=1, hour=0, minute=0, second=0, tzinfo=None\n)\n_UNIX_EPOCH = datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0, tzinfo=None)\nDETERMINISTIC_DATETIME_TIMESTAMP = (DETERMINISTIC_DATETIME - _UNIX_EPOCH).total_seconds()\n\n\ndef filter_pyc_dirs(dirs):\n # type: (Iterable[str]) -> Iterator[str]\n \"\"\"Return an iterator over the input `dirs` filtering out Python bytecode cache directories.\"\"\"\n for d in dirs:\n if d != \"__pycache__\":\n yield d\n\n\ndef filter_pyc_files(files):\n # type: (Iterable[str]) -> Iterator[str]\n \"\"\"Iterate the input `files` filtering out any Python bytecode files.\"\"\"\n for f in files:\n # For Python 2.7, `.pyc` files are compiled as siblings to `.py` files (there is no\n # __pycache__ dir).\n if not f.endswith((\".pyc\", \".pyo\")) and not is_pyc_temporary_file(f):\n yield f\n\n\ndef is_pyc_temporary_file(file_path):\n # type: (str) -> bool\n \"\"\"Check if `file` is a temporary Python bytecode file.\"\"\"\n # We rely on the fact that the temporary files created by CPython have object id (integer)\n # suffixes to avoid picking up files where Python bytecode compilation is in-flight; i.e.:\n # `.pyc.0123456789`-style files.\n return re.search(r\"\\.pyc\\.[0-9]+$\", file_path) is not None\n\n\ndef die(msg, exit_code=1):\n # type: (str, int) -> NoReturn\n print(msg, file=sys.stderr)\n sys.exit(exit_code)\n\n\ndef pluralize(\n subject, # type: Sized\n noun, # type: str\n):\n # type: (...) -> str\n if noun == \"\":\n return \"\"\n count = len(subject)\n if count == 1:\n return noun\n if noun[-1] in (\"s\", \"x\", \"z\") or noun[-2:] in (\"sh\", \"ch\"):\n return noun + \"es\"\n else:\n return noun + \"s\"\n\n\ndef safe_copy(source, dest, overwrite=False):\n # type: (str, str, bool) -> None\n def do_copy():\n # type: () -> None\n temp_dest = dest + uuid4().hex\n shutil.copy(source, temp_dest)\n os.rename(temp_dest, dest)\n\n # If the platform supports hard-linking, use that and fall back to copying.\n # Windows does not support hard-linking.\n if hasattr(os, \"link\"):\n try:\n os.link(source, dest)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # File already exists. If overwrite=True, write otherwise skip.\n if overwrite:\n do_copy()\n elif e.errno in (errno.EPERM, errno.EXDEV):\n # For a hard link across devices issue, fall back on copying.\n #\n # For a permission issue, the cause could be one of:\n # 1. We can't read source.\n # 2. We can't write dest.\n # 3. 
We don't own source but can read it.\n # Although we can't do anything about cases 1 and 2, case 3 is due to\n # `protected_hardlinks` (see: https://www.kernel.org/doc/Documentation/sysctl/fs.txt) and\n # we can fall back to copying in that case.\n #\n # See also https://github.com/pantsbuild/pex/issues/850 where this was discovered.\n do_copy()\n else:\n raise\n elif os.path.exists(dest):\n if overwrite:\n do_copy()\n else:\n do_copy()\n\n\n# See http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit\nclass MktempTeardownRegistry(object):\n def __init__(self):\n # type: () -> None\n self._registry = defaultdict(set) # type: DefaultDict[int, Set[str]]\n self._lock = threading.RLock()\n self._getpid = os.getpid\n self._rmtree = shutil.rmtree\n atexit.register(self.teardown)\n\n def __del__(self):\n # type: () -> None\n self.teardown()\n\n def register(self, path):\n # type: (str) -> str\n with self._lock:\n self._registry[self._getpid()].add(path)\n return path\n\n def teardown(self):\n # type: () -> None\n for td in self._registry.pop(self._getpid(), []):\n self._rmtree(td, ignore_errors=True)\n\n\n_MKDTEMP_SINGLETON = MktempTeardownRegistry()\n\n\nclass PermPreservingZipFile(zipfile.ZipFile, object):\n \"\"\"A ZipFile that works around https://bugs.python.org/issue15795.\"\"\"\n\n class ZipEntry(namedtuple(\"ZipEntry\", [\"info\", \"data\"])):\n pass\n\n @classmethod\n def zip_entry_from_file(cls, filename, arcname=None, date_time=None):\n \"\"\"Construct a ZipEntry for a file on the filesystem.\n\n Usually a similar `zip_info_from_file` method is provided by `ZipInfo`, but it is not\n implemented in Python 2.7 so we re-implement it here to construct the `info` for `ZipEntry`\n adding the possibility to control the `ZipInfo` date_time separately from the underlying\n file mtime. 
See https://github.com/python/cpython/blob/master/Lib/zipfile.py#L495.\n \"\"\"\n st = os.stat(filename)\n isdir = stat.S_ISDIR(st.st_mode)\n if arcname is None:\n arcname = filename\n arcname = os.path.normpath(os.path.splitdrive(arcname)[1])\n while arcname[0] in (os.sep, os.altsep):\n arcname = arcname[1:]\n if isdir:\n arcname += \"/\"\n if date_time is None:\n date_time = time.localtime(st.st_mtime)\n zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time[:6])\n zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes\n if isdir:\n zinfo.file_size = 0\n zinfo.external_attr |= 0x10 # MS-DOS directory flag\n zinfo.compress_type = zipfile.ZIP_STORED\n data = b\"\"\n else:\n zinfo.file_size = st.st_size\n zinfo.compress_type = zipfile.ZIP_DEFLATED\n with open(filename, \"rb\") as fp:\n data = fp.read()\n return cls.ZipEntry(info=zinfo, data=data)\n\n def _extract_member(self, member, targetpath, pwd):\n result = super(PermPreservingZipFile, self)._extract_member(member, targetpath, pwd)\n info = member if isinstance(member, zipfile.ZipInfo) else self.getinfo(member)\n self._chmod(info, result)\n return result\n\n def _chmod(self, info, path):\n # This magic works to extract perm bits from the 32 bit external file attributes field for\n # unix-created zip files, for the layout, see:\n # https://www.forensicswiki.org/wiki/ZIP#External_file_attributes\n attr = info.external_attr >> 16\n os.chmod(path, attr)\n\n\[email protected]\ndef open_zip(path, *args, **kwargs):\n \"\"\"A contextmanager for zip files.\n\n Passes through positional and kwargs to zipfile.ZipFile.\n \"\"\"\n with contextlib.closing(PermPreservingZipFile(path, *args, **kwargs)) as zip:\n yield zip\n\n\[email protected]\ndef temporary_dir(cleanup=True):\n # type: (bool) -> Iterator[str]\n td = tempfile.mkdtemp()\n try:\n yield td\n finally:\n if cleanup:\n safe_rmtree(td)\n\n\ndef safe_mkdtemp(**kw):\n # type: (**Any) -> str\n \"\"\"Create a temporary directory that is cleaned up on process exit.\n\n Takes the same parameters as tempfile.mkdtemp.\n \"\"\"\n # proper lock sanitation on fork [issue 6721] would be desirable here.\n return _MKDTEMP_SINGLETON.register(tempfile.mkdtemp(**kw))\n\n\ndef register_rmtree(directory):\n # type: (str) -> str\n \"\"\"Register an existing directory to be cleaned up at process exit.\"\"\"\n return _MKDTEMP_SINGLETON.register(directory)\n\n\ndef safe_mkdir(directory, clean=False):\n # type: (str, bool) -> str\n \"\"\"Safely create a directory.\n\n Ensures a directory is present. If it's not there, it is created. If it is, it's a no-op. 
If\n clean is True, ensures the directory is empty.\n \"\"\"\n if clean:\n safe_rmtree(directory)\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n finally:\n return directory\n\n\ndef safe_open(filename, *args, **kwargs):\n \"\"\"Safely open a file.\n\n ``safe_open`` ensures that the directory components leading up the specified file have been\n created first.\n \"\"\"\n parent_dir = os.path.dirname(filename)\n if parent_dir:\n safe_mkdir(parent_dir)\n return open(filename, *args, **kwargs) # noqa: T802\n\n\ndef safe_delete(filename):\n # type: (str) -> None\n \"\"\"Delete a file safely.\n\n If it's not present, no-op.\n \"\"\"\n try:\n os.unlink(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n\n\ndef safe_rmtree(directory):\n # type: (str) -> None\n \"\"\"Delete a directory if it's present.\n\n If it's not present, no-op.\n \"\"\"\n if os.path.exists(directory):\n shutil.rmtree(directory, True)\n\n\ndef safe_sleep(seconds):\n # type: (float) -> None\n \"\"\"Ensure that the thread sleeps at a minimum the requested seconds.\n\n Until Python 3.5, there was no guarantee that time.sleep() would actually sleep the requested\n time. See https://docs.python.org/3/library/time.html#time.sleep.\n \"\"\"\n if sys.version_info[0:2] >= (3, 5):\n time.sleep(seconds)\n else:\n start_time = current_time = time.time()\n while current_time - start_time < seconds:\n remaining_time = seconds - (current_time - start_time)\n time.sleep(remaining_time)\n current_time = time.time()\n\n\nclass AtomicDirectory(object):\n def __init__(self, target_dir):\n # type: (str) -> None\n self._target_dir = target_dir\n self._work_dir = \"{}.{}\".format(target_dir, uuid4().hex)\n\n @property\n def work_dir(self):\n # type: () -> str\n return self._work_dir\n\n @property\n def target_dir(self):\n # type: () -> str\n return self._target_dir\n\n def is_finalized(self):\n # type: () -> bool\n return os.path.exists(self._target_dir)\n\n def finalize(self, source=None):\n # type: (Optional[str]) -> None\n \"\"\"Rename `work_dir` to `target_dir` using `os.rename()`.\n\n :param source: An optional source offset into the `work_dir`` to use for the atomic update\n of `target_dir`. By default the whole `work_dir` is used.\n\n If a race is lost and `target_dir` already exists, the `target_dir` dir is left unchanged and\n the `work_dir` directory will simply be removed.\n \"\"\"\n if self.is_finalized():\n return\n\n source = os.path.join(self._work_dir, source) if source else self._work_dir\n try:\n # Perform an atomic rename.\n #\n # Per the docs: https://docs.python.org/2.7/library/os.html#os.rename\n #\n # The operation may fail on some Unix flavors if src and dst are on different filesystems.\n # If successful, the renaming will be an atomic operation (this is a POSIX requirement).\n #\n # We have satisfied the single filesystem constraint by arranging the `work_dir` to be a\n # sibling of the `target_dir`.\n os.rename(source, self._target_dir)\n except OSError as e:\n if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):\n raise e\n finally:\n self.cleanup()\n\n def cleanup(self):\n # type: () -> None\n safe_rmtree(self._work_dir)\n\n\nclass FileLockStyle(Enum[\"FileLockStyle.Value\"]):\n class Value(Enum.Value):\n pass\n\n BSD = Value(\"bsd\")\n POSIX = Value(\"posix\")\n\n\n@contextmanager\ndef atomic_directory(\n target_dir, # type: str\n exclusive, # type: Union[bool, FileLockStyle.Value]\n source=None, # type: Optional[str]\n):\n # type: (...) 
-> Iterator[AtomicDirectory]\n \"\"\"A context manager that yields a potentially exclusively locked AtomicDirectory.\n\n :param target_dir: The target directory to atomically update.\n :param exclusive: If `True`, its guaranteed that only one process will be yielded a non `None`\n workdir; otherwise two or more processes might be yielded unique non-`None`\n workdirs with the last process to finish \"winning\". By default, a POSIX fcntl\n lock will be used to ensure exclusivity. To change this, pass an explicit\n `LockStyle` instead of `True`.\n :param source: An optional source offset into the work directory to use for the atomic update\n of the target directory. By default the whole work directory is used.\n\n If the `target_dir` already exists the enclosed block will be yielded an AtomicDirectory that\n `is_finalized` to signal there is no work to do.\n\n If the enclosed block fails the `target_dir` will be undisturbed.\n\n The new work directory will be cleaned up regardless of whether or not the enclosed block\n succeeds.\n\n If the contents of the resulting directory will be subsequently mutated it's probably correct to\n pass `exclusive=True` to ensure mutations that race the creation process are not lost.\n \"\"\"\n atomic_dir = AtomicDirectory(target_dir=target_dir)\n if atomic_dir.is_finalized():\n # Our work is already done for us so exit early.\n yield atomic_dir\n return\n\n lock_fd = None # type: Optional[int]\n lock_api = cast(\n \"Callable[[int, int], None]\",\n fcntl.flock if exclusive is FileLockStyle.BSD else fcntl.lockf,\n )\n\n def unlock():\n # type: () -> None\n if lock_fd is None:\n return\n try:\n lock_api(lock_fd, fcntl.LOCK_UN)\n finally:\n os.close(lock_fd)\n\n if exclusive:\n head, tail = os.path.split(atomic_dir.target_dir)\n if head:\n safe_mkdir(head)\n # N.B.: We don't actually write anything to the lock file but the fcntl file locking\n # operations only work on files opened for at least write.\n lock_fd = os.open(\n os.path.join(head, \".{}.atomic_directory.lck\".format(tail or \"here\")),\n os.O_CREAT | os.O_WRONLY,\n )\n # N.B.: Since lockf and flock operate on an open file descriptor and these are\n # guaranteed to be closed by the operating system when the owning process exits,\n # this lock is immune to staleness.\n lock_api(lock_fd, fcntl.LOCK_EX) # A blocking write lock.\n if atomic_dir.is_finalized():\n # We lost the double-checked locking race and our work was done for us by the race\n # winner so exit early.\n try:\n yield atomic_dir\n finally:\n unlock()\n return\n\n try:\n safe_mkdir(atomic_dir.work_dir)\n yield atomic_dir\n atomic_dir.finalize(source=source)\n finally:\n unlock()\n atomic_dir.cleanup()\n\n\ndef chmod_plus_x(path):\n # type: (str) -> None\n \"\"\"Equivalent of unix `chmod a+x path`\"\"\"\n path_mode = os.stat(path).st_mode\n path_mode &= int(\"777\", 8)\n if path_mode & stat.S_IRUSR:\n path_mode |= stat.S_IXUSR\n if path_mode & stat.S_IRGRP:\n path_mode |= stat.S_IXGRP\n if path_mode & stat.S_IROTH:\n path_mode |= stat.S_IXOTH\n os.chmod(path, path_mode)\n\n\ndef chmod_plus_w(path):\n # type: (str) -> None\n \"\"\"Equivalent of unix `chmod +w path`\"\"\"\n path_mode = os.stat(path).st_mode\n path_mode &= int(\"777\", 8)\n path_mode |= stat.S_IWRITE\n os.chmod(path, path_mode)\n\n\ndef is_exe(path):\n # type: (str) -> bool\n \"\"\"Determines if the given path is a file executable by the current user.\n\n :param path: The path to check.\n :return: `True if the given path is a file executable by the current user.\n \"\"\"\n return 
os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK)\n\n\ndef is_script(\n path, # type: str\n pattern=None, # type: Optional[str]\n check_executable=True, # type: bool\n):\n # type: (...) -> bool\n \"\"\"Determines if the given path is a script.\n\n A script is a file that starts with a shebang (#!...) line.\n\n :param path: The path to check.\n :param pattern: An optional pattern to match against the shebang (excluding the leading #!).\n :param check_executable: Check that the script is executable by the current user.\n :return: True if the given path is a script.\n \"\"\"\n if check_executable and not is_exe(path):\n return False\n with open(path, \"rb\") as fp:\n if b\"#!\" != fp.read(2):\n return False\n if not pattern:\n return True\n return bool(re.match(pattern, fp.readline().decode(\"utf-8\")))\n\n\ndef is_python_script(\n path, # type: str\n check_executable=True, # type: bool\n):\n # type: (...) -> bool\n return is_script(path, pattern=r\"(?i)^.*(?:python|pypy)\", check_executable=check_executable)\n\n\ndef can_write_dir(path):\n # type: (str) -> bool\n \"\"\"Determines if the directory at path can be written to by the current process.\n\n If the directory doesn't exist, determines if it can be created and thus written to.\n\n N.B.: This is a best-effort check only that uses permission heuristics and does not actually test\n that the directory can be written to with and writes.\n\n :param path: The directory path to test.\n :return:`True` if the given path is a directory that can be written to by the current process.\n \"\"\"\n while not os.access(path, os.F_OK):\n parent_path = os.path.dirname(path)\n if not parent_path or (parent_path == path):\n # We've recursed up to the root without success, which shouldn't happen,\n return False\n path = parent_path\n return os.path.isdir(path) and os.access(path, os.R_OK | os.W_OK | os.X_OK)\n\n\ndef touch(file):\n # type: (str) -> None\n \"\"\"Equivalent of unix `touch path`.\"\"\"\n with safe_open(file, \"a\"):\n os.utime(file, None)\n\n\nclass Chroot(object):\n \"\"\"A chroot of files overlaid from one directory to another directory.\n\n Files may be tagged when added in order to keep track of multiple overlays in the chroot.\n \"\"\"\n\n class Error(Exception):\n pass\n\n class ChrootTaggingException(Error):\n def __init__(self, filename, orig_tag, new_tag):\n super(Chroot.ChrootTaggingException, self).__init__( # noqa: T800\n \"Trying to add %s to fileset(%s) but already in fileset(%s)!\"\n % (filename, new_tag, orig_tag)\n )\n\n def __init__(self, chroot_base):\n # type: (str) -> None\n \"\"\"Create the chroot.\n\n :chroot_base Directory for the creation of the target chroot.\n \"\"\"\n try:\n safe_mkdir(chroot_base)\n except OSError as e:\n raise self.Error(\"Unable to create chroot in %s: %s\" % (chroot_base, e))\n self.chroot = chroot_base\n self.filesets = defaultdict(set) # type: DefaultDict[str, Set[str]]\n\n def clone(self, into=None):\n \"\"\"Clone this chroot.\n\n :keyword into: (optional) An optional destination directory to clone the\n Chroot into. If not specified, a temporary directory will be created.\n\n .. 
versionchanged:: 0.8\n The temporary directory created when ``into`` is not specified is now garbage collected on\n interpreter exit.\n \"\"\"\n into = into or safe_mkdtemp()\n new_chroot = Chroot(into)\n for label, fileset in self.filesets.items():\n for fn in fileset:\n new_chroot.link(os.path.join(self.chroot, fn), fn, label=label)\n return new_chroot\n\n def path(self):\n # type: () -> str\n \"\"\"The path of the chroot.\"\"\"\n return self.chroot\n\n def _normalize(self, dst):\n dst = os.path.normpath(dst)\n if dst.startswith(os.sep) or dst.startswith(\"..\"):\n raise self.Error(\"Destination path is not a relative path!\")\n return dst\n\n def _check_tag(self, fn, label):\n for fs_label, fs in self.filesets.items():\n if fn in fs and fs_label != label:\n raise self.ChrootTaggingException(fn, fs_label, label)\n\n def _tag(self, fn, label):\n self._check_tag(fn, label)\n self.filesets[label].add(fn)\n\n def _ensure_parent(self, path):\n safe_mkdir(os.path.dirname(os.path.join(self.chroot, path)))\n\n def copy(self, src, dst, label=None):\n \"\"\"Copy file ``src`` to ``chroot/dst`` with optional label.\n\n May raise anything shutil.copy can raise, e.g.\n IOError(Errno 21 'EISDIR')\n\n May raise ChrootTaggingException if dst is already in a fileset\n but with a different label.\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n shutil.copy(src, os.path.join(self.chroot, dst))\n\n def link(self, src, dst, label=None):\n \"\"\"Hard link file from ``src`` to ``chroot/dst`` with optional label.\n\n May raise anything os.link can raise, e.g.\n IOError(Errno 21 'EISDIR')\n\n May raise ChrootTaggingException if dst is already in a fileset\n but with a different label.\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n abs_src = src\n abs_dst = os.path.join(self.chroot, dst)\n safe_copy(abs_src, abs_dst, overwrite=False)\n # TODO: Ensure the target and dest are the same if the file already exists.\n\n def symlink(\n self,\n src, # type: str\n dst, # type: str\n label=None, # type: Optional[str]\n ):\n # type: (...) 
-> None\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n abs_src = os.path.abspath(src)\n abs_dst = os.path.join(self.chroot, dst)\n os.symlink(abs_src, abs_dst)\n\n def write(self, data, dst, label=None, mode=\"wb\", executable=False):\n \"\"\"Write data to ``chroot/dst`` with optional label.\n\n Has similar exceptional cases as ``Chroot.copy``\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n with open(os.path.join(self.chroot, dst), mode) as wp:\n wp.write(data)\n if executable:\n chmod_plus_x(wp.name)\n\n def touch(self, dst, label=None):\n \"\"\"Perform 'touch' on ``chroot/dst`` with optional label.\n\n Has similar exceptional cases as Chroot.copy\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n touch(os.path.join(self.chroot, dst))\n\n def get(self, label):\n \"\"\"Get all files labeled with ``label``\"\"\"\n return self.filesets.get(label, set())\n\n def files(self):\n \"\"\"Get all files in the chroot.\"\"\"\n all_files = set()\n for label in self.filesets:\n all_files.update(self.filesets[label])\n return all_files\n\n def labels(self):\n return self.filesets.keys()\n\n def __str__(self):\n return \"Chroot(%s {fs:%s})\" % (\n self.chroot,\n \" \".join(\"%s\" % foo for foo in self.filesets.keys()),\n )\n\n def delete(self):\n shutil.rmtree(self.chroot)\n\n def zip(\n self,\n filename, # type: str\n mode=\"w\", # type: str\n deterministic_timestamp=False, # type: bool\n exclude_file=lambda _: False, # type: Callable[[str], bool]\n strip_prefix=None, # type: Optional[str]\n labels=None, # type: Optional[Iterable[str]]\n compress=True, # type: bool\n ):\n # type: (...) -> None\n\n if labels:\n selected_files = set(\n itertools.chain.from_iterable(self.filesets.get(label, ()) for label in labels)\n )\n else:\n selected_files = self.files()\n\n compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED\n with open_zip(filename, mode, compression) as zf:\n\n def write_entry(\n filename, # type: str\n arcname, # type: str\n ):\n # type: (...) 
-> None\n zip_entry = zf.zip_entry_from_file(\n filename=filename,\n arcname=os.path.relpath(arcname, strip_prefix) if strip_prefix else arcname,\n date_time=DETERMINISTIC_DATETIME.timetuple()\n if deterministic_timestamp\n else None,\n )\n zf.writestr(zip_entry.info, zip_entry.data, compression)\n\n def get_parent_dir(path):\n # type: (str) -> Optional[str]\n parent_dir = os.path.normpath(os.path.dirname(path))\n if parent_dir and parent_dir != os.curdir:\n return parent_dir\n return None\n\n written_dirs = set()\n\n def maybe_write_parent_dirs(path):\n # type: (str) -> None\n parent_dir = get_parent_dir(path)\n if parent_dir is None or parent_dir in written_dirs:\n return\n maybe_write_parent_dirs(parent_dir)\n if parent_dir != strip_prefix:\n write_entry(filename=os.path.join(self.chroot, parent_dir), arcname=parent_dir)\n written_dirs.add(parent_dir)\n\n def iter_files():\n # type: () -> Iterator[Tuple[str, str]]\n for path in sorted(selected_files):\n full_path = os.path.join(self.chroot, path)\n if os.path.isfile(full_path):\n if exclude_file(full_path):\n continue\n yield full_path, path\n continue\n\n for root, _, files in os.walk(full_path):\n for f in sorted(files):\n if exclude_file(f):\n continue\n abs_path = os.path.join(root, f)\n rel_path = os.path.join(path, os.path.relpath(abs_path, full_path))\n yield abs_path, rel_path\n\n for filename, arcname in iter_files():\n maybe_write_parent_dirs(arcname)\n write_entry(filename, arcname)\n", "path": "pex/common.py" } ]
[ { "content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport atexit\nimport contextlib\nimport errno\nimport fcntl\nimport itertools\nimport os\nimport re\nimport shutil\nimport stat\nimport sys\nimport tempfile\nimport threading\nimport time\nimport zipfile\nfrom collections import defaultdict, namedtuple\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom uuid import uuid4\n\nfrom pex.enum import Enum\nfrom pex.typing import TYPE_CHECKING, cast\n\nif TYPE_CHECKING:\n from typing import (\n Any,\n Callable,\n DefaultDict,\n Iterable,\n Iterator,\n NoReturn,\n Optional,\n Set,\n Sized,\n Tuple,\n Union,\n )\n\n# We use the start of MS-DOS time, which is what zipfiles use (see section 4.4.6 of\n# https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT).\nDETERMINISTIC_DATETIME = datetime(\n year=1980, month=1, day=1, hour=0, minute=0, second=0, tzinfo=None\n)\n_UNIX_EPOCH = datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0, tzinfo=None)\nDETERMINISTIC_DATETIME_TIMESTAMP = (DETERMINISTIC_DATETIME - _UNIX_EPOCH).total_seconds()\n\n\ndef filter_pyc_dirs(dirs):\n # type: (Iterable[str]) -> Iterator[str]\n \"\"\"Return an iterator over the input `dirs` filtering out Python bytecode cache directories.\"\"\"\n for d in dirs:\n if d != \"__pycache__\":\n yield d\n\n\ndef filter_pyc_files(files):\n # type: (Iterable[str]) -> Iterator[str]\n \"\"\"Iterate the input `files` filtering out any Python bytecode files.\"\"\"\n for f in files:\n # For Python 2.7, `.pyc` files are compiled as siblings to `.py` files (there is no\n # __pycache__ dir).\n if not f.endswith((\".pyc\", \".pyo\")) and not is_pyc_temporary_file(f):\n yield f\n\n\ndef is_pyc_temporary_file(file_path):\n # type: (str) -> bool\n \"\"\"Check if `file` is a temporary Python bytecode file.\"\"\"\n # We rely on the fact that the temporary files created by CPython have object id (integer)\n # suffixes to avoid picking up files where Python bytecode compilation is in-flight; i.e.:\n # `.pyc.0123456789`-style files.\n return re.search(r\"\\.pyc\\.[0-9]+$\", file_path) is not None\n\n\ndef die(msg, exit_code=1):\n # type: (str, int) -> NoReturn\n print(msg, file=sys.stderr)\n sys.exit(exit_code)\n\n\ndef pluralize(\n subject, # type: Sized\n noun, # type: str\n):\n # type: (...) -> str\n if noun == \"\":\n return \"\"\n count = len(subject)\n if count == 1:\n return noun\n if noun[-1] in (\"s\", \"x\", \"z\") or noun[-2:] in (\"sh\", \"ch\"):\n return noun + \"es\"\n else:\n return noun + \"s\"\n\n\ndef safe_copy(source, dest, overwrite=False):\n # type: (str, str, bool) -> None\n def do_copy():\n # type: () -> None\n temp_dest = dest + uuid4().hex\n shutil.copy(source, temp_dest)\n os.rename(temp_dest, dest)\n\n # If the platform supports hard-linking, use that and fall back to copying.\n # Windows does not support hard-linking.\n if hasattr(os, \"link\"):\n try:\n os.link(source, dest)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # File already exists. If overwrite=True, write otherwise skip.\n if overwrite:\n do_copy()\n elif e.errno in (errno.EPERM, errno.EXDEV):\n # For a hard link across devices issue, fall back on copying.\n #\n # For a permission issue, the cause could be one of:\n # 1. We can't read source.\n # 2. We can't write dest.\n # 3. 
We don't own source but can read it.\n # Although we can't do anything about cases 1 and 2, case 3 is due to\n # `protected_hardlinks` (see: https://www.kernel.org/doc/Documentation/sysctl/fs.txt) and\n # we can fall back to copying in that case.\n #\n # See also https://github.com/pantsbuild/pex/issues/850 where this was discovered.\n do_copy()\n else:\n raise\n elif os.path.exists(dest):\n if overwrite:\n do_copy()\n else:\n do_copy()\n\n\n# See http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit\nclass MktempTeardownRegistry(object):\n def __init__(self):\n # type: () -> None\n self._registry = defaultdict(set) # type: DefaultDict[int, Set[str]]\n self._lock = threading.RLock()\n self._getpid = os.getpid\n self._rmtree = shutil.rmtree\n atexit.register(self.teardown)\n\n def __del__(self):\n # type: () -> None\n self.teardown()\n\n def register(self, path):\n # type: (str) -> str\n with self._lock:\n self._registry[self._getpid()].add(path)\n return path\n\n def teardown(self):\n # type: () -> None\n for td in self._registry.pop(self._getpid(), []):\n self._rmtree(td, ignore_errors=True)\n\n\n_MKDTEMP_SINGLETON = MktempTeardownRegistry()\n\n\nclass PermPreservingZipFile(zipfile.ZipFile, object):\n \"\"\"A ZipFile that works around https://bugs.python.org/issue15795.\"\"\"\n\n class ZipEntry(namedtuple(\"ZipEntry\", [\"info\", \"data\"])):\n pass\n\n @classmethod\n def zip_entry_from_file(cls, filename, arcname=None, date_time=None):\n \"\"\"Construct a ZipEntry for a file on the filesystem.\n\n Usually a similar `zip_info_from_file` method is provided by `ZipInfo`, but it is not\n implemented in Python 2.7 so we re-implement it here to construct the `info` for `ZipEntry`\n adding the possibility to control the `ZipInfo` date_time separately from the underlying\n file mtime. 
See https://github.com/python/cpython/blob/master/Lib/zipfile.py#L495.\n \"\"\"\n st = os.stat(filename)\n isdir = stat.S_ISDIR(st.st_mode)\n if arcname is None:\n arcname = filename\n arcname = os.path.normpath(os.path.splitdrive(arcname)[1])\n while arcname[0] in (os.sep, os.altsep):\n arcname = arcname[1:]\n if isdir:\n arcname += \"/\"\n if date_time is None:\n date_time = time.localtime(st.st_mtime)\n zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time[:6])\n zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes\n if isdir:\n zinfo.file_size = 0\n zinfo.external_attr |= 0x10 # MS-DOS directory flag\n zinfo.compress_type = zipfile.ZIP_STORED\n data = b\"\"\n else:\n zinfo.file_size = st.st_size\n zinfo.compress_type = zipfile.ZIP_DEFLATED\n with open(filename, \"rb\") as fp:\n data = fp.read()\n return cls.ZipEntry(info=zinfo, data=data)\n\n def _extract_member(self, member, targetpath, pwd):\n result = super(PermPreservingZipFile, self)._extract_member(member, targetpath, pwd)\n info = member if isinstance(member, zipfile.ZipInfo) else self.getinfo(member)\n self._chmod(info, result)\n return result\n\n def _chmod(self, info, path):\n # This magic works to extract perm bits from the 32 bit external file attributes field for\n # unix-created zip files, for the layout, see:\n # https://www.forensicswiki.org/wiki/ZIP#External_file_attributes\n attr = info.external_attr >> 16\n os.chmod(path, attr)\n\n\[email protected]\ndef open_zip(path, *args, **kwargs):\n \"\"\"A contextmanager for zip files.\n\n Passes through positional and kwargs to zipfile.ZipFile.\n \"\"\"\n with contextlib.closing(PermPreservingZipFile(path, *args, **kwargs)) as zip:\n yield zip\n\n\[email protected]\ndef temporary_dir(cleanup=True):\n # type: (bool) -> Iterator[str]\n td = tempfile.mkdtemp()\n try:\n yield td\n finally:\n if cleanup:\n safe_rmtree(td)\n\n\ndef safe_mkdtemp(**kw):\n # type: (**Any) -> str\n \"\"\"Create a temporary directory that is cleaned up on process exit.\n\n Takes the same parameters as tempfile.mkdtemp.\n \"\"\"\n # proper lock sanitation on fork [issue 6721] would be desirable here.\n return _MKDTEMP_SINGLETON.register(tempfile.mkdtemp(**kw))\n\n\ndef register_rmtree(directory):\n # type: (str) -> str\n \"\"\"Register an existing directory to be cleaned up at process exit.\"\"\"\n return _MKDTEMP_SINGLETON.register(directory)\n\n\ndef safe_mkdir(directory, clean=False):\n # type: (str, bool) -> str\n \"\"\"Safely create a directory.\n\n Ensures a directory is present. If it's not there, it is created. If it is, it's a no-op. 
If\n clean is True, ensures the directory is empty.\n \"\"\"\n if clean:\n safe_rmtree(directory)\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n finally:\n return directory\n\n\ndef safe_open(filename, *args, **kwargs):\n \"\"\"Safely open a file.\n\n ``safe_open`` ensures that the directory components leading up the specified file have been\n created first.\n \"\"\"\n parent_dir = os.path.dirname(filename)\n if parent_dir:\n safe_mkdir(parent_dir)\n return open(filename, *args, **kwargs) # noqa: T802\n\n\ndef safe_delete(filename):\n # type: (str) -> None\n \"\"\"Delete a file safely.\n\n If it's not present, no-op.\n \"\"\"\n try:\n os.unlink(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n\n\ndef safe_rmtree(directory):\n # type: (str) -> None\n \"\"\"Delete a directory if it's present.\n\n If it's not present, no-op.\n \"\"\"\n if os.path.exists(directory):\n shutil.rmtree(directory, True)\n\n\ndef safe_sleep(seconds):\n # type: (float) -> None\n \"\"\"Ensure that the thread sleeps at a minimum the requested seconds.\n\n Until Python 3.5, there was no guarantee that time.sleep() would actually sleep the requested\n time. See https://docs.python.org/3/library/time.html#time.sleep.\n \"\"\"\n if sys.version_info[0:2] >= (3, 5):\n time.sleep(seconds)\n else:\n start_time = current_time = time.time()\n while current_time - start_time < seconds:\n remaining_time = seconds - (current_time - start_time)\n time.sleep(remaining_time)\n current_time = time.time()\n\n\nclass AtomicDirectory(object):\n def __init__(self, target_dir):\n # type: (str) -> None\n self._target_dir = target_dir\n self._work_dir = \"{}.{}\".format(target_dir, uuid4().hex)\n\n @property\n def work_dir(self):\n # type: () -> str\n return self._work_dir\n\n @property\n def target_dir(self):\n # type: () -> str\n return self._target_dir\n\n def is_finalized(self):\n # type: () -> bool\n return os.path.exists(self._target_dir)\n\n def finalize(self, source=None):\n # type: (Optional[str]) -> None\n \"\"\"Rename `work_dir` to `target_dir` using `os.rename()`.\n\n :param source: An optional source offset into the `work_dir`` to use for the atomic update\n of `target_dir`. By default the whole `work_dir` is used.\n\n If a race is lost and `target_dir` already exists, the `target_dir` dir is left unchanged and\n the `work_dir` directory will simply be removed.\n \"\"\"\n if self.is_finalized():\n return\n\n source = os.path.join(self._work_dir, source) if source else self._work_dir\n try:\n # Perform an atomic rename.\n #\n # Per the docs: https://docs.python.org/2.7/library/os.html#os.rename\n #\n # The operation may fail on some Unix flavors if src and dst are on different filesystems.\n # If successful, the renaming will be an atomic operation (this is a POSIX requirement).\n #\n # We have satisfied the single filesystem constraint by arranging the `work_dir` to be a\n # sibling of the `target_dir`.\n os.rename(source, self._target_dir)\n except OSError as e:\n if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):\n raise e\n finally:\n self.cleanup()\n\n def cleanup(self):\n # type: () -> None\n safe_rmtree(self._work_dir)\n\n\nclass FileLockStyle(Enum[\"FileLockStyle.Value\"]):\n class Value(Enum.Value):\n pass\n\n BSD = Value(\"bsd\")\n POSIX = Value(\"posix\")\n\n\n@contextmanager\ndef atomic_directory(\n target_dir, # type: str\n exclusive, # type: Union[bool, FileLockStyle.Value]\n source=None, # type: Optional[str]\n):\n # type: (...) 
-> Iterator[AtomicDirectory]\n \"\"\"A context manager that yields a potentially exclusively locked AtomicDirectory.\n\n :param target_dir: The target directory to atomically update.\n :param exclusive: If `True`, its guaranteed that only one process will be yielded a non `None`\n workdir; otherwise two or more processes might be yielded unique non-`None`\n workdirs with the last process to finish \"winning\". By default, a POSIX fcntl\n lock will be used to ensure exclusivity. To change this, pass an explicit\n `LockStyle` instead of `True`.\n :param source: An optional source offset into the work directory to use for the atomic update\n of the target directory. By default the whole work directory is used.\n\n If the `target_dir` already exists the enclosed block will be yielded an AtomicDirectory that\n `is_finalized` to signal there is no work to do.\n\n If the enclosed block fails the `target_dir` will be undisturbed.\n\n The new work directory will be cleaned up regardless of whether or not the enclosed block\n succeeds.\n\n If the contents of the resulting directory will be subsequently mutated it's probably correct to\n pass `exclusive=True` to ensure mutations that race the creation process are not lost.\n \"\"\"\n atomic_dir = AtomicDirectory(target_dir=target_dir)\n if atomic_dir.is_finalized():\n # Our work is already done for us so exit early.\n yield atomic_dir\n return\n\n lock_fd = None # type: Optional[int]\n lock_api = cast(\n \"Callable[[int, int], None]\",\n fcntl.flock if exclusive is FileLockStyle.BSD else fcntl.lockf,\n )\n\n def unlock():\n # type: () -> None\n if lock_fd is None:\n return\n try:\n lock_api(lock_fd, fcntl.LOCK_UN)\n finally:\n os.close(lock_fd)\n\n if exclusive:\n head, tail = os.path.split(atomic_dir.target_dir)\n if head:\n safe_mkdir(head)\n # N.B.: We don't actually write anything to the lock file but the fcntl file locking\n # operations only work on files opened for at least write.\n lock_fd = os.open(\n os.path.join(head, \".{}.atomic_directory.lck\".format(tail or \"here\")),\n os.O_CREAT | os.O_WRONLY,\n )\n # N.B.: Since lockf and flock operate on an open file descriptor and these are\n # guaranteed to be closed by the operating system when the owning process exits,\n # this lock is immune to staleness.\n lock_api(lock_fd, fcntl.LOCK_EX) # A blocking write lock.\n if atomic_dir.is_finalized():\n # We lost the double-checked locking race and our work was done for us by the race\n # winner so exit early.\n try:\n yield atomic_dir\n finally:\n unlock()\n return\n\n try:\n os.makedirs(atomic_dir.work_dir)\n yield atomic_dir\n atomic_dir.finalize(source=source)\n finally:\n unlock()\n atomic_dir.cleanup()\n\n\ndef chmod_plus_x(path):\n # type: (str) -> None\n \"\"\"Equivalent of unix `chmod a+x path`\"\"\"\n path_mode = os.stat(path).st_mode\n path_mode &= int(\"777\", 8)\n if path_mode & stat.S_IRUSR:\n path_mode |= stat.S_IXUSR\n if path_mode & stat.S_IRGRP:\n path_mode |= stat.S_IXGRP\n if path_mode & stat.S_IROTH:\n path_mode |= stat.S_IXOTH\n os.chmod(path, path_mode)\n\n\ndef chmod_plus_w(path):\n # type: (str) -> None\n \"\"\"Equivalent of unix `chmod +w path`\"\"\"\n path_mode = os.stat(path).st_mode\n path_mode &= int(\"777\", 8)\n path_mode |= stat.S_IWRITE\n os.chmod(path, path_mode)\n\n\ndef is_exe(path):\n # type: (str) -> bool\n \"\"\"Determines if the given path is a file executable by the current user.\n\n :param path: The path to check.\n :return: `True if the given path is a file executable by the current user.\n \"\"\"\n 
return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK)\n\n\ndef is_script(\n path, # type: str\n pattern=None, # type: Optional[str]\n check_executable=True, # type: bool\n):\n # type: (...) -> bool\n \"\"\"Determines if the given path is a script.\n\n A script is a file that starts with a shebang (#!...) line.\n\n :param path: The path to check.\n :param pattern: An optional pattern to match against the shebang (excluding the leading #!).\n :param check_executable: Check that the script is executable by the current user.\n :return: True if the given path is a script.\n \"\"\"\n if check_executable and not is_exe(path):\n return False\n with open(path, \"rb\") as fp:\n if b\"#!\" != fp.read(2):\n return False\n if not pattern:\n return True\n return bool(re.match(pattern, fp.readline().decode(\"utf-8\")))\n\n\ndef is_python_script(\n path, # type: str\n check_executable=True, # type: bool\n):\n # type: (...) -> bool\n return is_script(path, pattern=r\"(?i)^.*(?:python|pypy)\", check_executable=check_executable)\n\n\ndef can_write_dir(path):\n # type: (str) -> bool\n \"\"\"Determines if the directory at path can be written to by the current process.\n\n If the directory doesn't exist, determines if it can be created and thus written to.\n\n N.B.: This is a best-effort check only that uses permission heuristics and does not actually test\n that the directory can be written to with and writes.\n\n :param path: The directory path to test.\n :return:`True` if the given path is a directory that can be written to by the current process.\n \"\"\"\n while not os.access(path, os.F_OK):\n parent_path = os.path.dirname(path)\n if not parent_path or (parent_path == path):\n # We've recursed up to the root without success, which shouldn't happen,\n return False\n path = parent_path\n return os.path.isdir(path) and os.access(path, os.R_OK | os.W_OK | os.X_OK)\n\n\ndef touch(file):\n # type: (str) -> None\n \"\"\"Equivalent of unix `touch path`.\"\"\"\n with safe_open(file, \"a\"):\n os.utime(file, None)\n\n\nclass Chroot(object):\n \"\"\"A chroot of files overlaid from one directory to another directory.\n\n Files may be tagged when added in order to keep track of multiple overlays in the chroot.\n \"\"\"\n\n class Error(Exception):\n pass\n\n class ChrootTaggingException(Error):\n def __init__(self, filename, orig_tag, new_tag):\n super(Chroot.ChrootTaggingException, self).__init__( # noqa: T800\n \"Trying to add %s to fileset(%s) but already in fileset(%s)!\"\n % (filename, new_tag, orig_tag)\n )\n\n def __init__(self, chroot_base):\n # type: (str) -> None\n \"\"\"Create the chroot.\n\n :chroot_base Directory for the creation of the target chroot.\n \"\"\"\n try:\n safe_mkdir(chroot_base)\n except OSError as e:\n raise self.Error(\"Unable to create chroot in %s: %s\" % (chroot_base, e))\n self.chroot = chroot_base\n self.filesets = defaultdict(set) # type: DefaultDict[str, Set[str]]\n\n def clone(self, into=None):\n \"\"\"Clone this chroot.\n\n :keyword into: (optional) An optional destination directory to clone the\n Chroot into. If not specified, a temporary directory will be created.\n\n .. 
versionchanged:: 0.8\n The temporary directory created when ``into`` is not specified is now garbage collected on\n interpreter exit.\n \"\"\"\n into = into or safe_mkdtemp()\n new_chroot = Chroot(into)\n for label, fileset in self.filesets.items():\n for fn in fileset:\n new_chroot.link(os.path.join(self.chroot, fn), fn, label=label)\n return new_chroot\n\n def path(self):\n # type: () -> str\n \"\"\"The path of the chroot.\"\"\"\n return self.chroot\n\n def _normalize(self, dst):\n dst = os.path.normpath(dst)\n if dst.startswith(os.sep) or dst.startswith(\"..\"):\n raise self.Error(\"Destination path is not a relative path!\")\n return dst\n\n def _check_tag(self, fn, label):\n for fs_label, fs in self.filesets.items():\n if fn in fs and fs_label != label:\n raise self.ChrootTaggingException(fn, fs_label, label)\n\n def _tag(self, fn, label):\n self._check_tag(fn, label)\n self.filesets[label].add(fn)\n\n def _ensure_parent(self, path):\n safe_mkdir(os.path.dirname(os.path.join(self.chroot, path)))\n\n def copy(self, src, dst, label=None):\n \"\"\"Copy file ``src`` to ``chroot/dst`` with optional label.\n\n May raise anything shutil.copy can raise, e.g.\n IOError(Errno 21 'EISDIR')\n\n May raise ChrootTaggingException if dst is already in a fileset\n but with a different label.\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n shutil.copy(src, os.path.join(self.chroot, dst))\n\n def link(self, src, dst, label=None):\n \"\"\"Hard link file from ``src`` to ``chroot/dst`` with optional label.\n\n May raise anything os.link can raise, e.g.\n IOError(Errno 21 'EISDIR')\n\n May raise ChrootTaggingException if dst is already in a fileset\n but with a different label.\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n abs_src = src\n abs_dst = os.path.join(self.chroot, dst)\n safe_copy(abs_src, abs_dst, overwrite=False)\n # TODO: Ensure the target and dest are the same if the file already exists.\n\n def symlink(\n self,\n src, # type: str\n dst, # type: str\n label=None, # type: Optional[str]\n ):\n # type: (...) 
-> None\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n abs_src = os.path.abspath(src)\n abs_dst = os.path.join(self.chroot, dst)\n os.symlink(abs_src, abs_dst)\n\n def write(self, data, dst, label=None, mode=\"wb\", executable=False):\n \"\"\"Write data to ``chroot/dst`` with optional label.\n\n Has similar exceptional cases as ``Chroot.copy``\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n with open(os.path.join(self.chroot, dst), mode) as wp:\n wp.write(data)\n if executable:\n chmod_plus_x(wp.name)\n\n def touch(self, dst, label=None):\n \"\"\"Perform 'touch' on ``chroot/dst`` with optional label.\n\n Has similar exceptional cases as Chroot.copy\n \"\"\"\n dst = self._normalize(dst)\n self._tag(dst, label)\n touch(os.path.join(self.chroot, dst))\n\n def get(self, label):\n \"\"\"Get all files labeled with ``label``\"\"\"\n return self.filesets.get(label, set())\n\n def files(self):\n \"\"\"Get all files in the chroot.\"\"\"\n all_files = set()\n for label in self.filesets:\n all_files.update(self.filesets[label])\n return all_files\n\n def labels(self):\n return self.filesets.keys()\n\n def __str__(self):\n return \"Chroot(%s {fs:%s})\" % (\n self.chroot,\n \" \".join(\"%s\" % foo for foo in self.filesets.keys()),\n )\n\n def delete(self):\n shutil.rmtree(self.chroot)\n\n def zip(\n self,\n filename, # type: str\n mode=\"w\", # type: str\n deterministic_timestamp=False, # type: bool\n exclude_file=lambda _: False, # type: Callable[[str], bool]\n strip_prefix=None, # type: Optional[str]\n labels=None, # type: Optional[Iterable[str]]\n compress=True, # type: bool\n ):\n # type: (...) -> None\n\n if labels:\n selected_files = set(\n itertools.chain.from_iterable(self.filesets.get(label, ()) for label in labels)\n )\n else:\n selected_files = self.files()\n\n compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED\n with open_zip(filename, mode, compression) as zf:\n\n def write_entry(\n filename, # type: str\n arcname, # type: str\n ):\n # type: (...) 
-> None\n zip_entry = zf.zip_entry_from_file(\n filename=filename,\n arcname=os.path.relpath(arcname, strip_prefix) if strip_prefix else arcname,\n date_time=DETERMINISTIC_DATETIME.timetuple()\n if deterministic_timestamp\n else None,\n )\n zf.writestr(zip_entry.info, zip_entry.data, compression)\n\n def get_parent_dir(path):\n # type: (str) -> Optional[str]\n parent_dir = os.path.normpath(os.path.dirname(path))\n if parent_dir and parent_dir != os.curdir:\n return parent_dir\n return None\n\n written_dirs = set()\n\n def maybe_write_parent_dirs(path):\n # type: (str) -> None\n parent_dir = get_parent_dir(path)\n if parent_dir is None or parent_dir in written_dirs:\n return\n maybe_write_parent_dirs(parent_dir)\n if parent_dir != strip_prefix:\n write_entry(filename=os.path.join(self.chroot, parent_dir), arcname=parent_dir)\n written_dirs.add(parent_dir)\n\n def iter_files():\n # type: () -> Iterator[Tuple[str, str]]\n for path in sorted(selected_files):\n full_path = os.path.join(self.chroot, path)\n if os.path.isfile(full_path):\n if exclude_file(full_path):\n continue\n yield full_path, path\n continue\n\n for root, _, files in os.walk(full_path):\n for f in sorted(files):\n if exclude_file(f):\n continue\n abs_path = os.path.join(root, f)\n rel_path = os.path.join(path, os.path.relpath(abs_path, full_path))\n yield abs_path, rel_path\n\n for filename, arcname in iter_files():\n maybe_write_parent_dirs(arcname)\n write_entry(filename, arcname)\n", "path": "pex/common.py" } ]
diff --git a/pex/common.py b/pex/common.py index 0cf30958a..36a768e11 100644 --- a/pex/common.py +++ b/pex/common.py @@ -466,7 +466,7 @@ def unlock(): return try: - safe_mkdir(atomic_dir.work_dir) + os.makedirs(atomic_dir.work_dir) yield atomic_dir atomic_dir.finalize(source=source) finally:
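The one-line change above works because `os.makedirs()` raises when the directory already exists, whereas `safe_mkdir()` deliberately swallows `EEXIST`; a UUID4 collision on the work_dir therefore becomes a loud failure instead of two processes silently sharing the directory. A small self-contained sketch of that difference, using a temporary directory to stand in for a work_dir another process already claimed:

```python
import errno
import os
import shutil
import tempfile

existing_work_dir = tempfile.mkdtemp()  # pretend another process created this

try:
    os.makedirs(existing_work_dir)  # the colliding process now fails loudly
except OSError as e:                # FileExistsError on Python 3
    assert e.errno == errno.EEXIST
finally:
    shutil.rmtree(existing_work_dir)
```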
pre-commit__pre-commit-167
npmrc causes npm to install to home directory instead of nodeenv Here is what happened when I tried to get eslint installed: ``` $ pre-commit run --all-files eslint..............................................................................................................................................................................................................................................................................................................Failed hookid: eslint xargs: eslint: No such file or directory ``` Moving .npmrc to nope.npmrc fixed the issue.
[ { "content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2.9',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/pre-commit-hook',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.9.4',\n 'ordereddict',\n 'plumbum',\n 'pyyaml',\n 'simplejson',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'validate-config = pre_commit.clientlib.validate_config:run',\n 'validate-manifest = pre_commit.clientlib.validate_manifest:run',\n ],\n },\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2.9',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/pre-commit-hook',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n 'ordereddict',\n 'plumbum',\n 'pyyaml',\n 'simplejson',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'validate-config = pre_commit.clientlib.validate_config:run',\n 'validate-manifest = pre_commit.clientlib.validate_manifest:run',\n ],\n },\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index cff71ebe6..62ea21198 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ 'aspy.yaml', 'cached-property', 'jsonschema', - 'nodeenv>=0.9.4', + 'nodeenv>=0.11.1', 'ordereddict', 'plumbum', 'pyyaml',
conan-io__conan-8167
[bug] YCM generator uses deprecated FlagsForFile method instead of Settings <!-- Please don't forget to update the issue title. Include all applicable information to help us reproduce your problem. To help us debug your issue please explain: --> ### Environment Details (include every applicable attribute) * Operating System+version: macOS 10.14.5 * Compiler+version: clang 10.0.1 * Conan version: 1.31.4 * Python version: 3.9.0 ### Steps to reproduce (Include if Applicable) Follow instructions at https://docs.conan.io/en/latest/integrations/ide/youcompleteme.html#youcompleteme-integration to configure `.ycm_extra_conf` and `conan_ycm_flags.json`: conanfile.txt ``` [generators] ycm ``` ```bash # from your base folder $ cp build/conan_ycm_extra_conf.py .ycm_extra_conf.py $ ln -s build/conan_ycm_flags.json conan_ycm_flags.json ``` Install `gtest` as a package, and then import it in a source file. ### Logs (Executed commands with output) (Include/Attach if Applicable) <!-- Your log content should be related to the bug description, it can be: - Conan command output - Server output (Artifactory, conan_server) --> YCM was unable to find the gtest package as installed by conan. YCM Debug Info: ``` Printing YouCompleteMe debug information... -- Resolve completions: Up front -- Client logfile: /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/ycm_x9dk66na.log -- Server Python interpreter: /usr/local/opt/[email protected]/bin/python3.9 -- Server Python version: 3.9.0 -- Server has Clang support compiled in: True -- Clang version: clang version 10.0.0 -- Extra configuration file found and loaded -- Extra configuration path: /Users/username/home/projects/project/.ycm_extra_conf.py -- C-family completer debug information: -- Clangd running -- Clangd process ID: 56305 -- Clangd executable: ['/Users/username/.vim/plugged/YouCompleteMe/third_party/ycmd/third_party/clangd/output/bin/clangd', '-header-insertion-decorators=0', '-resource-dir=/Users/ username/.vim/plugged/YouCompleteMe/third_party/ycmd/third_party/clang/lib/clang/10.0.0', '-limit-results=500', '-log=verbose'] -- Clangd logfiles: -- /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/clangd_stderr615mhccn.log -- Clangd Server State: Initialized -- Clangd Project Directory: /Users/username/home/projects/project -- Clangd Settings: {} -- Clangd Compilation Command: False -- Server running at: http://127.0.0.1:50225 -- Server process ID: 56303 -- Server logfiles: -- /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/ycmd_50225_stdout_nstboyjy.log -- /var/folders/_2/cyfwx31x0y1dh06whkrkrmh00000gn/T/ycmd_50225_stderr_ey11rfes.log ``` As can be seen, `clangd` is not using the flags `'-x', 'c++'` as defined in the default `flags` list in the generated `.ycm_extra_conf.py`, or the `gtest` package as installed by conan. The generated `conan_ycm_flags.json` file contains the following: ``` { "includes": [ "-isystem/Users/username/.conan/data/gtest/1.10.0/_/_/package/03ad53d73db1da068548d1d6a87ac3219077b5c0/include", "-isystem/Users/username/.conan/data/rapidjson/1.1.0/_/_/package/5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9/include" ], "defines": [], "flags": [] } ``` These flags are also not included in the compilation arguments. The issue appears to be caused by the fact that the [generator](https://github.com/conan-io/conan/blob/develop/conans/client/generators/ycm.py) uses the deprecated `FlagsForFile` method instead of it's replacement, `Settings`. 
This can be resolved by modifying line 143 from:

```python
def FlagsForFile( filename, **kwargs ):
```

to

```python
def Settings( filename, **kwargs ):
```

As a new user of YCM and Conan, I found this took an inordinate amount of time to troubleshoot, even though the fix itself is relatively trivial.
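For illustration, here is a minimal, self-contained sketch of a `.ycm_extra_conf.py` built around the newer `Settings` entry point described above. The helper name `_load_conan_flags`, the `-std=c++17` flag, and the fallback flags are illustrative assumptions rather than the Conan generator's exact output; only the `Settings(filename, **kwargs)` signature and the `conan_ycm_flags.json` layout come from the report itself.

```python
# Minimal .ycm_extra_conf.py sketch: ycmd calls Settings() (the successor of
# the deprecated FlagsForFile()) and expects a dict with a 'flags' list.
import json
import os

_DIR = os.path.dirname(os.path.abspath(__file__))


def _load_conan_flags():
    # Read the include/define/flag lists written to conan_ycm_flags.json by
    # the ycm generator, if the file is present next to this config.
    path = os.path.join(_DIR, "conan_ycm_flags.json")
    if not os.path.exists(path):
        return []
    with open(path, "r") as fp:
        data = json.load(fp)
    return data.get("flags", []) + data.get("defines", []) + data.get("includes", [])


def Settings(filename, **kwargs):
    # 'flags' is the key ycmd reads for C-family completion; the base flags
    # here are placeholders for whatever your project actually needs.
    return {
        "flags": ["-x", "c++", "-std=c++17"] + _load_conan_flags(),
        "do_cache": True,
    }
```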
[ { "content": "import json\n\nfrom conans.model import Generator\n\n\nclass YouCompleteMeGenerator(Generator):\n template = '''\n# This file is NOT licensed under the GPLv3, which is the license for the rest\n# of YouCompleteMe.\n#\n# Here's the license text for this file:\n#\n# This is free and unencumbered software released into the public domain.\n#\n# Anyone is free to copy, modify, publish, use, compile, sell, or\n# distribute this software, either in source code form or as a compiled\n# binary, for any purpose, commercial or non-commercial, and by any\n# means.\n#\n# In jurisdictions that recognize copyright laws, the author or authors\n# of this software dedicate any and all copyright interest in the\n# software to the public domain. We make this dedication for the benefit\n# of the public at large and to the detriment of our heirs and\n# successors. We intend this dedication to be an overt act of\n# relinquishment in perpetuity of all present and future rights to this\n# software under copyright law.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# For more information, please refer to <http://unlicense.org/>\n\nimport os\nimport json\nimport ycm_core\nimport logging\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef DirectoryOfThisScript():\n return os.path.dirname( os.path.abspath( __file__ ) )\n\n\n# These are the compilation flags that will be used in case there's no\n# compilation database set (by default, one is not set).\n# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.\nflags = [\n '-x', 'c++'\n]\n\nconan_flags = json.loads(open(\"conan_ycm_flags.json\", \"r\").read())\n\nflags.extend(conan_flags[\"flags\"])\nflags.extend(conan_flags[\"defines\"])\nflags.extend(conan_flags[\"includes\"])\n\n\n# Set this to the absolute path to the folder (NOT the file!) containing the\n# compile_commands.json file to use that instead of 'flags'. See here for\n# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html\n#\n# You can get CMake to generate this file for you by adding:\n# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )\n# to your CMakeLists.txt file.\n#\n# Most projects will NOT need to set this to anything; you can just change the\n# 'flags' list of compilation flags. 
Notice that YCM itself uses that approach.\ncompilation_database_folder = os.path.join(DirectoryOfThisScript(), 'Debug')\n\nif os.path.exists( compilation_database_folder ):\n database = ycm_core.CompilationDatabase( compilation_database_folder )\n if not database.DatabaseSuccessfullyLoaded():\n _logger.warn(\"Failed to load database\")\n database = None\nelse:\n database = None\n\nSOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]\n\ndef GetAbsolutePath(include_path, working_directory):\n if os.path.isabs(include_path):\n return include_path\n return os.path.join(working_directory, include_path)\n\n\ndef MakeRelativePathsInFlagsAbsolute( flags, working_directory ):\n if not working_directory:\n return list( flags )\n new_flags = []\n make_next_absolute = False\n path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]\n for flag in flags:\n new_flag = flag\n\n if make_next_absolute:\n make_next_absolute = False\n new_flag = GetAbsolutePath(flag, working_directory)\n\n for path_flag in path_flags:\n if flag == path_flag:\n make_next_absolute = True\n break\n\n if flag.startswith( path_flag ):\n path = flag[ len( path_flag ): ]\n new_flag = flag[:len(path_flag)] + GetAbsolutePath(path, working_directory)\n break\n\n if new_flag:\n new_flags.append( new_flag )\n return new_flags\n\n\ndef IsHeaderFile( filename ):\n extension = os.path.splitext( filename )[ 1 ]\n return extension.lower() in [ '.h', '.hxx', '.hpp', '.hh' ]\n\n\ndef GetCompilationInfoForFile( filename ):\n # The compilation_commands.json file generated by CMake does not have entries\n # for header files. So we do our best by asking the db for flags for a\n # corresponding source file, if any. If one exists, the flags for that file\n # should be good enough.\n if IsHeaderFile( filename ):\n basename = os.path.splitext( filename )[ 0 ]\n for extension in SOURCE_EXTENSIONS:\n replacement_file = basename + extension\n if os.path.exists( replacement_file ):\n compilation_info = database.GetCompilationInfoForFile( replacement_file )\n if compilation_info.compiler_flags_:\n return compilation_info\n return None\n return database.GetCompilationInfoForFile( filename )\n\n\ndef FlagsForFile( filename, **kwargs ):\n relative_to = None\n compiler_flags = None\n\n if database:\n # Bear in mind that compilation_info.compiler_flags_ does NOT return a\n # python list, but a \"list-like\" StringVec object\n compilation_info = GetCompilationInfoForFile( filename )\n if compilation_info is None:\n relative_to = DirectoryOfThisScript()\n compiler_flags = flags\n else:\n relative_to = compilation_info.compiler_working_dir_\n compiler_flags = compilation_info.compiler_flags_\n\n else:\n relative_to = DirectoryOfThisScript()\n compiler_flags = flags\n\n final_flags = MakeRelativePathsInFlagsAbsolute( compiler_flags, relative_to )\n for flag in final_flags:\n if flag.startswith(\"-W\"):\n final_flags.remove(flag)\n _logger.info(\"Final flags for %s are %s\" % (filename, ' '.join(final_flags)))\n\n return {{\n 'flags': final_flags + [\"-I/usr/include\", \"-I/usr/include/c++/{cxx_version}\"],\n 'do_cache': True\n }}\n'''\n\n @property\n def filename(self):\n pass\n\n @property\n def content(self):\n def prefixed(prefix, values):\n return [prefix + x for x in values]\n\n conan_flags = {\n \"includes\": prefixed(\"-isystem\", self.deps_build_info.include_paths),\n \"defines\": prefixed(\"-D\", self.deps_build_info.defines),\n \"flags\": self.deps_build_info.cxxflags\n }\n\n cxx_version = ''\n try:\n cxx_version = 
str(self.settings.compiler.version).split('.')[0]\n except Exception:\n pass\n\n ycm_data = self.template.format(cxx_version=cxx_version)\n return {\"conan_ycm_extra_conf.py\": ycm_data,\n \"conan_ycm_flags.json\": json.dumps(conan_flags, indent=2)}\n", "path": "conans/client/generators/ycm.py" } ]
[ { "content": "import json\n\nfrom conans.model import Generator\n\n\nclass YouCompleteMeGenerator(Generator):\n template = '''\n# This file is NOT licensed under the GPLv3, which is the license for the rest\n# of YouCompleteMe.\n#\n# Here's the license text for this file:\n#\n# This is free and unencumbered software released into the public domain.\n#\n# Anyone is free to copy, modify, publish, use, compile, sell, or\n# distribute this software, either in source code form or as a compiled\n# binary, for any purpose, commercial or non-commercial, and by any\n# means.\n#\n# In jurisdictions that recognize copyright laws, the author or authors\n# of this software dedicate any and all copyright interest in the\n# software to the public domain. We make this dedication for the benefit\n# of the public at large and to the detriment of our heirs and\n# successors. We intend this dedication to be an overt act of\n# relinquishment in perpetuity of all present and future rights to this\n# software under copyright law.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# For more information, please refer to <http://unlicense.org/>\n\nimport os\nimport json\nimport ycm_core\nimport logging\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef DirectoryOfThisScript():\n return os.path.dirname( os.path.abspath( __file__ ) )\n\n\n# These are the compilation flags that will be used in case there's no\n# compilation database set (by default, one is not set).\n# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.\nflags = [\n '-x', 'c++'\n]\n\nconan_flags = json.loads(open(\"conan_ycm_flags.json\", \"r\").read())\n\nflags.extend(conan_flags[\"flags\"])\nflags.extend(conan_flags[\"defines\"])\nflags.extend(conan_flags[\"includes\"])\n\n\n# Set this to the absolute path to the folder (NOT the file!) containing the\n# compile_commands.json file to use that instead of 'flags'. See here for\n# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html\n#\n# You can get CMake to generate this file for you by adding:\n# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )\n# to your CMakeLists.txt file.\n#\n# Most projects will NOT need to set this to anything; you can just change the\n# 'flags' list of compilation flags. 
Notice that YCM itself uses that approach.\ncompilation_database_folder = os.path.join(DirectoryOfThisScript(), 'Debug')\n\nif os.path.exists( compilation_database_folder ):\n database = ycm_core.CompilationDatabase( compilation_database_folder )\n if not database.DatabaseSuccessfullyLoaded():\n _logger.warn(\"Failed to load database\")\n database = None\nelse:\n database = None\n\nSOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]\n\ndef GetAbsolutePath(include_path, working_directory):\n if os.path.isabs(include_path):\n return include_path\n return os.path.join(working_directory, include_path)\n\n\ndef MakeRelativePathsInFlagsAbsolute( flags, working_directory ):\n if not working_directory:\n return list( flags )\n new_flags = []\n make_next_absolute = False\n path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]\n for flag in flags:\n new_flag = flag\n\n if make_next_absolute:\n make_next_absolute = False\n new_flag = GetAbsolutePath(flag, working_directory)\n\n for path_flag in path_flags:\n if flag == path_flag:\n make_next_absolute = True\n break\n\n if flag.startswith( path_flag ):\n path = flag[ len( path_flag ): ]\n new_flag = flag[:len(path_flag)] + GetAbsolutePath(path, working_directory)\n break\n\n if new_flag:\n new_flags.append( new_flag )\n return new_flags\n\n\ndef IsHeaderFile( filename ):\n extension = os.path.splitext( filename )[ 1 ]\n return extension.lower() in [ '.h', '.hxx', '.hpp', '.hh' ]\n\n\ndef GetCompilationInfoForFile( filename ):\n # The compilation_commands.json file generated by CMake does not have entries\n # for header files. So we do our best by asking the db for flags for a\n # corresponding source file, if any. If one exists, the flags for that file\n # should be good enough.\n if IsHeaderFile( filename ):\n basename = os.path.splitext( filename )[ 0 ]\n for extension in SOURCE_EXTENSIONS:\n replacement_file = basename + extension\n if os.path.exists( replacement_file ):\n compilation_info = database.GetCompilationInfoForFile( replacement_file )\n if compilation_info.compiler_flags_:\n return compilation_info\n return None\n return database.GetCompilationInfoForFile( filename )\n\n\ndef Settings( filename, **kwargs ):\n relative_to = None\n compiler_flags = None\n\n if database:\n # Bear in mind that compilation_info.compiler_flags_ does NOT return a\n # python list, but a \"list-like\" StringVec object\n compilation_info = GetCompilationInfoForFile( filename )\n if compilation_info is None:\n relative_to = DirectoryOfThisScript()\n compiler_flags = flags\n else:\n relative_to = compilation_info.compiler_working_dir_\n compiler_flags = compilation_info.compiler_flags_\n\n else:\n relative_to = DirectoryOfThisScript()\n compiler_flags = flags\n\n final_flags = MakeRelativePathsInFlagsAbsolute( compiler_flags, relative_to )\n for flag in final_flags:\n if flag.startswith(\"-W\"):\n final_flags.remove(flag)\n _logger.info(\"Final flags for %s are %s\" % (filename, ' '.join(final_flags)))\n\n return {{\n 'flags': final_flags + [\"-I/usr/include\", \"-I/usr/include/c++/{cxx_version}\"],\n 'do_cache': True\n }}\n'''\n\n @property\n def filename(self):\n pass\n\n @property\n def content(self):\n def prefixed(prefix, values):\n return [prefix + x for x in values]\n\n conan_flags = {\n \"includes\": prefixed(\"-isystem\", self.deps_build_info.include_paths),\n \"defines\": prefixed(\"-D\", self.deps_build_info.defines),\n \"flags\": self.deps_build_info.cxxflags\n }\n\n cxx_version = ''\n try:\n cxx_version = 
str(self.settings.compiler.version).split('.')[0]\n except Exception:\n pass\n\n ycm_data = self.template.format(cxx_version=cxx_version)\n return {\"conan_ycm_extra_conf.py\": ycm_data,\n \"conan_ycm_flags.json\": json.dumps(conan_flags, indent=2)}\n", "path": "conans/client/generators/ycm.py" } ]
diff --git a/conans/client/generators/ycm.py b/conans/client/generators/ycm.py
index 1a147d01ca4..3e391be6345 100644
--- a/conans/client/generators/ycm.py
+++ b/conans/client/generators/ycm.py
@@ -140,7 +140,7 @@ def GetCompilationInfoForFile( filename ):
     return database.GetCompilationInfoForFile( filename )


-def FlagsForFile( filename, **kwargs ):
+def Settings( filename, **kwargs ):
     relative_to = None
     compiler_flags = None
streamlit__streamlit-4724
streamlit's webserver not working when using pdm for installation

### Summary

When I'm using [pdm](https://pdm.fming.dev/) to install streamlit, I cannot connect to streamlit's webserver. From pdm's web site: PDM is a modern Python package manager with PEP 582 support.

### Steps to reproduce

1. Install PDM: `pipx install pdm`
2. Create a new directory and go there: `mkdir st_test; cd st_test`
3. Init pdm: `pdm init` (and use the defaults)
4. Install streamlit: `pdm add streamlit`
5. Use any minimal streamlit example and run streamlit: `pdm run streamlit run hello_world.py`

**Expected behavior:**

Browser opens the correct web page served by streamlit.

**Actual behavior:**

Browser tries to connect to `localhost:3000` (which is also mentioned in the logs), but when I look at the logs, the server actually runs on port 8501. When I try this port, I get a 404, so I _can_ connect to the server on this port, but something's broken.

### Is this a regression? That is, did this use to work the way you expected in the past?

Never tried before.

### Debug info

- Streamlit version: 0.80.0
- Python version: 3.8.8
- Using pdm
- OS version: Linux 5.11.11
- Browser version: Chrome 89.0.4389.114 (Official Build) (64-bit)

### Additional information

jupyter-lab shows a similar issue with pdm, but it can be fixed by running `pdm run jupyter-lab --core-mode`
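For reference, a small self-contained sketch of the detection heuristic at play, adapted from `_global_development_mode` in `lib/streamlit/config.py` as shown in the files below: when Streamlit thinks it was not installed normally it enables development mode, which points the browser at the front-end dev port (3000) instead of `server.port`. The `__pypackages__` check mirrors the change in the fixed version of the file; the function name and example paths here are hypothetical and only for illustration.

```python
def looks_like_development_install(module_file: str) -> bool:
    # Streamlit enables "development mode" when its own module path does not
    # look like a normal installation directory. Under PDM's PEP 582 layout
    # packages live under __pypackages__/, so without the extra check below
    # the heuristic misfires and the browser is sent to port 3000.
    return (
        "site-packages" not in module_file
        and "dist-packages" not in module_file
        and "__pypackages__" not in module_file  # keeps PEP 582 installs out of dev mode
    )


# Hypothetical install locations:
pip_path = "/usr/lib/python3.8/site-packages/streamlit/config.py"
pdm_path = "/home/user/st_test/__pypackages__/3.8/lib/streamlit/config.py"

print(looks_like_development_install(pip_path))  # False -> treated as a normal install
print(looks_like_development_install(pdm_path))  # False only because of the __pypackages__ check
```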
[ { "content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loads the configuration data.\"\"\"\n\nimport copy\nimport os\nimport secrets\nimport threading\nimport toml\nfrom collections import OrderedDict\nfrom typing import Any, Callable, cast, Dict, Optional\n\nfrom blinker import Signal\n\nfrom streamlit import config_util\nfrom streamlit import development\nfrom streamlit import env_util\nfrom streamlit import file_util\nfrom streamlit import util\nfrom streamlit.config_option import ConfigOption\n\n# Config System Global State #\n\n# Descriptions of each of the possible config sections.\n# (We use OrderedDict to make the order in which sections are declared in this\n# file be the same order as the sections appear with `streamlit config show`)\n_section_descriptions: Dict[str, str] = OrderedDict(\n _test=\"Special test section just used for unit tests.\"\n)\n\n# Ensures that we don't try to get or set config options when config.toml files\n# change so are re-parsed.\n_config_lock = threading.RLock()\n\n# Stores config options with their default values (or None if they don't have\n# a default) before they are updated with values from config.toml files, flags\n# to `streamlit run`, etc. Note that this and _config_options below are\n# OrderedDicts to ensure stable ordering when printed using\n# `streamlit config show`.\n_config_options_template: Dict[str, ConfigOption] = OrderedDict()\n\n# Stores the current state of config options.\n_config_options: Optional[Dict[str, ConfigOption]] = None\n\n\n# Indicates that a config option was defined by the user.\n_USER_DEFINED = \"<user defined>\"\n\n# Indicates that a config option was defined either in an environment variable\n# or via command-line flag.\n_DEFINED_BY_FLAG = \"command-line argument or environment variable\"\n\n\ndef set_option(key: str, value: Any, where_defined: str = _USER_DEFINED) -> None:\n \"\"\"Set config option.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\". To see all\n available options, run `streamlit config show` on a terminal.\n\n value\n The new value to assign to this config option.\n\n where_defined : str\n Tells the config system where this was set.\n \"\"\"\n\n with _config_lock:\n # Ensure that our config files have been parsed.\n get_config_options()\n _set_option(key, value, where_defined)\n\n\ndef get_option(key: str) -> Any:\n \"\"\"Return the current value of a given Streamlit config option.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\". To see all\n available options, run `streamlit config show` on a terminal.\n \"\"\"\n with _config_lock:\n config_options = get_config_options()\n\n if key not in config_options:\n raise RuntimeError('Config key \"%s\" not defined.' 
% key)\n return config_options[key].value\n\n\ndef get_options_for_section(section: str) -> Dict[str, Any]:\n \"\"\"Get all of the config options for the given section.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n section : str\n The name of the config section to fetch options for.\n\n Returns\n ----------\n Dict[str, Any]\n A dict mapping the names of the options in the given section (without\n the section name as a prefix) to their values.\n \"\"\"\n with _config_lock:\n config_options = get_config_options()\n\n options_for_section = {}\n for option in config_options.values():\n if option.section == section:\n options_for_section[option.name] = option.value\n return options_for_section\n\n\ndef _create_section(section: str, description: str) -> None:\n \"\"\"Create a config section and store it globally in this module.\"\"\"\n assert section not in _section_descriptions, (\n 'Cannot define section \"%s\" twice.' % section\n )\n _section_descriptions[section] = description\n\n\ndef _create_option(\n key: str,\n description: Optional[str] = None,\n default_val: Optional[Any] = None,\n scriptable: bool = False,\n visibility: str = \"visible\",\n deprecated: bool = False,\n deprecation_text: Optional[str] = None,\n expiration_date: Optional[str] = None,\n replaced_by: Optional[str] = None,\n type_: type = str,\n) -> ConfigOption:\n '''Create a ConfigOption and store it globally in this module.\n\n There are two ways to create a ConfigOption:\n\n (1) Simple, constant config options are created as follows:\n\n _create_option('section.optionName',\n description = 'Put the description here.',\n default_val = 12345)\n\n (2) More complex, programmable config options use decorator syntax to\n resolve their values at runtime:\n\n @_create_option('section.optionName')\n def _section_option_name():\n \"\"\"Put the description here.\"\"\"\n return 12345\n\n To achieve this sugar, _create_option() returns a *callable object* of type\n ConfigObject, which then decorates the function.\n\n NOTE: ConfigObjects call their evaluation functions *every time* the option\n is requested. To prevent this, use the `streamlit.util.memoize` decorator as\n follows:\n\n @_create_option('section.memoizedOptionName')\n @util.memoize\n def _section_memoized_option_name():\n \"\"\"Put the description here.\"\"\"\n\n (This function is only called once.)\n \"\"\"\n return 12345\n\n '''\n option = ConfigOption(\n key,\n description=description,\n default_val=default_val,\n scriptable=scriptable,\n visibility=visibility,\n deprecated=deprecated,\n deprecation_text=deprecation_text,\n expiration_date=expiration_date,\n replaced_by=replaced_by,\n type_=type_,\n )\n assert (\n option.section in _section_descriptions\n ), 'Section \"%s\" must be one of %s.' % (\n option.section,\n \", \".join(_section_descriptions.keys()),\n )\n assert key not in _config_options_template, 'Cannot define option \"%s\" twice.' 
% key\n _config_options_template[key] = option\n return option\n\n\ndef _delete_option(key: str) -> None:\n \"\"\"Remove a ConfigOption by key from the global store.\n\n Only for use in testing.\n \"\"\"\n try:\n del _config_options_template[key]\n del cast(Dict[str, ConfigOption], _config_options)[key]\n except Exception:\n pass\n\n\n# Config Section: Global #\n\n_create_section(\"global\", \"Global options that apply across all of Streamlit.\")\n\n_create_option(\n \"global.disableWatchdogWarning\",\n description=\"\"\"\n By default, Streamlit checks if the Python watchdog module is available\n and, if not, prints a warning asking for you to install it. The watchdog\n module is not required, but highly recommended. It improves Streamlit's\n ability to detect changes to files in your filesystem.\n\n If you'd like to turn off this warning, set this to True.\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n\n_create_option(\n \"global.showWarningOnDirectExecution\",\n description=\"\"\"\n If True, will show a warning when you run a Streamlit-enabled script\n via \"python my_script.py\".\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n\n@_create_option(\"global.developmentMode\", visibility=\"hidden\", type_=bool)\ndef _global_development_mode() -> bool:\n \"\"\"Are we in development mode.\n\n This option defaults to True if and only if Streamlit wasn't installed\n normally.\n \"\"\"\n return (\n not env_util.is_pex()\n and \"site-packages\" not in __file__\n and \"dist-packages\" not in __file__\n )\n\n\n_create_option(\n \"global.logLevel\",\n description=\"\"\"Level of logging: 'error', 'warning', 'info', or 'debug'.\n\n Default: 'info'\n \"\"\",\n deprecated=True,\n deprecation_text=\"global.logLevel has been replaced with logger.level\",\n expiration_date=\"2020-11-30\",\n replaced_by=\"logger.level\",\n)\n\n\n@_create_option(\"global.unitTest\", visibility=\"hidden\", type_=bool)\ndef _global_unit_test() -> bool:\n \"\"\"Are we in a unit test?\n\n This option defaults to False.\n \"\"\"\n return False\n\n\n_create_option(\n \"global.suppressDeprecationWarnings\",\n description=\"Hide deprecation warnings in the streamlit app.\",\n visibility=\"hidden\",\n default_val=False,\n type_=bool,\n)\n\n_create_option(\n \"global.minCachedMessageSize\",\n description=\"\"\"Only cache ForwardMsgs that are greater than or equal to\n this minimum.\"\"\",\n visibility=\"hidden\",\n default_val=10 * 1e3,\n type_=float,\n) # 10k\n\n_create_option(\n \"global.maxCachedMessageAge\",\n description=\"\"\"Expire cached ForwardMsgs whose age is greater than this\n value. A message's age is defined by how many times its script has\n finished running since the message has been accessed.\"\"\",\n visibility=\"hidden\",\n default_val=2,\n type_=int,\n)\n\n_create_option(\n \"global.dataFrameSerialization\",\n description=\"\"\"\n DataFrame serialization.\n\n Acceptable values:\n - 'legacy': Serialize DataFrames using Streamlit's custom format. Slow\n but battle-tested.\n - 'arrow': Serialize DataFrames using Apache Arrow. 
Much faster and versatile.\"\"\",\n default_val=\"arrow\",\n type_=str,\n)\n\n\n# Config Section: Logger #\n_create_section(\"logger\", \"Settings to customize Streamlit log messages.\")\n\n\n@_create_option(\"logger.level\", type_=str)\ndef _logger_log_level() -> str:\n \"\"\"Level of logging: 'error', 'warning', 'info', or 'debug'.\n\n Default: 'info'\n \"\"\"\n\n if get_option(\"global.logLevel\"):\n return str(get_option(\"global.logLevel\"))\n elif get_option(\"global.developmentMode\"):\n return \"debug\"\n else:\n return \"info\"\n\n\n@_create_option(\"logger.messageFormat\", type_=str)\ndef _logger_message_format() -> str:\n \"\"\"String format for logging messages. If logger.datetimeFormat is set,\n logger messages will default to `%(asctime)s.%(msecs)03d %(message)s`. See\n [Python's documentation](https://docs.python.org/2.6/library/logging.html#formatter-objects)\n for available attributes.\n\n Default: \"%(asctime)s %(message)s\"\n \"\"\"\n if get_option(\"global.developmentMode\"):\n from streamlit.logger import DEFAULT_LOG_MESSAGE\n\n return DEFAULT_LOG_MESSAGE\n else:\n return \"%(asctime)s %(message)s\"\n\n\n_create_option(\n \"logger.enableRich\",\n description=\"\"\"\n Controls whether uncaught app exceptions are logged via the rich library.\n\n If True and if rich is installed, exception tracebacks will be logged with syntax highlighting and formatting.\n Rich tracebacks are easier to read and show more code than standard Python tracebacks.\n\n If set to False, the default Python traceback formatting will be used.\"\"\",\n default_val=False,\n visibility=\"hidden\",\n type_=bool,\n scriptable=True,\n)\n\n# Config Section: Client #\n\n_create_section(\"client\", \"Settings for scripts that use Streamlit.\")\n\n_create_option(\n \"client.caching\",\n description=\"Whether to enable st.cache.\",\n default_val=True,\n type_=bool,\n scriptable=True,\n)\n\n_create_option(\n \"client.displayEnabled\",\n description=\"\"\"If false, makes your Streamlit script not draw to a\n Streamlit app.\"\"\",\n default_val=True,\n type_=bool,\n scriptable=True,\n)\n\n_create_option(\n \"client.showErrorDetails\",\n description=\"\"\"\n Controls whether uncaught app exceptions are displayed in the browser.\n By default, this is set to True and Streamlit displays app exceptions\n and associated tracebacks in the browser.\n\n If set to False, an exception will result in a generic message being\n shown in the browser, and exceptions and tracebacks will be printed to\n the console only.\"\"\",\n default_val=True,\n type_=bool,\n scriptable=True,\n)\n\n# Config Section: Runner #\n\n_create_section(\"runner\", \"Settings for how Streamlit executes your script\")\n\n_create_option(\n \"runner.magicEnabled\",\n description=\"\"\"\n Allows you to type a variable or string by itself in a single line of\n Python code to write it to the app.\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n_create_option(\n \"runner.installTracer\",\n description=\"\"\"\n Install a Python tracer to allow you to stop or pause your script at\n any point and introspect it. As a side-effect, this slows down your\n script's execution.\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n_create_option(\n \"runner.fixMatplotlib\",\n description=\"\"\"\n Sets the MPLBACKEND environment variable to Agg inside Streamlit to\n prevent Python crashing.\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n_create_option(\n \"runner.postScriptGC\",\n description=\"\"\"\n Run the Python Garbage Collector after each script execution. 
This\n can help avoid excess memory use in Streamlit apps, but could\n introduce delay in rerunning the app script for high-memory-use\n applications.\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n_create_option(\n \"runner.fastReruns\",\n description=\"\"\"\n Handle script rerun requests immediately, rather than waiting for\n script execution to reach a yield point. Enabling this will\n make Streamlit much more responsive to user interaction, but it can\n lead to race conditions in apps that mutate session_state data outside\n of explicit session_state assignment statements.\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n# Config Section: Server #\n\n_create_section(\"server\", \"Settings for the Streamlit server\")\n\n_create_option(\n \"server.folderWatchBlacklist\",\n description=\"\"\"List of folders that should not be watched for changes. This\n impacts both \"Run on Save\" and @st.cache.\n\n Relative paths will be taken as relative to the current working directory.\n\n Example: ['/home/user1/env', 'relative/path/to/folder']\n \"\"\",\n default_val=[],\n)\n\n_create_option(\n \"server.fileWatcherType\",\n description=\"\"\"\n Change the type of file watcher used by Streamlit, or turn it off\n completely.\n\n Allowed values:\n * \"auto\" : Streamlit will attempt to use the watchdog module, and\n falls back to polling if watchdog is not available.\n * \"watchdog\" : Force Streamlit to use the watchdog module.\n * \"poll\" : Force Streamlit to always use polling.\n * \"none\" : Streamlit will not watch files.\n \"\"\",\n default_val=\"auto\",\n type_=str,\n)\n\n\n@_create_option(\"server.cookieSecret\", type_=str)\[email protected]\ndef _server_cookie_secret() -> str:\n \"\"\"Symmetric key used to produce signed cookies. If deploying on multiple replicas, this should\n be set to the same value across all replicas to ensure they all share the same secret.\n\n Default: randomly generated secret key.\n \"\"\"\n return secrets.token_hex()\n\n\n@_create_option(\"server.headless\", type_=bool)\ndef _server_headless() -> bool:\n \"\"\"If false, will attempt to open a browser window on start.\n\n Default: false unless (1) we are on a Linux box where DISPLAY is unset, or\n (2) we are running in the Streamlit Atom plugin.\n \"\"\"\n if env_util.IS_LINUX_OR_BSD and not os.getenv(\"DISPLAY\"):\n # We're running in Linux and DISPLAY is unset\n return True\n\n if os.getenv(\"IS_RUNNING_IN_STREAMLIT_EDITOR_PLUGIN\") is not None:\n # We're running within the Streamlit Atom plugin\n return True\n\n return False\n\n\n@_create_option(\"server.runOnSave\", type_=bool)\ndef _server_run_on_save() -> bool:\n \"\"\"Automatically rerun script when the file is modified on disk.\n\n Default: false\n \"\"\"\n return False\n\n\n@_create_option(\"server.allowRunOnSave\", type_=bool, visibility=\"hidden\")\ndef _server_allow_run_on_save() -> bool:\n \"\"\"Allows users to automatically rerun when app is updated.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"server.address\")\ndef _server_address() -> Optional[str]:\n \"\"\"The address where the server will listen for client and browser\n connections. 
Use this if you want to bind the server to a specific address.\n If set, the server will only be accessible from this address, and not from\n any aliases (like localhost).\n\n Default: (unset)\n \"\"\"\n return None\n\n\n@_create_option(\"server.port\", type_=int)\ndef _server_port() -> int:\n \"\"\"The port where the server will listen for browser\n connections.\n\n Default: 8501\n \"\"\"\n return 8501\n\n\n_create_option(\n \"server.scriptHealthCheckEnabled\",\n visibility=\"hidden\",\n description=\"\"\"\n Flag for enabling the script health check endpoint. It used for checking if\n a script loads successfully. On success, the endpoint will return a 200\n HTTP status code. On failure, the endpoint will return a 503 HTTP status code.\n\n Note: This is an experimental Streamlit internal API. The API is subject\n to change anytime so this should be used at your own risk\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n_create_option(\n \"server.baseUrlPath\",\n description=\"\"\"\n The base path for the URL where Streamlit should be served from.\n \"\"\",\n default_val=\"\",\n type_=str,\n)\n\n\n# TODO: Rename to server.enableCorsProtection.\n@_create_option(\"server.enableCORS\", type_=bool)\ndef _server_enable_cors() -> bool:\n \"\"\"Enables support for Cross-Origin Request Sharing (CORS) protection, for added security.\n\n Due to conflicts between CORS and XSRF, if `server.enableXsrfProtection` is on and\n `server.enableCORS` is off at the same time, we will prioritize `server.enableXsrfProtection`.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"server.enableXsrfProtection\", type_=bool)\ndef _server_enable_xsrf_protection() -> bool:\n \"\"\"Enables support for Cross-Site Request Forgery (XSRF) protection, for added security.\n\n Due to conflicts between CORS and XSRF, if `server.enableXsrfProtection` is on and\n `server.enableCORS` is off at the same time, we will prioritize `server.enableXsrfProtection`.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"server.maxUploadSize\", type_=int)\ndef _server_max_upload_size() -> int:\n \"\"\"Max size, in megabytes, for files uploaded with the file_uploader.\n\n Default: 200\n \"\"\"\n # If this default is changed, please also update the docstring\n # for `DeltaGenerator.file_uploader`.\n return 200\n\n\n@_create_option(\"server.maxMessageSize\", type_=int)\ndef _server_max_message_size() -> int:\n \"\"\"Max size, in megabytes, of messages that can be sent via the WebSocket connection.\n\n Default: 200\n \"\"\"\n return 200\n\n\n@_create_option(\"server.enableWebsocketCompression\", type_=bool)\ndef _server_enable_websocket_compression() -> bool:\n \"\"\"Enables support for websocket compression.\n\n Default: false\n \"\"\"\n return False\n\n\n# Config Section: Browser #\n\n_create_section(\"browser\", \"Configuration of non-UI browser options.\")\n\n\n@_create_option(\"browser.serverAddress\")\ndef _browser_server_address() -> str:\n \"\"\"Internet address where users should point their browsers in order to\n connect to the app. 
Can be IP address or DNS name and path.\n\n This is used to:\n - Set the correct URL for CORS and XSRF protection purposes.\n - Show the URL on the terminal\n - Open the browser\n\n Default: 'localhost'\n \"\"\"\n return \"localhost\"\n\n\n@_create_option(\"browser.gatherUsageStats\", type_=bool)\ndef _gather_usage_stats() -> bool:\n \"\"\"Whether to send usage statistics to Streamlit.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"browser.serverPort\", type_=int)\ndef _browser_server_port() -> int:\n \"\"\"Port where users should point their browsers in order to connect to the\n app.\n\n This is used to:\n - Set the correct URL for CORS and XSRF protection purposes.\n - Show the URL on the terminal\n - Open the browser\n\n Default: whatever value is set in server.port.\n \"\"\"\n return int(get_option(\"server.port\"))\n\n\n# Config Section: UI #\n\n# NOTE: We currently hide the ui config section in the `streamlit config show`\n# output as all of its options are hidden. If a non-hidden option is eventually\n# added, the section should be unhidden by removing it from the `SKIP_SECTIONS`\n# set in config_util.show_config.\n_create_section(\"ui\", \"Configuration of UI elements displayed in the browser.\")\n\n_create_option(\n \"ui.hideTopBar\",\n description=\"\"\"\n Flag to hide most of the UI elements found at the top of a Streamlit app.\n\n NOTE: This does *not* hide the hamburger menu in the top-right of an app.\n \"\"\",\n default_val=False,\n type_=bool,\n visibility=\"hidden\",\n)\n\n_create_option(\n \"ui.hideSidebarNav\",\n description=\"\"\"\n Flag to hide the sidebar page navigation component.\n\n We have this default to True for now so that we can \"soft-launch\" the\n multipage apps feature and merge the feature branch into develop earlier.\n Once we're ready to have multipage apps enabled by default, we'll flip the\n default to False.\n \"\"\",\n default_val=True,\n type_=bool,\n visibility=\"hidden\",\n)\n\n\n# Config Section: Mapbox #\n\n_create_section(\"mapbox\", \"Mapbox configuration that is being used by DeckGL.\")\n\n_create_option(\n \"mapbox.token\",\n description=\"\"\"Configure Streamlit to use a custom Mapbox\n token for elements like st.pydeck_chart and st.map.\n To get a token for yourself, create an account at\n https://mapbox.com. 
It's free (for moderate usage levels)!\"\"\",\n default_val=\"\",\n)\n\n\n# Config Section: deprecations\n\n_create_section(\"deprecation\", \"Configuration to show or hide deprecation warnings.\")\n\n_create_option(\n \"deprecation.showfileUploaderEncoding\",\n description=\"Set to false to disable the deprecation warning for the file uploader encoding.\",\n default_val=True,\n scriptable=True,\n type_=bool,\n expiration_date=\"2021-01-06\",\n)\n\n_create_option(\n \"deprecation.showImageFormat\",\n description=\"Set to false to disable the deprecation warning for the image format parameter.\",\n default_val=True,\n scriptable=True,\n type_=bool,\n deprecated=True,\n deprecation_text=\"The format parameter for st.image has been removed.\",\n expiration_date=\"2021-03-24\",\n)\n\n_create_option(\n \"deprecation.showPyplotGlobalUse\",\n description=\"Set to false to disable the deprecation warning for using the global pyplot instance.\",\n default_val=True,\n scriptable=True,\n type_=bool,\n)\n\n\n# Config Section: Custom Theme #\n\n_create_section(\"theme\", \"Settings to define a custom theme for your Streamlit app.\")\n\n_create_option(\n \"theme.base\",\n description=\"\"\"The preset Streamlit theme that your custom theme inherits from.\n One of \"light\" or \"dark\".\"\"\",\n)\n\n_create_option(\n \"theme.primaryColor\",\n description=\"Primary accent color for interactive elements.\",\n)\n\n_create_option(\n \"theme.backgroundColor\",\n description=\"Background color for the main content area.\",\n)\n\n_create_option(\n \"theme.secondaryBackgroundColor\",\n description=\"Background color used for the sidebar and most interactive widgets.\",\n)\n\n_create_option(\n \"theme.textColor\",\n description=\"Color used for almost all text.\",\n)\n\n_create_option(\n \"theme.font\",\n description=\"\"\"\n Font family for all text in the app, except code blocks. One of \"sans serif\",\n \"serif\", or \"monospace\".\n \"\"\",\n)\n\n\ndef get_where_defined(key: str) -> str:\n \"\"\"Indicate where (e.g. in which file) this option was defined.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\"\n\n \"\"\"\n with _config_lock:\n config_options = get_config_options()\n\n if key not in config_options:\n raise RuntimeError('Config key \"%s\" not defined.' 
% key)\n return config_options[key].where_defined\n\n\ndef _is_unset(option_name: str) -> bool:\n \"\"\"Check if a given option has not been set by the user.\n\n Parameters\n ----------\n option_name : str\n The option to check\n\n\n Returns\n -------\n bool\n True if the option has not been set by the user.\n\n \"\"\"\n return get_where_defined(option_name) == ConfigOption.DEFAULT_DEFINITION\n\n\ndef is_manually_set(option_name: str) -> bool:\n \"\"\"Check if a given option was actually defined by the user.\n\n Parameters\n ----------\n option_name : str\n The option to check\n\n\n Returns\n -------\n bool\n True if the option has been set by the user.\n\n \"\"\"\n return get_where_defined(option_name) not in (\n ConfigOption.DEFAULT_DEFINITION,\n ConfigOption.STREAMLIT_DEFINITION,\n )\n\n\ndef show_config() -> None:\n \"\"\"Print all config options to the terminal.\"\"\"\n with _config_lock:\n config_util.show_config(\n _section_descriptions, cast(Dict[str, ConfigOption], _config_options)\n )\n\n\n# Load Config Files #\n\n\ndef _set_option(key: str, value: Any, where_defined: str) -> None:\n \"\"\"Set a config option by key / value pair.\n\n This function assumes that the _config_options dictionary has already been\n populated and thus should only be used within this file and by tests.\n\n Parameters\n ----------\n key : str\n The key of the option, like \"logger.level\".\n value\n The value of the option.\n where_defined : str\n Tells the config system where this was set.\n\n \"\"\"\n assert (\n _config_options is not None\n ), \"_config_options should always be populated here.\"\n if key not in _config_options:\n # Import logger locally to prevent circular references\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n\n LOGGER.warning(\n f'\"{key}\" is not a valid config option. 
If you previously had this config option set, it may have been removed.'\n )\n\n else:\n _config_options[key].set_value(value, where_defined)\n\n\ndef _update_config_with_toml(raw_toml: str, where_defined: str) -> None:\n \"\"\"Update the config system by parsing this string.\n\n This should only be called from get_config_options.\n\n Parameters\n ----------\n raw_toml : str\n The TOML file to parse to update the config values.\n where_defined : str\n Tells the config system where this was set.\n\n \"\"\"\n parsed_config_file = toml.loads(raw_toml)\n\n for section, options in parsed_config_file.items():\n for name, value in options.items():\n value = _maybe_read_env_variable(value)\n _set_option(f\"{section}.{name}\", value, where_defined)\n\n\ndef _maybe_read_env_variable(value: Any) -> Any:\n \"\"\"If value is \"env:foo\", return value of environment variable \"foo\".\n\n If value is not in the shape above, returns the value right back.\n\n Parameters\n ----------\n value : any\n The value to check\n\n Returns\n -------\n any\n Either returns value right back, or the value of the environment\n variable.\n\n \"\"\"\n\n if isinstance(value, str) and value.startswith(\"env:\"):\n var_name = value[len(\"env:\") :]\n env_var = os.environ.get(var_name)\n\n if env_var is None:\n # Import logger locally to prevent circular references\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n\n LOGGER.error(\"No environment variable called %s\" % var_name)\n else:\n return _maybe_convert_to_number(env_var)\n\n return value\n\n\ndef _maybe_convert_to_number(v: Any) -> Any:\n \"\"\"Convert v to int or float, or leave it as is.\"\"\"\n try:\n return int(v)\n except Exception:\n pass\n\n try:\n return float(v)\n except Exception:\n pass\n\n return v\n\n\n# Allow outside modules to wait for the config file to be parsed before doing\n# something.\n_on_config_parsed = Signal(doc=\"Emitted when the config file is parsed.\")\n\nCONFIG_FILENAMES = [\n file_util.get_streamlit_file_path(\"config.toml\"),\n file_util.get_project_streamlit_file_path(\"config.toml\"),\n]\n\n\ndef get_config_options(\n force_reparse=False, options_from_flags: Optional[Dict[str, Any]] = None\n) -> Dict[str, ConfigOption]:\n \"\"\"Create and return a dict mapping config option names to their values,\n returning a cached dict if possible.\n\n Config option values are sourced from the following locations. Values\n set in locations further down the list overwrite those set earlier.\n 1. default values defined in this file\n 2. the global `~/.streamlit/config.toml` file\n 3. per-project `$CWD/.streamlit/config.toml` files\n 4. environment variables such as `STREAMLIT_SERVER_PORT`\n 5. 
command line flags passed to `streamlit run`\n\n Parameters\n ----------\n force_reparse : bool\n Force config files to be parsed so that we pick up any changes to them.\n\n options_from_flags : Optional[Dict[str, any]\n Config options that we received via CLI flag.\n\n Returns\n ----------\n Dict[str, ConfigOption]\n An ordered dict that maps config option names to their values.\n \"\"\"\n global _config_options\n\n if not options_from_flags:\n options_from_flags = {}\n\n # Avoid grabbing the lock in the case where there's nothing for us to do.\n config_options = _config_options\n if config_options and not force_reparse:\n return config_options\n\n with _config_lock:\n # Short-circuit if config files were parsed while we were waiting on\n # the lock.\n if _config_options and not force_reparse:\n return _config_options\n\n old_options = _config_options\n _config_options = copy.deepcopy(_config_options_template)\n\n # Values set in files later in the CONFIG_FILENAMES list overwrite those\n # set earlier.\n for filename in CONFIG_FILENAMES:\n if not os.path.exists(filename):\n continue\n\n with open(filename, \"r\", encoding=\"utf-8\") as input:\n file_contents = input.read()\n\n _update_config_with_toml(file_contents, filename)\n\n for opt_name, opt_val in options_from_flags.items():\n _set_option(opt_name, opt_val, _DEFINED_BY_FLAG)\n\n if old_options and config_util.server_option_changed(\n old_options, _config_options\n ):\n # Import logger locally to prevent circular references.\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n LOGGER.warning(\n \"An update to the [server] config option section was detected.\"\n \" To have these changes be reflected, please restart streamlit.\"\n )\n\n _on_config_parsed.send()\n return _config_options\n\n\ndef _check_conflicts() -> None:\n # Node-related conflicts\n\n # When using the Node server, we must always connect to 8501 (this is\n # hard-coded in JS). 
Otherwise, the browser would decide what port to\n # connect to based on window.location.port, which in dev is going to\n # be (3000)\n\n # Import logger locally to prevent circular references\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n\n if get_option(\"global.developmentMode\"):\n assert _is_unset(\n \"server.port\"\n ), \"server.port does not work when global.developmentMode is true.\"\n\n assert _is_unset(\"browser.serverPort\"), (\n \"browser.serverPort does not work when global.developmentMode is \" \"true.\"\n )\n\n # XSRF conflicts\n if get_option(\"server.enableXsrfProtection\"):\n if not get_option(\"server.enableCORS\") or get_option(\"global.developmentMode\"):\n LOGGER.warning(\n \"\"\"\nWarning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'.\nAs a result, 'server.enableCORS' is being overridden to 'true'.\n\nMore information:\nIn order to protect against CSRF attacks, we send a cookie with each request.\nTo do so, we must specify allowable origins, which places a restriction on\ncross-origin resource sharing.\n\nIf cross origin resource sharing is required, please disable server.enableXsrfProtection.\n \"\"\"\n )\n\n\ndef _set_development_mode() -> None:\n development.is_development_mode = get_option(\"global.developmentMode\")\n\n\ndef on_config_parsed(\n func: Callable[[], None], force_connect=False, lock=False\n) -> Callable[[], bool]:\n \"\"\"Wait for the config file to be parsed then call func.\n\n If the config file has already been parsed, just calls func immediately\n unless force_connect is set.\n\n Parameters\n ----------\n func : Callable[[], None]\n A function to run on config parse.\n\n force_connect : bool\n Wait until the next config file parse to run func, even if config files\n have already been parsed.\n\n lock : bool\n If set, grab _config_lock before running func.\n\n Returns\n -------\n Callable[[], bool]\n A function that the caller can use to deregister func.\n \"\"\"\n\n # We need to use the same receiver when we connect or disconnect on the\n # Signal. If we don't do this, then the registered receiver won't be released\n # leading to a memory leak because the Signal will keep a reference of the\n # callable argument. When the callable argument is an object method, then\n # the reference to that object won't be released.\n receiver = lambda _: func_with_lock()\n\n def disconnect():\n return _on_config_parsed.disconnect(receiver)\n\n def func_with_lock():\n if lock:\n with _config_lock:\n func()\n else:\n func()\n\n if force_connect or not _config_options:\n # weak=False so that we have control of when the on_config_parsed\n # callback is deregistered.\n _on_config_parsed.connect(receiver, weak=False)\n else:\n func_with_lock()\n\n return disconnect\n\n\n# Run _check_conflicts only once the config file is parsed in order to avoid\n# loops. We also need to grab the lock when running _check_conflicts since it\n# may edit config options based on the values of other config options.\non_config_parsed(_check_conflicts, lock=True)\non_config_parsed(_set_development_mode)\n", "path": "lib/streamlit/config.py" } ]
[ { "content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loads the configuration data.\"\"\"\n\nimport copy\nimport os\nimport secrets\nimport threading\nimport toml\nfrom collections import OrderedDict\nfrom typing import Any, Callable, cast, Dict, Optional\n\nfrom blinker import Signal\n\nfrom streamlit import config_util\nfrom streamlit import development\nfrom streamlit import env_util\nfrom streamlit import file_util\nfrom streamlit import util\nfrom streamlit.config_option import ConfigOption\n\n# Config System Global State #\n\n# Descriptions of each of the possible config sections.\n# (We use OrderedDict to make the order in which sections are declared in this\n# file be the same order as the sections appear with `streamlit config show`)\n_section_descriptions: Dict[str, str] = OrderedDict(\n _test=\"Special test section just used for unit tests.\"\n)\n\n# Ensures that we don't try to get or set config options when config.toml files\n# change so are re-parsed.\n_config_lock = threading.RLock()\n\n# Stores config options with their default values (or None if they don't have\n# a default) before they are updated with values from config.toml files, flags\n# to `streamlit run`, etc. Note that this and _config_options below are\n# OrderedDicts to ensure stable ordering when printed using\n# `streamlit config show`.\n_config_options_template: Dict[str, ConfigOption] = OrderedDict()\n\n# Stores the current state of config options.\n_config_options: Optional[Dict[str, ConfigOption]] = None\n\n\n# Indicates that a config option was defined by the user.\n_USER_DEFINED = \"<user defined>\"\n\n# Indicates that a config option was defined either in an environment variable\n# or via command-line flag.\n_DEFINED_BY_FLAG = \"command-line argument or environment variable\"\n\n\ndef set_option(key: str, value: Any, where_defined: str = _USER_DEFINED) -> None:\n \"\"\"Set config option.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\". To see all\n available options, run `streamlit config show` on a terminal.\n\n value\n The new value to assign to this config option.\n\n where_defined : str\n Tells the config system where this was set.\n \"\"\"\n\n with _config_lock:\n # Ensure that our config files have been parsed.\n get_config_options()\n _set_option(key, value, where_defined)\n\n\ndef get_option(key: str) -> Any:\n \"\"\"Return the current value of a given Streamlit config option.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\". To see all\n available options, run `streamlit config show` on a terminal.\n \"\"\"\n with _config_lock:\n config_options = get_config_options()\n\n if key not in config_options:\n raise RuntimeError('Config key \"%s\" not defined.' 
% key)\n return config_options[key].value\n\n\ndef get_options_for_section(section: str) -> Dict[str, Any]:\n \"\"\"Get all of the config options for the given section.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n section : str\n The name of the config section to fetch options for.\n\n Returns\n ----------\n Dict[str, Any]\n A dict mapping the names of the options in the given section (without\n the section name as a prefix) to their values.\n \"\"\"\n with _config_lock:\n config_options = get_config_options()\n\n options_for_section = {}\n for option in config_options.values():\n if option.section == section:\n options_for_section[option.name] = option.value\n return options_for_section\n\n\ndef _create_section(section: str, description: str) -> None:\n \"\"\"Create a config section and store it globally in this module.\"\"\"\n assert section not in _section_descriptions, (\n 'Cannot define section \"%s\" twice.' % section\n )\n _section_descriptions[section] = description\n\n\ndef _create_option(\n key: str,\n description: Optional[str] = None,\n default_val: Optional[Any] = None,\n scriptable: bool = False,\n visibility: str = \"visible\",\n deprecated: bool = False,\n deprecation_text: Optional[str] = None,\n expiration_date: Optional[str] = None,\n replaced_by: Optional[str] = None,\n type_: type = str,\n) -> ConfigOption:\n '''Create a ConfigOption and store it globally in this module.\n\n There are two ways to create a ConfigOption:\n\n (1) Simple, constant config options are created as follows:\n\n _create_option('section.optionName',\n description = 'Put the description here.',\n default_val = 12345)\n\n (2) More complex, programmable config options use decorator syntax to\n resolve their values at runtime:\n\n @_create_option('section.optionName')\n def _section_option_name():\n \"\"\"Put the description here.\"\"\"\n return 12345\n\n To achieve this sugar, _create_option() returns a *callable object* of type\n ConfigObject, which then decorates the function.\n\n NOTE: ConfigObjects call their evaluation functions *every time* the option\n is requested. To prevent this, use the `streamlit.util.memoize` decorator as\n follows:\n\n @_create_option('section.memoizedOptionName')\n @util.memoize\n def _section_memoized_option_name():\n \"\"\"Put the description here.\"\"\"\n\n (This function is only called once.)\n \"\"\"\n return 12345\n\n '''\n option = ConfigOption(\n key,\n description=description,\n default_val=default_val,\n scriptable=scriptable,\n visibility=visibility,\n deprecated=deprecated,\n deprecation_text=deprecation_text,\n expiration_date=expiration_date,\n replaced_by=replaced_by,\n type_=type_,\n )\n assert (\n option.section in _section_descriptions\n ), 'Section \"%s\" must be one of %s.' % (\n option.section,\n \", \".join(_section_descriptions.keys()),\n )\n assert key not in _config_options_template, 'Cannot define option \"%s\" twice.' 
% key\n _config_options_template[key] = option\n return option\n\n\ndef _delete_option(key: str) -> None:\n \"\"\"Remove a ConfigOption by key from the global store.\n\n Only for use in testing.\n \"\"\"\n try:\n del _config_options_template[key]\n del cast(Dict[str, ConfigOption], _config_options)[key]\n except Exception:\n pass\n\n\n# Config Section: Global #\n\n_create_section(\"global\", \"Global options that apply across all of Streamlit.\")\n\n_create_option(\n \"global.disableWatchdogWarning\",\n description=\"\"\"\n By default, Streamlit checks if the Python watchdog module is available\n and, if not, prints a warning asking for you to install it. The watchdog\n module is not required, but highly recommended. It improves Streamlit's\n ability to detect changes to files in your filesystem.\n\n If you'd like to turn off this warning, set this to True.\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n\n_create_option(\n \"global.showWarningOnDirectExecution\",\n description=\"\"\"\n If True, will show a warning when you run a Streamlit-enabled script\n via \"python my_script.py\".\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n\n@_create_option(\"global.developmentMode\", visibility=\"hidden\", type_=bool)\ndef _global_development_mode() -> bool:\n \"\"\"Are we in development mode.\n\n This option defaults to True if and only if Streamlit wasn't installed\n normally.\n \"\"\"\n return (\n not env_util.is_pex()\n and \"site-packages\" not in __file__\n and \"dist-packages\" not in __file__\n and \"__pypackages__\" not in __file__\n )\n\n\n_create_option(\n \"global.logLevel\",\n description=\"\"\"Level of logging: 'error', 'warning', 'info', or 'debug'.\n\n Default: 'info'\n \"\"\",\n deprecated=True,\n deprecation_text=\"global.logLevel has been replaced with logger.level\",\n expiration_date=\"2020-11-30\",\n replaced_by=\"logger.level\",\n)\n\n\n@_create_option(\"global.unitTest\", visibility=\"hidden\", type_=bool)\ndef _global_unit_test() -> bool:\n \"\"\"Are we in a unit test?\n\n This option defaults to False.\n \"\"\"\n return False\n\n\n_create_option(\n \"global.suppressDeprecationWarnings\",\n description=\"Hide deprecation warnings in the streamlit app.\",\n visibility=\"hidden\",\n default_val=False,\n type_=bool,\n)\n\n_create_option(\n \"global.minCachedMessageSize\",\n description=\"\"\"Only cache ForwardMsgs that are greater than or equal to\n this minimum.\"\"\",\n visibility=\"hidden\",\n default_val=10 * 1e3,\n type_=float,\n) # 10k\n\n_create_option(\n \"global.maxCachedMessageAge\",\n description=\"\"\"Expire cached ForwardMsgs whose age is greater than this\n value. A message's age is defined by how many times its script has\n finished running since the message has been accessed.\"\"\",\n visibility=\"hidden\",\n default_val=2,\n type_=int,\n)\n\n_create_option(\n \"global.dataFrameSerialization\",\n description=\"\"\"\n DataFrame serialization.\n\n Acceptable values:\n - 'legacy': Serialize DataFrames using Streamlit's custom format. Slow\n but battle-tested.\n - 'arrow': Serialize DataFrames using Apache Arrow. 
Much faster and versatile.\"\"\",\n default_val=\"arrow\",\n type_=str,\n)\n\n\n# Config Section: Logger #\n_create_section(\"logger\", \"Settings to customize Streamlit log messages.\")\n\n\n@_create_option(\"logger.level\", type_=str)\ndef _logger_log_level() -> str:\n \"\"\"Level of logging: 'error', 'warning', 'info', or 'debug'.\n\n Default: 'info'\n \"\"\"\n\n if get_option(\"global.logLevel\"):\n return str(get_option(\"global.logLevel\"))\n elif get_option(\"global.developmentMode\"):\n return \"debug\"\n else:\n return \"info\"\n\n\n@_create_option(\"logger.messageFormat\", type_=str)\ndef _logger_message_format() -> str:\n \"\"\"String format for logging messages. If logger.datetimeFormat is set,\n logger messages will default to `%(asctime)s.%(msecs)03d %(message)s`. See\n [Python's documentation](https://docs.python.org/2.6/library/logging.html#formatter-objects)\n for available attributes.\n\n Default: \"%(asctime)s %(message)s\"\n \"\"\"\n if get_option(\"global.developmentMode\"):\n from streamlit.logger import DEFAULT_LOG_MESSAGE\n\n return DEFAULT_LOG_MESSAGE\n else:\n return \"%(asctime)s %(message)s\"\n\n\n_create_option(\n \"logger.enableRich\",\n description=\"\"\"\n Controls whether uncaught app exceptions are logged via the rich library.\n\n If True and if rich is installed, exception tracebacks will be logged with syntax highlighting and formatting.\n Rich tracebacks are easier to read and show more code than standard Python tracebacks.\n\n If set to False, the default Python traceback formatting will be used.\"\"\",\n default_val=False,\n visibility=\"hidden\",\n type_=bool,\n scriptable=True,\n)\n\n# Config Section: Client #\n\n_create_section(\"client\", \"Settings for scripts that use Streamlit.\")\n\n_create_option(\n \"client.caching\",\n description=\"Whether to enable st.cache.\",\n default_val=True,\n type_=bool,\n scriptable=True,\n)\n\n_create_option(\n \"client.displayEnabled\",\n description=\"\"\"If false, makes your Streamlit script not draw to a\n Streamlit app.\"\"\",\n default_val=True,\n type_=bool,\n scriptable=True,\n)\n\n_create_option(\n \"client.showErrorDetails\",\n description=\"\"\"\n Controls whether uncaught app exceptions are displayed in the browser.\n By default, this is set to True and Streamlit displays app exceptions\n and associated tracebacks in the browser.\n\n If set to False, an exception will result in a generic message being\n shown in the browser, and exceptions and tracebacks will be printed to\n the console only.\"\"\",\n default_val=True,\n type_=bool,\n scriptable=True,\n)\n\n# Config Section: Runner #\n\n_create_section(\"runner\", \"Settings for how Streamlit executes your script\")\n\n_create_option(\n \"runner.magicEnabled\",\n description=\"\"\"\n Allows you to type a variable or string by itself in a single line of\n Python code to write it to the app.\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n_create_option(\n \"runner.installTracer\",\n description=\"\"\"\n Install a Python tracer to allow you to stop or pause your script at\n any point and introspect it. As a side-effect, this slows down your\n script's execution.\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n_create_option(\n \"runner.fixMatplotlib\",\n description=\"\"\"\n Sets the MPLBACKEND environment variable to Agg inside Streamlit to\n prevent Python crashing.\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n_create_option(\n \"runner.postScriptGC\",\n description=\"\"\"\n Run the Python Garbage Collector after each script execution. 
This\n can help avoid excess memory use in Streamlit apps, but could\n introduce delay in rerunning the app script for high-memory-use\n applications.\n \"\"\",\n default_val=True,\n type_=bool,\n)\n\n_create_option(\n \"runner.fastReruns\",\n description=\"\"\"\n Handle script rerun requests immediately, rather than waiting for\n script execution to reach a yield point. Enabling this will\n make Streamlit much more responsive to user interaction, but it can\n lead to race conditions in apps that mutate session_state data outside\n of explicit session_state assignment statements.\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n# Config Section: Server #\n\n_create_section(\"server\", \"Settings for the Streamlit server\")\n\n_create_option(\n \"server.folderWatchBlacklist\",\n description=\"\"\"List of folders that should not be watched for changes. This\n impacts both \"Run on Save\" and @st.cache.\n\n Relative paths will be taken as relative to the current working directory.\n\n Example: ['/home/user1/env', 'relative/path/to/folder']\n \"\"\",\n default_val=[],\n)\n\n_create_option(\n \"server.fileWatcherType\",\n description=\"\"\"\n Change the type of file watcher used by Streamlit, or turn it off\n completely.\n\n Allowed values:\n * \"auto\" : Streamlit will attempt to use the watchdog module, and\n falls back to polling if watchdog is not available.\n * \"watchdog\" : Force Streamlit to use the watchdog module.\n * \"poll\" : Force Streamlit to always use polling.\n * \"none\" : Streamlit will not watch files.\n \"\"\",\n default_val=\"auto\",\n type_=str,\n)\n\n\n@_create_option(\"server.cookieSecret\", type_=str)\[email protected]\ndef _server_cookie_secret() -> str:\n \"\"\"Symmetric key used to produce signed cookies. If deploying on multiple replicas, this should\n be set to the same value across all replicas to ensure they all share the same secret.\n\n Default: randomly generated secret key.\n \"\"\"\n return secrets.token_hex()\n\n\n@_create_option(\"server.headless\", type_=bool)\ndef _server_headless() -> bool:\n \"\"\"If false, will attempt to open a browser window on start.\n\n Default: false unless (1) we are on a Linux box where DISPLAY is unset, or\n (2) we are running in the Streamlit Atom plugin.\n \"\"\"\n if env_util.IS_LINUX_OR_BSD and not os.getenv(\"DISPLAY\"):\n # We're running in Linux and DISPLAY is unset\n return True\n\n if os.getenv(\"IS_RUNNING_IN_STREAMLIT_EDITOR_PLUGIN\") is not None:\n # We're running within the Streamlit Atom plugin\n return True\n\n return False\n\n\n@_create_option(\"server.runOnSave\", type_=bool)\ndef _server_run_on_save() -> bool:\n \"\"\"Automatically rerun script when the file is modified on disk.\n\n Default: false\n \"\"\"\n return False\n\n\n@_create_option(\"server.allowRunOnSave\", type_=bool, visibility=\"hidden\")\ndef _server_allow_run_on_save() -> bool:\n \"\"\"Allows users to automatically rerun when app is updated.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"server.address\")\ndef _server_address() -> Optional[str]:\n \"\"\"The address where the server will listen for client and browser\n connections. 
Use this if you want to bind the server to a specific address.\n If set, the server will only be accessible from this address, and not from\n any aliases (like localhost).\n\n Default: (unset)\n \"\"\"\n return None\n\n\n@_create_option(\"server.port\", type_=int)\ndef _server_port() -> int:\n \"\"\"The port where the server will listen for browser\n connections.\n\n Default: 8501\n \"\"\"\n return 8501\n\n\n_create_option(\n \"server.scriptHealthCheckEnabled\",\n visibility=\"hidden\",\n description=\"\"\"\n Flag for enabling the script health check endpoint. It used for checking if\n a script loads successfully. On success, the endpoint will return a 200\n HTTP status code. On failure, the endpoint will return a 503 HTTP status code.\n\n Note: This is an experimental Streamlit internal API. The API is subject\n to change anytime so this should be used at your own risk\n \"\"\",\n default_val=False,\n type_=bool,\n)\n\n_create_option(\n \"server.baseUrlPath\",\n description=\"\"\"\n The base path for the URL where Streamlit should be served from.\n \"\"\",\n default_val=\"\",\n type_=str,\n)\n\n\n# TODO: Rename to server.enableCorsProtection.\n@_create_option(\"server.enableCORS\", type_=bool)\ndef _server_enable_cors() -> bool:\n \"\"\"Enables support for Cross-Origin Request Sharing (CORS) protection, for added security.\n\n Due to conflicts between CORS and XSRF, if `server.enableXsrfProtection` is on and\n `server.enableCORS` is off at the same time, we will prioritize `server.enableXsrfProtection`.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"server.enableXsrfProtection\", type_=bool)\ndef _server_enable_xsrf_protection() -> bool:\n \"\"\"Enables support for Cross-Site Request Forgery (XSRF) protection, for added security.\n\n Due to conflicts between CORS and XSRF, if `server.enableXsrfProtection` is on and\n `server.enableCORS` is off at the same time, we will prioritize `server.enableXsrfProtection`.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"server.maxUploadSize\", type_=int)\ndef _server_max_upload_size() -> int:\n \"\"\"Max size, in megabytes, for files uploaded with the file_uploader.\n\n Default: 200\n \"\"\"\n # If this default is changed, please also update the docstring\n # for `DeltaGenerator.file_uploader`.\n return 200\n\n\n@_create_option(\"server.maxMessageSize\", type_=int)\ndef _server_max_message_size() -> int:\n \"\"\"Max size, in megabytes, of messages that can be sent via the WebSocket connection.\n\n Default: 200\n \"\"\"\n return 200\n\n\n@_create_option(\"server.enableWebsocketCompression\", type_=bool)\ndef _server_enable_websocket_compression() -> bool:\n \"\"\"Enables support for websocket compression.\n\n Default: false\n \"\"\"\n return False\n\n\n# Config Section: Browser #\n\n_create_section(\"browser\", \"Configuration of non-UI browser options.\")\n\n\n@_create_option(\"browser.serverAddress\")\ndef _browser_server_address() -> str:\n \"\"\"Internet address where users should point their browsers in order to\n connect to the app. 
Can be IP address or DNS name and path.\n\n This is used to:\n - Set the correct URL for CORS and XSRF protection purposes.\n - Show the URL on the terminal\n - Open the browser\n\n Default: 'localhost'\n \"\"\"\n return \"localhost\"\n\n\n@_create_option(\"browser.gatherUsageStats\", type_=bool)\ndef _gather_usage_stats() -> bool:\n \"\"\"Whether to send usage statistics to Streamlit.\n\n Default: true\n \"\"\"\n return True\n\n\n@_create_option(\"browser.serverPort\", type_=int)\ndef _browser_server_port() -> int:\n \"\"\"Port where users should point their browsers in order to connect to the\n app.\n\n This is used to:\n - Set the correct URL for CORS and XSRF protection purposes.\n - Show the URL on the terminal\n - Open the browser\n\n Default: whatever value is set in server.port.\n \"\"\"\n return int(get_option(\"server.port\"))\n\n\n# Config Section: UI #\n\n# NOTE: We currently hide the ui config section in the `streamlit config show`\n# output as all of its options are hidden. If a non-hidden option is eventually\n# added, the section should be unhidden by removing it from the `SKIP_SECTIONS`\n# set in config_util.show_config.\n_create_section(\"ui\", \"Configuration of UI elements displayed in the browser.\")\n\n_create_option(\n \"ui.hideTopBar\",\n description=\"\"\"\n Flag to hide most of the UI elements found at the top of a Streamlit app.\n\n NOTE: This does *not* hide the hamburger menu in the top-right of an app.\n \"\"\",\n default_val=False,\n type_=bool,\n visibility=\"hidden\",\n)\n\n_create_option(\n \"ui.hideSidebarNav\",\n description=\"\"\"\n Flag to hide the sidebar page navigation component.\n\n We have this default to True for now so that we can \"soft-launch\" the\n multipage apps feature and merge the feature branch into develop earlier.\n Once we're ready to have multipage apps enabled by default, we'll flip the\n default to False.\n \"\"\",\n default_val=True,\n type_=bool,\n visibility=\"hidden\",\n)\n\n\n# Config Section: Mapbox #\n\n_create_section(\"mapbox\", \"Mapbox configuration that is being used by DeckGL.\")\n\n_create_option(\n \"mapbox.token\",\n description=\"\"\"Configure Streamlit to use a custom Mapbox\n token for elements like st.pydeck_chart and st.map.\n To get a token for yourself, create an account at\n https://mapbox.com. 
It's free (for moderate usage levels)!\"\"\",\n default_val=\"\",\n)\n\n\n# Config Section: deprecations\n\n_create_section(\"deprecation\", \"Configuration to show or hide deprecation warnings.\")\n\n_create_option(\n \"deprecation.showfileUploaderEncoding\",\n description=\"Set to false to disable the deprecation warning for the file uploader encoding.\",\n default_val=True,\n scriptable=True,\n type_=bool,\n expiration_date=\"2021-01-06\",\n)\n\n_create_option(\n \"deprecation.showImageFormat\",\n description=\"Set to false to disable the deprecation warning for the image format parameter.\",\n default_val=True,\n scriptable=True,\n type_=bool,\n deprecated=True,\n deprecation_text=\"The format parameter for st.image has been removed.\",\n expiration_date=\"2021-03-24\",\n)\n\n_create_option(\n \"deprecation.showPyplotGlobalUse\",\n description=\"Set to false to disable the deprecation warning for using the global pyplot instance.\",\n default_val=True,\n scriptable=True,\n type_=bool,\n)\n\n\n# Config Section: Custom Theme #\n\n_create_section(\"theme\", \"Settings to define a custom theme for your Streamlit app.\")\n\n_create_option(\n \"theme.base\",\n description=\"\"\"The preset Streamlit theme that your custom theme inherits from.\n One of \"light\" or \"dark\".\"\"\",\n)\n\n_create_option(\n \"theme.primaryColor\",\n description=\"Primary accent color for interactive elements.\",\n)\n\n_create_option(\n \"theme.backgroundColor\",\n description=\"Background color for the main content area.\",\n)\n\n_create_option(\n \"theme.secondaryBackgroundColor\",\n description=\"Background color used for the sidebar and most interactive widgets.\",\n)\n\n_create_option(\n \"theme.textColor\",\n description=\"Color used for almost all text.\",\n)\n\n_create_option(\n \"theme.font\",\n description=\"\"\"\n Font family for all text in the app, except code blocks. One of \"sans serif\",\n \"serif\", or \"monospace\".\n \"\"\",\n)\n\n\ndef get_where_defined(key: str) -> str:\n \"\"\"Indicate where (e.g. in which file) this option was defined.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\"\n\n \"\"\"\n with _config_lock:\n config_options = get_config_options()\n\n if key not in config_options:\n raise RuntimeError('Config key \"%s\" not defined.' 
% key)\n return config_options[key].where_defined\n\n\ndef _is_unset(option_name: str) -> bool:\n \"\"\"Check if a given option has not been set by the user.\n\n Parameters\n ----------\n option_name : str\n The option to check\n\n\n Returns\n -------\n bool\n True if the option has not been set by the user.\n\n \"\"\"\n return get_where_defined(option_name) == ConfigOption.DEFAULT_DEFINITION\n\n\ndef is_manually_set(option_name: str) -> bool:\n \"\"\"Check if a given option was actually defined by the user.\n\n Parameters\n ----------\n option_name : str\n The option to check\n\n\n Returns\n -------\n bool\n True if the option has been set by the user.\n\n \"\"\"\n return get_where_defined(option_name) not in (\n ConfigOption.DEFAULT_DEFINITION,\n ConfigOption.STREAMLIT_DEFINITION,\n )\n\n\ndef show_config() -> None:\n \"\"\"Print all config options to the terminal.\"\"\"\n with _config_lock:\n config_util.show_config(\n _section_descriptions, cast(Dict[str, ConfigOption], _config_options)\n )\n\n\n# Load Config Files #\n\n\ndef _set_option(key: str, value: Any, where_defined: str) -> None:\n \"\"\"Set a config option by key / value pair.\n\n This function assumes that the _config_options dictionary has already been\n populated and thus should only be used within this file and by tests.\n\n Parameters\n ----------\n key : str\n The key of the option, like \"logger.level\".\n value\n The value of the option.\n where_defined : str\n Tells the config system where this was set.\n\n \"\"\"\n assert (\n _config_options is not None\n ), \"_config_options should always be populated here.\"\n if key not in _config_options:\n # Import logger locally to prevent circular references\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n\n LOGGER.warning(\n f'\"{key}\" is not a valid config option. 
If you previously had this config option set, it may have been removed.'\n )\n\n else:\n _config_options[key].set_value(value, where_defined)\n\n\ndef _update_config_with_toml(raw_toml: str, where_defined: str) -> None:\n \"\"\"Update the config system by parsing this string.\n\n This should only be called from get_config_options.\n\n Parameters\n ----------\n raw_toml : str\n The TOML file to parse to update the config values.\n where_defined : str\n Tells the config system where this was set.\n\n \"\"\"\n parsed_config_file = toml.loads(raw_toml)\n\n for section, options in parsed_config_file.items():\n for name, value in options.items():\n value = _maybe_read_env_variable(value)\n _set_option(f\"{section}.{name}\", value, where_defined)\n\n\ndef _maybe_read_env_variable(value: Any) -> Any:\n \"\"\"If value is \"env:foo\", return value of environment variable \"foo\".\n\n If value is not in the shape above, returns the value right back.\n\n Parameters\n ----------\n value : any\n The value to check\n\n Returns\n -------\n any\n Either returns value right back, or the value of the environment\n variable.\n\n \"\"\"\n\n if isinstance(value, str) and value.startswith(\"env:\"):\n var_name = value[len(\"env:\") :]\n env_var = os.environ.get(var_name)\n\n if env_var is None:\n # Import logger locally to prevent circular references\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n\n LOGGER.error(\"No environment variable called %s\" % var_name)\n else:\n return _maybe_convert_to_number(env_var)\n\n return value\n\n\ndef _maybe_convert_to_number(v: Any) -> Any:\n \"\"\"Convert v to int or float, or leave it as is.\"\"\"\n try:\n return int(v)\n except Exception:\n pass\n\n try:\n return float(v)\n except Exception:\n pass\n\n return v\n\n\n# Allow outside modules to wait for the config file to be parsed before doing\n# something.\n_on_config_parsed = Signal(doc=\"Emitted when the config file is parsed.\")\n\nCONFIG_FILENAMES = [\n file_util.get_streamlit_file_path(\"config.toml\"),\n file_util.get_project_streamlit_file_path(\"config.toml\"),\n]\n\n\ndef get_config_options(\n force_reparse=False, options_from_flags: Optional[Dict[str, Any]] = None\n) -> Dict[str, ConfigOption]:\n \"\"\"Create and return a dict mapping config option names to their values,\n returning a cached dict if possible.\n\n Config option values are sourced from the following locations. Values\n set in locations further down the list overwrite those set earlier.\n 1. default values defined in this file\n 2. the global `~/.streamlit/config.toml` file\n 3. per-project `$CWD/.streamlit/config.toml` files\n 4. environment variables such as `STREAMLIT_SERVER_PORT`\n 5. 
command line flags passed to `streamlit run`\n\n Parameters\n ----------\n force_reparse : bool\n Force config files to be parsed so that we pick up any changes to them.\n\n options_from_flags : Optional[Dict[str, any]\n Config options that we received via CLI flag.\n\n Returns\n ----------\n Dict[str, ConfigOption]\n An ordered dict that maps config option names to their values.\n \"\"\"\n global _config_options\n\n if not options_from_flags:\n options_from_flags = {}\n\n # Avoid grabbing the lock in the case where there's nothing for us to do.\n config_options = _config_options\n if config_options and not force_reparse:\n return config_options\n\n with _config_lock:\n # Short-circuit if config files were parsed while we were waiting on\n # the lock.\n if _config_options and not force_reparse:\n return _config_options\n\n old_options = _config_options\n _config_options = copy.deepcopy(_config_options_template)\n\n # Values set in files later in the CONFIG_FILENAMES list overwrite those\n # set earlier.\n for filename in CONFIG_FILENAMES:\n if not os.path.exists(filename):\n continue\n\n with open(filename, \"r\", encoding=\"utf-8\") as input:\n file_contents = input.read()\n\n _update_config_with_toml(file_contents, filename)\n\n for opt_name, opt_val in options_from_flags.items():\n _set_option(opt_name, opt_val, _DEFINED_BY_FLAG)\n\n if old_options and config_util.server_option_changed(\n old_options, _config_options\n ):\n # Import logger locally to prevent circular references.\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n LOGGER.warning(\n \"An update to the [server] config option section was detected.\"\n \" To have these changes be reflected, please restart streamlit.\"\n )\n\n _on_config_parsed.send()\n return _config_options\n\n\ndef _check_conflicts() -> None:\n # Node-related conflicts\n\n # When using the Node server, we must always connect to 8501 (this is\n # hard-coded in JS). 
Otherwise, the browser would decide what port to\n # connect to based on window.location.port, which in dev is going to\n # be (3000)\n\n # Import logger locally to prevent circular references\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n\n if get_option(\"global.developmentMode\"):\n assert _is_unset(\n \"server.port\"\n ), \"server.port does not work when global.developmentMode is true.\"\n\n assert _is_unset(\"browser.serverPort\"), (\n \"browser.serverPort does not work when global.developmentMode is \" \"true.\"\n )\n\n # XSRF conflicts\n if get_option(\"server.enableXsrfProtection\"):\n if not get_option(\"server.enableCORS\") or get_option(\"global.developmentMode\"):\n LOGGER.warning(\n \"\"\"\nWarning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'.\nAs a result, 'server.enableCORS' is being overridden to 'true'.\n\nMore information:\nIn order to protect against CSRF attacks, we send a cookie with each request.\nTo do so, we must specify allowable origins, which places a restriction on\ncross-origin resource sharing.\n\nIf cross origin resource sharing is required, please disable server.enableXsrfProtection.\n \"\"\"\n )\n\n\ndef _set_development_mode() -> None:\n development.is_development_mode = get_option(\"global.developmentMode\")\n\n\ndef on_config_parsed(\n func: Callable[[], None], force_connect=False, lock=False\n) -> Callable[[], bool]:\n \"\"\"Wait for the config file to be parsed then call func.\n\n If the config file has already been parsed, just calls func immediately\n unless force_connect is set.\n\n Parameters\n ----------\n func : Callable[[], None]\n A function to run on config parse.\n\n force_connect : bool\n Wait until the next config file parse to run func, even if config files\n have already been parsed.\n\n lock : bool\n If set, grab _config_lock before running func.\n\n Returns\n -------\n Callable[[], bool]\n A function that the caller can use to deregister func.\n \"\"\"\n\n # We need to use the same receiver when we connect or disconnect on the\n # Signal. If we don't do this, then the registered receiver won't be released\n # leading to a memory leak because the Signal will keep a reference of the\n # callable argument. When the callable argument is an object method, then\n # the reference to that object won't be released.\n receiver = lambda _: func_with_lock()\n\n def disconnect():\n return _on_config_parsed.disconnect(receiver)\n\n def func_with_lock():\n if lock:\n with _config_lock:\n func()\n else:\n func()\n\n if force_connect or not _config_options:\n # weak=False so that we have control of when the on_config_parsed\n # callback is deregistered.\n _on_config_parsed.connect(receiver, weak=False)\n else:\n func_with_lock()\n\n return disconnect\n\n\n# Run _check_conflicts only once the config file is parsed in order to avoid\n# loops. We also need to grab the lock when running _check_conflicts since it\n# may edit config options based on the values of other config options.\non_config_parsed(_check_conflicts, lock=True)\non_config_parsed(_set_development_mode)\n", "path": "lib/streamlit/config.py" } ]
diff --git a/lib/streamlit/config.py b/lib/streamlit/config.py index b1733341d2f9..b763ff4ba4bc 100644 --- a/lib/streamlit/config.py +++ b/lib/streamlit/config.py @@ -263,6 +263,7 @@ def _global_development_mode() -> bool: not env_util.is_pex() and "site-packages" not in __file__ and "dist-packages" not in __file__ + and "__pypackages__" not in __file__ )
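The diff above adds `__pypackages__` to the install-location markers checked by `_global_development_mode`. A standalone sketch of that heuristic (illustrative only, not Streamlit's actual code; the `env_util.is_pex()` check is omitted and the helper name is made up):

```python
_INSTALL_DIR_MARKERS = ("site-packages", "dist-packages", "__pypackages__")

def looks_like_dev_checkout(module_file: str) -> bool:
    """True when the module path is outside the usual installed-package dirs."""
    return not any(marker in module_file for marker in _INSTALL_DIR_MARKERS)

# An installed copy resolves to an install directory, so not development mode:
print(looks_like_dev_checkout("/env/lib/python3.10/site-packages/streamlit/config.py"))  # False
# A PEP 582 local __pypackages__ install is now also treated as non-development:
print(looks_like_dev_checkout("/app/__pypackages__/3.10/lib/streamlit/config.py"))       # False
# A source checkout matches none of the markers, so development mode applies:
print(looks_like_dev_checkout("/home/dev/streamlit/lib/streamlit/config.py"))            # True
```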
spyder-ide__spyder-4602
Move to support only Rope 0.10.5+ That's because 0.10.5 is the first version to support Python 2 and 3 in the same package.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific PYthon Development EnviRonment\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport subprocess\nimport sys\nimport shutil\n\nfrom distutils.core import setup\nfrom distutils.command.build import build\nfrom distutils.command.install import install\nfrom distutils.command.install_data import install_data\n\n\n#==============================================================================\n# Check for Python 3\n#==============================================================================\nPY3 = sys.version_info[0] == 3\n\n\n#==============================================================================\n# Minimal Python version sanity check\n# Taken from the notebook setup.py -- Modified BSD License\n#==============================================================================\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: Spyder requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\n#==============================================================================\n# Constants\n#==============================================================================\nNAME = 'spyder'\nLIBNAME = 'spyder'\nfrom spyder import __version__, __project_url__\n\n\n#==============================================================================\n# Auxiliary functions\n#==============================================================================\ndef get_package_data(name, extlist):\n \"\"\"Return data files for package *name* with extensions in *extlist*\"\"\"\n flist = []\n # Workaround to replace os.path.relpath (not available until Python 2.6):\n offset = len(name)+len(os.pathsep)\n for dirpath, _dirnames, filenames in os.walk(name):\n for fname in filenames:\n if not fname.startswith('.') and osp.splitext(fname)[1] in extlist:\n flist.append(osp.join(dirpath, fname)[offset:])\n return flist\n\n\ndef get_subpackages(name):\n \"\"\"Return subpackages of package *name*\"\"\"\n splist = []\n for dirpath, _dirnames, _filenames in os.walk(name):\n if osp.isfile(osp.join(dirpath, '__init__.py')):\n splist.append(\".\".join(dirpath.split(os.sep)))\n return splist\n\n\ndef get_data_files():\n \"\"\"Return data_files in a platform dependent manner\"\"\"\n if sys.platform.startswith('linux'):\n if PY3:\n data_files = [('share/applications', ['scripts/spyder3.desktop']),\n ('share/pixmaps', ['img_src/spyder3.png']),\n ('share/metainfo', ['scripts/spyder3.appdata.xml'])]\n else:\n data_files = [('share/applications', ['scripts/spyder.desktop']),\n ('share/pixmaps', ['img_src/spyder.png'])]\n elif os.name == 'nt':\n data_files = [('scripts', ['img_src/spyder.ico',\n 'img_src/spyder_reset.ico'])]\n else:\n data_files = []\n return data_files\n\n\ndef get_packages():\n \"\"\"Return package list\"\"\"\n packages = (\n get_subpackages(LIBNAME)\n + get_subpackages('spyder_breakpoints')\n + get_subpackages('spyder_profiler')\n + get_subpackages('spyder_pylint')\n + get_subpackages('spyder_io_dcm')\n + get_subpackages('spyder_io_hdf5')\n )\n return packages\n\n\n#==============================================================================\n# Make Linux detect Spyder desktop file\n#==============================================================================\nclass 
MyInstallData(install_data):\n def run(self):\n install_data.run(self)\n if sys.platform.startswith('linux'):\n try:\n subprocess.call(['update-desktop-database'])\n except:\n print(\"ERROR: unable to update desktop database\",\n file=sys.stderr)\nCMDCLASS = {'install_data': MyInstallData}\n\n\n#==============================================================================\n# Sphinx build (documentation)\n#==============================================================================\ndef get_html_help_exe():\n \"\"\"Return HTML Help Workshop executable path (Windows only)\"\"\"\n if os.name == 'nt':\n hhc_base = r'C:\\Program Files%s\\HTML Help Workshop\\hhc.exe'\n for hhc_exe in (hhc_base % '', hhc_base % ' (x86)'):\n if osp.isfile(hhc_exe):\n return hhc_exe\n else:\n return\n\ntry:\n from sphinx import setup_command\n\n class MyBuild(build):\n user_options = [('no-doc', None, \"Don't build Spyder documentation\")] \\\n + build.user_options\n def __init__(self, *args, **kwargs):\n build.__init__(self, *args, **kwargs)\n self.no_doc = False\n def with_doc(self):\n setup_dir = os.path.dirname(os.path.abspath(__file__))\n is_doc_dir = os.path.isdir(os.path.join(setup_dir, 'doc'))\n install_obj = self.distribution.get_command_obj('install')\n return (is_doc_dir and not self.no_doc and not install_obj.no_doc)\n sub_commands = build.sub_commands + [('build_doc', with_doc)]\n CMDCLASS['build'] = MyBuild\n\n\n class MyInstall(install):\n user_options = [('no-doc', None, \"Don't build Spyder documentation\")] \\\n + install.user_options\n def __init__(self, *args, **kwargs):\n install.__init__(self, *args, **kwargs)\n self.no_doc = False\n CMDCLASS['install'] = MyInstall\n\n\n class MyBuildDoc(setup_command.BuildDoc):\n def run(self):\n build = self.get_finalized_command('build')\n sys.path.insert(0, os.path.abspath(build.build_lib))\n dirname = self.distribution.get_command_obj('build').build_purelib\n self.builder_target_dir = osp.join(dirname, 'spyder', 'doc')\n\n if not osp.exists(self.builder_target_dir):\n os.mkdir(self.builder_target_dir)\n\n hhc_exe = get_html_help_exe()\n self.builder = \"html\" if hhc_exe is None else \"htmlhelp\"\n\n try:\n setup_command.BuildDoc.run(self)\n except UnicodeDecodeError:\n print(\"ERROR: unable to build documentation because Sphinx \"\\\n \"do not handle source path with non-ASCII characters. 
\"\\\n \"Please try to move the source package to another \"\\\n \"location (path with *only* ASCII characters).\",\n file=sys.stderr)\n sys.path.pop(0)\n\n # Building chm doc, if HTML Help Workshop is installed\n if hhc_exe is not None:\n fname = osp.join(self.builder_target_dir, 'Spyderdoc.chm')\n subprocess.call('\"%s\" %s' % (hhc_exe, fname), shell=True)\n if osp.isfile(fname):\n dest = osp.join(dirname, 'spyder')\n try:\n shutil.move(fname, dest)\n except shutil.Error:\n print(\"Unable to replace %s\" % dest)\n shutil.rmtree(self.builder_target_dir)\n\n CMDCLASS['build_doc'] = MyBuildDoc\nexcept ImportError:\n print('WARNING: unable to build documentation because Sphinx '\\\n 'is not installed', file=sys.stderr)\n\n\n#==============================================================================\n# Main scripts\n#==============================================================================\n# NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows\n# platforms due to a bug in pip installation process (see Issue 1158)\nSCRIPTS = ['%s_win_post_install.py' % NAME]\nif PY3 and sys.platform.startswith('linux'):\n SCRIPTS.append('spyder3')\nelse:\n SCRIPTS.append('spyder')\n\n\n#==============================================================================\n# Files added to the package\n#==============================================================================\nEXTLIST = ['.mo', '.svg', '.png', '.css', '.html', '.js', '.chm', '.ini',\n '.txt', '.rst', '.qss', '.ttf', '.json', '.c', '.cpp', '.java',\n '.md', '.R', '.csv', '.pyx', '.ipynb']\nif os.name == 'nt':\n SCRIPTS += ['spyder.bat']\n EXTLIST += ['.ico']\n\n\n#==============================================================================\n# Setup arguments\n#==============================================================================\nsetup_args = dict(name=NAME,\n version=__version__,\n description='Scientific PYthon Development EnviRonment',\n long_description=\n\"\"\"Spyder is an interactive Python development environment providing\nMATLAB-like features in a simple and light-weighted software.\nIt also provides ready-to-use pure-Python widgets to your PyQt5 or\nPyQt4 application: source code editor with syntax highlighting and\ncode introspection/analysis features, NumPy array editor, dictionary\neditor, Python console, etc.\"\"\",\n download_url='%s/files/%s-%s.zip' % (__project_url__, NAME, __version__),\n author=\"The Spyder Project Contributors\",\n url=__project_url__,\n license='MIT',\n keywords='PyQt5 PyQt4 editor shell console widgets IDE',\n platforms=['any'],\n packages=get_packages(),\n package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST),\n 'spyder_breakpoints': get_package_data('spyder_breakpoints', EXTLIST),\n 'spyder_profiler': get_package_data('spyder_profiler', EXTLIST),\n 'spyder_pylint': get_package_data('spyder_pylint', EXTLIST),\n 'spyder_io_dcm': get_package_data('spyder_io_dcm', EXTLIST),\n 'spyder_io_hdf5': get_package_data('spyder_io_hdf5', EXTLIST),\n },\n scripts=[osp.join('scripts', fname) for fname in SCRIPTS],\n data_files=get_data_files(),\n classifiers=['License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Development Status :: 5 - Production/Stable',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Widget Sets'],\n 
cmdclass=CMDCLASS)\n\n\n#==============================================================================\n# Setuptools deps\n#==============================================================================\nif any(arg == 'bdist_wheel' for arg in sys.argv):\n import setuptools # analysis:ignore\n\ninstall_requires = [\n 'rope_py3k' if PY3 else 'rope>=0.9.4',\n 'jedi>=0.9.0',\n 'pyflakes',\n 'pygments>=2.0',\n 'qtconsole>=4.2.0',\n 'nbconvert',\n 'sphinx',\n 'pycodestyle',\n 'pylint',\n 'psutil',\n 'qtawesome>=0.4.1',\n 'qtpy>=1.1.0',\n 'pickleshare',\n 'pyzmq',\n 'chardet>=2.0.0',\n 'numpydoc',\n]\n\nextras_require = {\n 'test:python_version == \"2.7\"': ['mock'],\n 'test': ['pytest',\n 'pytest-qt',\n 'pytest-cov',\n 'pytest-xvfb',\n 'mock',\n 'flaky',\n 'pandas',\n 'scipy',\n 'sympy',\n 'pillow',\n 'matplotlib',\n 'cython'],\n}\n\nif 'setuptools' in sys.modules:\n setup_args['install_requires'] = install_requires\n setup_args['extras_require'] = extras_require\n\n setup_args['entry_points'] = {\n 'gui_scripts': [\n '{} = spyder.app.start:main'.format(\n 'spyder3' if PY3 else 'spyder')\n ]\n }\n\n setup_args.pop('scripts', None)\n\n\n#==============================================================================\n# Main setup\n#==============================================================================\nsetup(**setup_args)\n", "path": "setup.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific PYthon Development EnviRonment\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport subprocess\nimport sys\nimport shutil\n\nfrom distutils.core import setup\nfrom distutils.command.build import build\nfrom distutils.command.install import install\nfrom distutils.command.install_data import install_data\n\n\n#==============================================================================\n# Check for Python 3\n#==============================================================================\nPY3 = sys.version_info[0] == 3\n\n\n#==============================================================================\n# Minimal Python version sanity check\n# Taken from the notebook setup.py -- Modified BSD License\n#==============================================================================\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: Spyder requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\n#==============================================================================\n# Constants\n#==============================================================================\nNAME = 'spyder'\nLIBNAME = 'spyder'\nfrom spyder import __version__, __project_url__\n\n\n#==============================================================================\n# Auxiliary functions\n#==============================================================================\ndef get_package_data(name, extlist):\n \"\"\"Return data files for package *name* with extensions in *extlist*\"\"\"\n flist = []\n # Workaround to replace os.path.relpath (not available until Python 2.6):\n offset = len(name)+len(os.pathsep)\n for dirpath, _dirnames, filenames in os.walk(name):\n for fname in filenames:\n if not fname.startswith('.') and osp.splitext(fname)[1] in extlist:\n flist.append(osp.join(dirpath, fname)[offset:])\n return flist\n\n\ndef get_subpackages(name):\n \"\"\"Return subpackages of package *name*\"\"\"\n splist = []\n for dirpath, _dirnames, _filenames in os.walk(name):\n if osp.isfile(osp.join(dirpath, '__init__.py')):\n splist.append(\".\".join(dirpath.split(os.sep)))\n return splist\n\n\ndef get_data_files():\n \"\"\"Return data_files in a platform dependent manner\"\"\"\n if sys.platform.startswith('linux'):\n if PY3:\n data_files = [('share/applications', ['scripts/spyder3.desktop']),\n ('share/pixmaps', ['img_src/spyder3.png']),\n ('share/metainfo', ['scripts/spyder3.appdata.xml'])]\n else:\n data_files = [('share/applications', ['scripts/spyder.desktop']),\n ('share/pixmaps', ['img_src/spyder.png'])]\n elif os.name == 'nt':\n data_files = [('scripts', ['img_src/spyder.ico',\n 'img_src/spyder_reset.ico'])]\n else:\n data_files = []\n return data_files\n\n\ndef get_packages():\n \"\"\"Return package list\"\"\"\n packages = (\n get_subpackages(LIBNAME)\n + get_subpackages('spyder_breakpoints')\n + get_subpackages('spyder_profiler')\n + get_subpackages('spyder_pylint')\n + get_subpackages('spyder_io_dcm')\n + get_subpackages('spyder_io_hdf5')\n )\n return packages\n\n\n#==============================================================================\n# Make Linux detect Spyder desktop file\n#==============================================================================\nclass 
MyInstallData(install_data):\n def run(self):\n install_data.run(self)\n if sys.platform.startswith('linux'):\n try:\n subprocess.call(['update-desktop-database'])\n except:\n print(\"ERROR: unable to update desktop database\",\n file=sys.stderr)\nCMDCLASS = {'install_data': MyInstallData}\n\n\n#==============================================================================\n# Sphinx build (documentation)\n#==============================================================================\ndef get_html_help_exe():\n \"\"\"Return HTML Help Workshop executable path (Windows only)\"\"\"\n if os.name == 'nt':\n hhc_base = r'C:\\Program Files%s\\HTML Help Workshop\\hhc.exe'\n for hhc_exe in (hhc_base % '', hhc_base % ' (x86)'):\n if osp.isfile(hhc_exe):\n return hhc_exe\n else:\n return\n\ntry:\n from sphinx import setup_command\n\n class MyBuild(build):\n user_options = [('no-doc', None, \"Don't build Spyder documentation\")] \\\n + build.user_options\n def __init__(self, *args, **kwargs):\n build.__init__(self, *args, **kwargs)\n self.no_doc = False\n def with_doc(self):\n setup_dir = os.path.dirname(os.path.abspath(__file__))\n is_doc_dir = os.path.isdir(os.path.join(setup_dir, 'doc'))\n install_obj = self.distribution.get_command_obj('install')\n return (is_doc_dir and not self.no_doc and not install_obj.no_doc)\n sub_commands = build.sub_commands + [('build_doc', with_doc)]\n CMDCLASS['build'] = MyBuild\n\n\n class MyInstall(install):\n user_options = [('no-doc', None, \"Don't build Spyder documentation\")] \\\n + install.user_options\n def __init__(self, *args, **kwargs):\n install.__init__(self, *args, **kwargs)\n self.no_doc = False\n CMDCLASS['install'] = MyInstall\n\n\n class MyBuildDoc(setup_command.BuildDoc):\n def run(self):\n build = self.get_finalized_command('build')\n sys.path.insert(0, os.path.abspath(build.build_lib))\n dirname = self.distribution.get_command_obj('build').build_purelib\n self.builder_target_dir = osp.join(dirname, 'spyder', 'doc')\n\n if not osp.exists(self.builder_target_dir):\n os.mkdir(self.builder_target_dir)\n\n hhc_exe = get_html_help_exe()\n self.builder = \"html\" if hhc_exe is None else \"htmlhelp\"\n\n try:\n setup_command.BuildDoc.run(self)\n except UnicodeDecodeError:\n print(\"ERROR: unable to build documentation because Sphinx \"\\\n \"do not handle source path with non-ASCII characters. 
\"\\\n \"Please try to move the source package to another \"\\\n \"location (path with *only* ASCII characters).\",\n file=sys.stderr)\n sys.path.pop(0)\n\n # Building chm doc, if HTML Help Workshop is installed\n if hhc_exe is not None:\n fname = osp.join(self.builder_target_dir, 'Spyderdoc.chm')\n subprocess.call('\"%s\" %s' % (hhc_exe, fname), shell=True)\n if osp.isfile(fname):\n dest = osp.join(dirname, 'spyder')\n try:\n shutil.move(fname, dest)\n except shutil.Error:\n print(\"Unable to replace %s\" % dest)\n shutil.rmtree(self.builder_target_dir)\n\n CMDCLASS['build_doc'] = MyBuildDoc\nexcept ImportError:\n print('WARNING: unable to build documentation because Sphinx '\\\n 'is not installed', file=sys.stderr)\n\n\n#==============================================================================\n# Main scripts\n#==============================================================================\n# NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows\n# platforms due to a bug in pip installation process (see Issue 1158)\nSCRIPTS = ['%s_win_post_install.py' % NAME]\nif PY3 and sys.platform.startswith('linux'):\n SCRIPTS.append('spyder3')\nelse:\n SCRIPTS.append('spyder')\n\n\n#==============================================================================\n# Files added to the package\n#==============================================================================\nEXTLIST = ['.mo', '.svg', '.png', '.css', '.html', '.js', '.chm', '.ini',\n '.txt', '.rst', '.qss', '.ttf', '.json', '.c', '.cpp', '.java',\n '.md', '.R', '.csv', '.pyx', '.ipynb']\nif os.name == 'nt':\n SCRIPTS += ['spyder.bat']\n EXTLIST += ['.ico']\n\n\n#==============================================================================\n# Setup arguments\n#==============================================================================\nsetup_args = dict(name=NAME,\n version=__version__,\n description='Scientific PYthon Development EnviRonment',\n long_description=\n\"\"\"Spyder is an interactive Python development environment providing\nMATLAB-like features in a simple and light-weighted software.\nIt also provides ready-to-use pure-Python widgets to your PyQt5 or\nPyQt4 application: source code editor with syntax highlighting and\ncode introspection/analysis features, NumPy array editor, dictionary\neditor, Python console, etc.\"\"\",\n download_url='%s/files/%s-%s.zip' % (__project_url__, NAME, __version__),\n author=\"The Spyder Project Contributors\",\n url=__project_url__,\n license='MIT',\n keywords='PyQt5 PyQt4 editor shell console widgets IDE',\n platforms=['any'],\n packages=get_packages(),\n package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST),\n 'spyder_breakpoints': get_package_data('spyder_breakpoints', EXTLIST),\n 'spyder_profiler': get_package_data('spyder_profiler', EXTLIST),\n 'spyder_pylint': get_package_data('spyder_pylint', EXTLIST),\n 'spyder_io_dcm': get_package_data('spyder_io_dcm', EXTLIST),\n 'spyder_io_hdf5': get_package_data('spyder_io_hdf5', EXTLIST),\n },\n scripts=[osp.join('scripts', fname) for fname in SCRIPTS],\n data_files=get_data_files(),\n classifiers=['License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Development Status :: 5 - Production/Stable',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Widget Sets'],\n 
cmdclass=CMDCLASS)\n\n\n#==============================================================================\n# Setuptools deps\n#==============================================================================\nif any(arg == 'bdist_wheel' for arg in sys.argv):\n import setuptools # analysis:ignore\n\ninstall_requires = [\n 'rope>=0.10.5',\n 'jedi>=0.9.0',\n 'pyflakes',\n 'pygments>=2.0',\n 'qtconsole>=4.2.0',\n 'nbconvert',\n 'sphinx',\n 'pycodestyle',\n 'pylint',\n 'psutil',\n 'qtawesome>=0.4.1',\n 'qtpy>=1.1.0',\n 'pickleshare',\n 'pyzmq',\n 'chardet>=2.0.0',\n 'numpydoc',\n]\n\nextras_require = {\n 'test:python_version == \"2.7\"': ['mock'],\n 'test': ['pytest',\n 'pytest-qt',\n 'pytest-cov',\n 'pytest-xvfb',\n 'mock',\n 'flaky',\n 'pandas',\n 'scipy',\n 'sympy',\n 'pillow',\n 'matplotlib',\n 'cython'],\n}\n\nif 'setuptools' in sys.modules:\n setup_args['install_requires'] = install_requires\n setup_args['extras_require'] = extras_require\n\n setup_args['entry_points'] = {\n 'gui_scripts': [\n '{} = spyder.app.start:main'.format(\n 'spyder3' if PY3 else 'spyder')\n ]\n }\n\n setup_args.pop('scripts', None)\n\n\n#==============================================================================\n# Main setup\n#==============================================================================\nsetup(**setup_args)\n", "path": "setup.py" } ]
diff --git a/README.md b/README.md index ebdc37c1983..91636309080 100644 --- a/README.md +++ b/README.md @@ -143,7 +143,7 @@ a Python version greater than 2.7 (Python 3.2 is not supported anymore). * **Python** 2.7 or 3.3+ * **PyQt5** 5.2+ or **PyQt4** 4.6+: PyQt5 is recommended. * **qtconsole** 4.2.0+: Enhanced Python interpreter. -* **Rope** and **Jedi**: Editor code completion, calltips +* **Rope** 0.10.5+ and **Jedi** 0.9.0+: Editor code completion, calltips and go-to-definition. * **Pyflakes**: Real-time code analysis. * **Sphinx**: Rich text mode for the Help pane. diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index 1d019baf85c..4f208a914bc 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -18,10 +18,9 @@ requirements: - python.app # [osx] - pyqt >=5.6.0 # [osx] - pyqt # [not osx] - - rope 0.9.* # [py34 or py35] - - rope # [py27] + - rope >=0.10.5 - pyflakes - - jedi + - jedi >=0.9.0 - qtconsole >=4.2.0 - nbconvert - pygments >=2.0 diff --git a/doc/installation.rst b/doc/installation.rst index 1b255ddae28..788107fd528 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -160,7 +160,7 @@ The requirements to run Spyder are: * `Qtconsole <http://jupyter.org/qtconsole/stable/>`_ >=4.2.0 -- for an enhanced Python interpreter. -* `Rope <http://rope.sourceforge.net/>`_ >=0.9.4 and +* `Rope <http://rope.sourceforge.net/>`_ >=0.10.5 and `Jedi <http://jedi.jedidjah.ch/en/latest/>`_ >=0.9.0 -- for code completion, go-to-definition and calltips on the Editor. diff --git a/requirements/requirements.txt b/requirements/requirements.txt index a94aacd1b16..5d335425ef0 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,4 +1,4 @@ -rope>=0.9.4 +rope>=0.10.5 jedi>=0.9.0 pyflakes pygments>=2.0 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000000..3c6e79cf31d --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 diff --git a/setup.py b/setup.py index a34462ed253..c340679da93 100644 --- a/setup.py +++ b/setup.py @@ -272,7 +272,7 @@ def run(self): import setuptools # analysis:ignore install_requires = [ - 'rope_py3k' if PY3 else 'rope>=0.9.4', + 'rope>=0.10.5', 'jedi>=0.9.0', 'pyflakes', 'pygments>=2.0',
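As a quick illustration of the new unified requirement (a hedged sketch, not Spyder's code; Spyder itself only declares `rope>=0.10.5` in `install_requires`), a runtime check for the minimum rope version could look like this:

```python
# With rope 0.10.5+ the same package supports Python 2 and 3, so one
# specifier replaces the old rope / rope_py3k split.
from pkg_resources import DistributionNotFound, get_distribution, parse_version

MINIMUM_ROPE = "0.10.5"

def rope_satisfies_requirement() -> bool:
    """Return True if an installed rope meets the new minimum version."""
    try:
        installed = get_distribution("rope").version
    except DistributionNotFound:
        return False
    return parse_version(installed) >= parse_version(MINIMUM_ROPE)

print(rope_satisfies_requirement())
```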
google__flax-2407
Outdated `rich` dependency version The version of `rich` is currently limited to `rich~=11.1`, causing problems with `pip` dependency resolution when installing with other packages. https://github.com/google/flax/blob/cda7a4c85bbce744e412ab82e298ddf76d4770d2/setup.py#L33 Should be a trivial fix since `flax.linen.summary` doesn't seem to need any changes, I'll open a PR.
[ { "content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding=\"utf-8\").read()\nexcept OSError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.3.16\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n \"rich~=11.1\",\n \"typing_extensions>=4.1.1\",\n \"PyYAML>=5.4.1\",\n]\n\ntests_require = [\n \"atari-py==0.2.5\", # Last version does not have the ROMs we test on pre-packaged\n \"clu\", # All examples.\n \"gym==0.18.3\",\n \"jaxlib\",\n \"jraph>=0.0.6dev0\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-custom_exit_code\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"pytype\",\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow_text>=2.4.0\", # WMT example.\n \"tensorflow_datasets\",\n \"tensorflow\",\n \"torch\",\n]\n\n__version__ = None\n\nwith open(\"flax/version.py\") as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n package_data={\"flax\": [\"py.typed\"]},\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding=\"utf-8\").read()\nexcept OSError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.3.16\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n \"rich>=11.1\",\n \"typing_extensions>=4.1.1\",\n \"PyYAML>=5.4.1\",\n]\n\ntests_require = [\n \"atari-py==0.2.5\", # Last version does not have the ROMs we test on pre-packaged\n \"clu\", # All examples.\n \"gym==0.18.3\",\n \"jaxlib\",\n \"jraph>=0.0.6dev0\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-custom_exit_code\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"pytype\",\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow_text>=2.4.0\", # WMT example.\n \"tensorflow_datasets\",\n \"tensorflow\",\n \"torch\",\n]\n\n__version__ = None\n\nwith open(\"flax/version.py\") as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n package_data={\"flax\": [\"py.typed\"]},\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 8e4a6e8ba..c0a508e8f 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ "matplotlib", # only needed for tensorboard export "msgpack", "optax", - "rich~=11.1", + "rich>=11.1", "typing_extensions>=4.1.1", "PyYAML>=5.4.1", ]
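To see why the one-character change matters, here is a small demonstration of the two specifiers (assumes the `packaging` library is available; this is illustrative and not part of Flax):

```python
from packaging.specifiers import SpecifierSet

old_spec = SpecifierSet("~=11.1")   # equivalent to >=11.1, ==11.*  (caps rich below 12)
new_spec = SpecifierSet(">=11.1")   # accepts any newer release

for version in ("11.2", "12.6.0"):
    print(version, version in old_spec, version in new_spec)
# 11.2   -> allowed by both specifiers
# 12.6.0 -> rejected by ~=11.1 but allowed by >=11.1, which is what lets pip
#           resolve flax alongside packages that need a more recent rich
```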
pyca__cryptography-1599
Update year in copyright notice for vectors Refs #1597
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.8.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2014 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py" } ]
[ { "content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.8.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py" } ]
diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py index aa6fce09e639..f17d7b8e2f8f 100644 --- a/vectors/cryptography_vectors/__about__.py +++ b/vectors/cryptography_vectors/__about__.py @@ -20,4 +20,4 @@ __email__ = "[email protected]" __license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2013-2014 %s" % __author__ +__copyright__ = "Copyright 2013-2015 %s" % __author__
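The fix above simply bumps the hard-coded end year. A hypothetical alternative (not what the project does) would be to derive the end year at import time so the notice cannot go stale between releases:

```python
import datetime

__author__ = "The cryptography developers"
_START_YEAR = 2013
_CURRENT_YEAR = datetime.date.today().year  # end year computed at import time
__copyright__ = "Copyright %d-%d %s" % (_START_YEAR, _CURRENT_YEAR, __author__)

print(__copyright__)
```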
googleapis__google-api-python-client-871
AttributeError: module 'googleapiclient' has no attribute '__version__' When importing the new version of google-api-python-client with `from apiclient import discovery`, I'm getting the error `AttributeError: module 'googleapiclient' has no attribute '__version__'` https://github.com/googleapis/google-api-python-client/blob/84d45619d753cb04d957651886231034194058b6/apiclient/__init__.py#L22 I guess this happens since you have removed the `__version__` var from `googleapiclient/__init__.py` https://github.com/googleapis/google-api-python-client/commit/f706cfd821ab7457e5db37abfc3619772657dd0e#diff-b926d296d4c856bcbf877809e4523562L15 Can you please fix? @busunkim96 @mik-laj @crwilcox Traceback:
```
from apiclient import discovery
  File "/usr/local/lib/python3.7/site-packages/apiclient/__init__.py", line 22, in <module>
    __version__ = googleapiclient.__version__
AttributeError: module 'googleapiclient' has no attribute '__version__'
```
[ { "content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mimeparse\nfrom googleapiclient import model\n\ntry:\n from googleapiclient import sample_tools\nexcept ImportError:\n # Silently ignore, because the vast majority of consumers won't use it and\n # it has deep dependence on oauth2client, an optional dependency.\n sample_tools = None\nfrom googleapiclient import schema\n\n__version__ = googleapiclient.__version__\n\n_SUBMODULES = {\n \"channel\": channel,\n \"discovery\": discovery,\n \"errors\": errors,\n \"http\": http,\n \"mimeparse\": mimeparse,\n \"model\": model,\n \"sample_tools\": sample_tools,\n \"schema\": schema,\n}\n\nimport sys\n\nfor module_name, module in iteritems(_SUBMODULES):\n sys.modules[\"apiclient.%s\" % module_name] = module\n", "path": "apiclient/__init__.py" } ]
[ { "content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mimeparse\nfrom googleapiclient import model\n\ntry:\n from googleapiclient import sample_tools\nexcept ImportError:\n # Silently ignore, because the vast majority of consumers won't use it and\n # it has deep dependence on oauth2client, an optional dependency.\n sample_tools = None\nfrom googleapiclient import schema\n\n_SUBMODULES = {\n \"channel\": channel,\n \"discovery\": discovery,\n \"errors\": errors,\n \"http\": http,\n \"mimeparse\": mimeparse,\n \"model\": model,\n \"sample_tools\": sample_tools,\n \"schema\": schema,\n}\n\nimport sys\n\nfor module_name, module in iteritems(_SUBMODULES):\n sys.modules[\"apiclient.%s\" % module_name] = module\n", "path": "apiclient/__init__.py" } ]
diff --git a/apiclient/__init__.py b/apiclient/__init__.py index 38dd24b111c..8d9c4ecb8f3 100644 --- a/apiclient/__init__.py +++ b/apiclient/__init__.py @@ -19,8 +19,6 @@ sample_tools = None from googleapiclient import schema -__version__ = googleapiclient.__version__ - _SUBMODULES = { "channel": channel, "discovery": discovery,
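The fix above simply drops the `__version__` re-export from the `apiclient` shim. If downstream code still needs a version string, one hedged alternative (not what this PR does) is to query the installed distribution rather than a module attribute:

```python
# Sketch: read the installed distribution's version via the standard library
# (importlib.metadata, Python 3.8+), with a fallback if it is not installed.
from importlib.metadata import PackageNotFoundError, version

try:
    v = version("google-api-python-client")  # distribution name, not import name
except PackageNotFoundError:
    v = "unknown"
print(v)
```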
scikit-hep__awkward-1830
`ak.fill_none(axis=None)` does nothing ### Version of Awkward Array main ### Description and code to reproduce The `apply` function for this case does not return (or recurse)
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in which to replace None with a given value.\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following `array`,\n\n ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.type(ak.fill_none(array, \"hi\"))\n 3 * var * union[float64, string]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n nplike = ak.nplikes.nplike_of(arraylayout)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n nplike.array([0, 0], dtype=np.int64), nplike=nplike\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_OptionType:\n return layout.fill_none(valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def 
action(layout, depth, depth_context, **kwargs):\n layout = maybe_fillna(layout)\n\n else:\n\n def action(layout, depth, depth_context, **kwargs):\n posaxis = layout.axis_wrap_if_negative(depth_context[\"posaxis\"])\n depth_context[\"posaxis\"] = posaxis\n if posaxis + 1 < depth:\n return layout\n elif posaxis + 1 == depth:\n return maybe_fillna(layout)\n\n depth_context = {\"posaxis\": axis}\n out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)\n\n return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py" } ]
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in which to replace None with a given value.\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following `array`,\n\n ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.type(ak.fill_none(array, \"hi\"))\n 3 * var * union[float64, string]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n nplike = ak.nplikes.nplike_of(arraylayout)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n nplike.array([0, 0], dtype=np.int64), nplike=nplike\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_OptionType:\n return layout.fill_none(valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def 
action(layout, continuation, **kwargs):\n return maybe_fillna(continuation())\n\n else:\n\n def action(layout, depth, depth_context, **kwargs):\n posaxis = layout.axis_wrap_if_negative(depth_context[\"posaxis\"])\n depth_context[\"posaxis\"] = posaxis\n if posaxis + 1 < depth:\n return layout\n elif posaxis + 1 == depth:\n return maybe_fillna(layout)\n\n depth_context = {\"posaxis\": axis}\n out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)\n\n return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py" } ]
diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py index 0e9c90d745..8b6e7ce3c8 100644 --- a/src/awkward/operations/ak_fill_none.py +++ b/src/awkward/operations/ak_fill_none.py @@ -107,8 +107,8 @@ def maybe_fillna(layout): if axis is None: - def action(layout, depth, depth_context, **kwargs): - layout = maybe_fillna(layout) + def action(layout, continuation, **kwargs): + return maybe_fillna(continuation()) else: diff --git a/tests/test_1823-fill-none-axis-none.py b/tests/test_1823-fill-none-axis-none.py new file mode 100644 index 0000000000..82ca883b57 --- /dev/null +++ b/tests/test_1823-fill-none-axis-none.py @@ -0,0 +1,15 @@ +# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE + +import numpy as np # noqa: F401 +import pytest # noqa: F401 + +import awkward as ak # noqa: F401 + + +def test(): + array = ak.Array([None, [1, 2, 3, [None, {"x": [None, 2], "y": [1, 4]}]]]) + + assert ak.fill_none(array, -1.0, axis=None).to_list() == [ + -1.0, + [1, 2, 3, [-1.0, {"x": [-1.0, 2], "y": [1, 4]}]], + ]
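The regression test added in the diff doubles as a usage example; shown standalone below (it assumes an Awkward Array build that already contains this fix):

```python
import awkward as ak

array = ak.Array([None, [1, 2, 3, [None, {"x": [None, 2], "y": [1, 4]}]]])

# With axis=None the replacement value now reaches every level of nesting.
assert ak.fill_none(array, -1.0, axis=None).to_list() == [
    -1.0,
    [1, 2, 3, [-1.0, {"x": [-1.0, 2], "y": [1, 4]}]],
]
```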
ibis-project__ibis-3710
bug: repr for interval literals doesn't show unit information `interval` literal values don't show any information about their units, so all values show up looking the same: ``` In [4]: import ibis In [5]: ibis.interval(1, unit="s") Out[5]: 1 In [6]: ibis.interval(1, unit="h") Out[6]: 1 ```
[ { "content": "from __future__ import annotations\n\nimport collections\nimport functools\nimport textwrap\nimport types\nfrom typing import Any, Callable, Deque, Iterable, Mapping, Tuple\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nimport ibis.expr.window as win\nimport ibis.util as util\n\nAliases = Mapping[ops.TableNode, int]\nDeps = Deque[Tuple[int, ops.TableNode]]\n\n\nclass Alias:\n __slots__ = (\"value\",)\n\n def __init__(self, value: int) -> None:\n self.value = value\n\n def __str__(self) -> str:\n return f\"r{self.value}\"\n\n\ndef fmt(expr: ir.Expr) -> str:\n \"\"\"Format `expr`.\n\n Main entry point for the `Expr.__repr__` implementation.\n\n Returns\n -------\n str\n Formatted expression\n \"\"\"\n *deps, root = util.toposort(util.to_op_dag(expr))\n deps = collections.deque(\n (Alias(alias), dep)\n for alias, dep in enumerate(\n dep for dep in deps if isinstance(dep, ops.TableNode)\n )\n )\n\n aliases = {dep: alias for alias, dep in deps}\n pieces = []\n\n while deps:\n alias, node = deps.popleft()\n formatted = fmt_table_op(node, aliases=aliases, deps=deps)\n pieces.append(f\"{alias} := {formatted}\")\n\n pieces.append(\n fmt_root(root, name=expr._safe_name, aliases=aliases, deps=deps)\n )\n depth = ibis.options.repr.depth or 0\n if depth and depth < len(pieces):\n return fmt_truncated(pieces, depth=depth)\n return \"\\n\\n\".join(pieces)\n\n\ndef fmt_truncated(\n pieces: Iterable[str],\n *,\n depth: int,\n sep: str = \"\\n\\n\",\n ellipsis: str = util.VERTICAL_ELLIPSIS,\n) -> str:\n if depth == 1:\n return pieces[-1]\n\n first_n = depth // 2\n last_m = depth - first_n\n return sep.join([*pieces[:first_n], ellipsis, *pieces[-last_m:]])\n\n\ndef selection_maxlen(expressions: Iterable[ir.ValueExpr]) -> int:\n \"\"\"Compute the length of the longest name of input expressions.\n\n Parameters\n ----------\n expressions\n Expressions whose name to compute the maximum length of\n\n Returns\n -------\n int\n Max length\n \"\"\"\n try:\n return max(\n len(name)\n for expr in expressions\n if (name := expr._safe_name) is not None\n )\n except ValueError:\n return 0\n\n\[email protected]\ndef fmt_root(op: ops.Node, *, aliases: Aliases, **_: Any) -> str:\n \"\"\"Fallback formatting implementation.\"\"\"\n raw_parts = fmt_fields(\n op,\n dict.fromkeys(op.argnames, fmt_value),\n aliases=aliases,\n )\n return f\"{op.__class__.__name__}\\n{raw_parts}\"\n\n\n@fmt_root.register\ndef _fmt_root_table_node(op: ops.TableNode, **kwargs: Any) -> str:\n return fmt_table_op(op, **kwargs)\n\n\n@fmt_root.register\ndef _fmt_root_value_op(\n op: ops.ValueOp, *, name: str, aliases: Aliases, **_: Any\n) -> str:\n value = fmt_value(op, aliases=aliases)\n prefix = f\"{name}: \" if name is not None else \"\"\n return f\"{prefix}{value}{type_info(op.to_expr().type())}\"\n\n\n@fmt_root.register\ndef _fmt_foot_sort_key(op: ops.SortKey, *, aliases: Aliases, **_: Any) -> str:\n return fmt_value(op, aliases=aliases)\n\n\[email protected]\ndef fmt_table_op(op: ops.TableNode, **_: Any) -> str:\n assert False, f\"`fmt_table_op` not implemented for operation: {type(op)}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n top = f\"{op.__class__.__name__}: {op.name}\"\n formatted_schema = fmt_schema(op.schema)\n return f\"{top}\\n{formatted_schema}\"\n\n\ndef fmt_schema(schema: sch.Schema) -> str:\n \"\"\"Format `schema`.\n\n Parameters\n ----------\n schema\n Ibis 
schema to format\n\n Returns\n -------\n str\n Formatted schema\n \"\"\"\n names = schema.names\n maxlen = max(map(len, names))\n cols = [f\"{name:<{maxlen}} {typ}\" for name, typ in schema.items()]\n depth = ibis.options.repr.table_columns\n if depth is not None and depth < len(cols):\n first_column_name = names[0]\n raw = fmt_truncated(\n cols,\n depth=depth,\n sep=\"\\n\",\n ellipsis=util.VERTICAL_ELLIPSIS.center(len(first_column_name)),\n )\n else:\n raw = \"\\n\".join(cols)\n\n return util.indent(raw, spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_query_result(op: ops.SQLQueryResult, **_: Any) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}\\n{util.indent(query, spaces=2)}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_view(op: ops.View, *, aliases: Aliases, **_: Any) -> str:\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}[{aliases[op.child.op()]}]: {op.name}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_view(\n op: ops.SQLStringView,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n components = [\n f\"{top}[{aliases[op.child.op()]}]: {op.name}\",\n util.indent(query, spaces=2),\n schema_field,\n ]\n return \"\\n\".join(components)\n\n\[email protected]\ndef fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n assert False, f\"join type {type(op)} not implemented\"\n\n\n@fmt_join.register(ops.Join)\ndef _fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n # format the operator and its relation inputs\n left = aliases[op.left.op()]\n right = aliases[op.right.op()]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n\n # format the join predicates\n # if only one, put it directly after the join on thes same line\n # if more than one put each on a separate line\n preds = op.predicates\n formatted_preds = [fmt_value(pred, aliases=aliases) for pred in preds]\n has_one_pred = len(preds) == 1\n sep = \" \" if has_one_pred else \"\\n\"\n joined_predicates = util.indent(\n \"\\n\".join(formatted_preds),\n spaces=2 * (not has_one_pred),\n )\n trailing_sep = \"\\n\" + \"\\n\" * (not has_one_pred)\n return f\"{top}{sep}{joined_predicates}\", trailing_sep\n\n\n@fmt_join.register(ops.AsOfJoin)\ndef _fmt_asof_join(op: ops.AsOfJoin, *, aliases: Aliases) -> tuple[str, str]:\n left = aliases[op.left.op()]\n right = aliases[op.right.op()]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n raw_parts = fmt_fields(\n op,\n dict(predicates=fmt_value, by=fmt_value, tolerance=fmt_value),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\", \"\\n\\n\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_join(\n op: ops.Join,\n *,\n aliases: Aliases,\n deps: Deps,\n **_: Any,\n) -> str:\n # first, format the current join operation\n result, join_sep = fmt_join(op, aliases=aliases)\n 
formatted_joins = [result, join_sep]\n\n # process until the first non-Join dependency is popped in other words\n # process all runs of joins\n alias, current = None, None\n if deps:\n alias, current = deps.popleft()\n\n while isinstance(current, ops.Join):\n # copy the alias so that mutations to the value aren't shared\n # format the `current` join\n formatted_join, join_sep = fmt_join(current, aliases=aliases)\n formatted_joins.append(f\"{alias} := {formatted_join}\")\n formatted_joins.append(join_sep)\n\n if not deps:\n break\n\n alias, current = deps.popleft()\n\n if current is not None and not isinstance(current, ops.Join):\n # the last node popped from `deps` isn't a join which means we\n # still need to process it, so we put it at the front of the queue\n deps.appendleft((alias, current))\n\n # we don't want the last trailing separator so remove it from the end\n formatted_joins.pop()\n return \"\".join(formatted_joins)\n\n\n@fmt_table_op.register\ndef _(op: ops.CrossJoin, *, aliases: Aliases, **_: Any) -> str:\n left = aliases[op.left.op()]\n right = aliases[op.right.op()]\n return f\"{op.__class__.__name__}[{left}, {right}]\"\n\n\ndef _fmt_set_op(\n op: ops.SetOp,\n *,\n aliases: Aliases,\n distinct: bool | None = None,\n) -> str:\n args = [str(aliases[op.left.op()]), str(aliases[op.right.op()])]\n if distinct is not None:\n args.append(f\"distinct={distinct}\")\n return f\"{op.__class__.__name__}[{', '.join(args)}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_set_op(op: ops.SetOp, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_union(op: ops.Union, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases, distinct=op.distinct)\n\n\n@fmt_table_op.register(ops.SelfReference)\n@fmt_table_op.register(ops.Distinct)\ndef _fmt_table_op_self_reference_distinct(\n op: ops.Distinct | ops.SelfReference,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n return f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_fillna(op: ops.FillNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n raw_parts = fmt_fields(op, dict(replacements=fmt_value), aliases=aliases)\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_dropna(op: ops.DropNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n how = f\"how: {op.how!r}\"\n raw_parts = fmt_fields(op, dict(subset=fmt_value), aliases=aliases)\n return f\"{top}\\n{util.indent(how, spaces=2)}\\n{raw_parts}\"\n\n\ndef fmt_fields(\n op: ops.TableNode,\n fields: Mapping[str, Callable[[Any, Aliases], str]],\n *,\n aliases: Aliases,\n) -> str:\n parts = []\n\n for field, formatter in fields.items():\n if exprs := [\n expr\n for expr in util.promote_list(getattr(op, field))\n if expr is not None\n ]:\n field_fmt = [formatter(expr, aliases=aliases) for expr in exprs]\n\n parts.append(f\"{field}:\")\n parts.append(util.indent(\"\\n\".join(field_fmt), spaces=2))\n\n return util.indent(\"\\n\".join(parts), spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_selection(\n op: ops.Selection, *, aliases: Aliases, **_: Any\n) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n selections=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.selections),\n ),\n predicates=fmt_value,\n sort_keys=fmt_value,\n 
),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_aggregation(\n op: ops.Aggregation, *, aliases: Aliases, **_: Any\n) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n metrics=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.metrics),\n ),\n by=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.by),\n ),\n having=fmt_value,\n predicates=fmt_value,\n sort_keys=fmt_value,\n ),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_limit(op: ops.Limit, *, aliases: Aliases, **_: Any) -> str:\n params = [str(aliases[op.table.op()]), f\"n={op.n:d}\"]\n if offset := op.offset:\n params.append(f\"offset={offset:d}\")\n return f\"{op.__class__.__name__}[{', '.join(params)}]\"\n\n\[email protected]\ndef fmt_selection_column(value_expr: ir.ValueExpr, **_: Any) -> str:\n assert False, (\n \"expression type not implemented for \"\n f\"fmt_selection_column: {type(value_expr)}\"\n )\n\n\ndef type_info(datatype: dt.DataType) -> str:\n \"\"\"Format `datatype` for display next to a column.\"\"\"\n return f\" # {datatype}\" if ibis.options.repr.show_types else \"\"\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_value_expr(\n expr: ir.ValueExpr, *, aliases: Aliases, maxlen: int = 0\n) -> str:\n raw_name = expr._safe_name\n assert raw_name is not None, (\n \"`_safe_name` property should never be None when formatting a \"\n \"selection column expression\"\n )\n name = f\"{raw_name}:\"\n # the additional 1 is for the colon\n aligned_name = f\"{name:<{maxlen + 1}}\"\n value = fmt_value(expr, aliases=aliases)\n return f\"{aligned_name} {value}{type_info(expr.type())}\"\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_table_expr(\n expr: ir.TableExpr, *, aliases: Aliases, **_: Any\n) -> str:\n return str(aliases[expr.op()])\n\n\n_BIN_OP_CHARS = {\n # comparison operations\n ops.Equals: \"==\",\n ops.NotEquals: \"!=\",\n ops.Less: \"<\",\n ops.LessEqual: \"<=\",\n ops.Greater: \">\",\n ops.GreaterEqual: \">=\",\n # binary operations\n ops.Add: \"+\",\n ops.TimeAdd: \"+\",\n ops.Subtract: \"-\",\n ops.Multiply: \"*\",\n ops.Divide: \"/\",\n ops.FloorDivide: \"//\",\n ops.Modulus: \"%\",\n ops.Power: \"**\",\n ops.And: \"&\",\n ops.Or: \"|\",\n ops.Xor: \"^\",\n}\n\n\[email protected]\ndef fmt_value(obj, **_: Any) -> str:\n \"\"\"Format a value expression or operation.\n\n [`repr`][repr] the object if we don't have a specific formatting rule.\n \"\"\"\n return repr(obj)\n\n\n@fmt_value.register\ndef _fmt_value_function_type(func: types.FunctionType, **_: Any) -> str:\n return func.__name__\n\n\n@fmt_value.register\ndef _fmt_value_expr(expr: ir.Expr, *, aliases: Aliases) -> str:\n \"\"\"Format a value expression.\n\n Forwards the call on to the specific operation dispatch rule.\n \"\"\"\n return fmt_value(expr.op(), aliases=aliases)\n\n\n@fmt_value.register\ndef _fmt_value_node(op: ops.Node, **_: Any) -> str:\n assert False, f\"`fmt_value` not implemented for operation: {type(op)}\"\n\n\n@fmt_value.register\ndef _fmt_value_binary_op(op: ops.BinaryOp, *, aliases: Aliases) -> str:\n left = fmt_value(op.left, aliases=aliases)\n right = fmt_value(op.right, aliases=aliases)\n op_char = _BIN_OP_CHARS[type(op)]\n return f\"{left} {op_char} {right}\"\n\n\n@fmt_value.register\ndef _fmt_value_negate(op: ops.Negate, *, aliases: Aliases) -> str:\n op_name = \"Not\" if 
isinstance(op.arg.type(), dt.Boolean) else \"Negate\"\n operand = fmt_value(op.arg, aliases=aliases)\n return f\"{op_name}({operand})\"\n\n\n@fmt_value.register\ndef _fmt_value_literal(op: ops.Literal, **_: Any) -> str:\n return repr(op.value)\n\n\n@fmt_value.register\ndef _fmt_value_datatype(datatype: dt.DataType, **_: Any) -> str:\n return str(datatype)\n\n\n@fmt_value.register\ndef _fmt_value_value_op(op: ops.ValueOp, *, aliases: Aliases) -> str:\n args = []\n # loop over argument names and original expression\n for argname, orig_expr in zip(op.argnames, op.args):\n # promote argument to a list, so that we don't accidentially repr\n # entire subtrees when all we want is the formatted argument value\n if exprs := [\n expr for expr in util.promote_list(orig_expr) if expr is not None\n ]:\n # format the individual argument values\n formatted_args = \", \".join(\n fmt_value(expr, aliases=aliases) for expr in exprs\n )\n # if the original argument was a non-string iterable, display it as\n # a list\n value = (\n f\"[{formatted_args}]\"\n if util.is_iterable(orig_expr)\n else formatted_args\n )\n # `arg` and `expr` are noisy, so we ignore printing them as a\n # special case\n if argname not in (\"arg\", \"expr\"):\n formatted = f\"{argname}={value}\"\n else:\n formatted = value\n args.append(formatted)\n\n return f\"{op.__class__.__name__}({', '.join(args)})\"\n\n\n@fmt_value.register\ndef _fmt_value_table_column(op: ops.TableColumn, *, aliases: Aliases) -> str:\n return f\"{aliases[op.table.op()]}.{op.name}\"\n\n\n@fmt_value.register\ndef _fmt_value_scalar_parameter(op: ops.ScalarParameter, **_: Any) -> str:\n return f\"$({op.dtype})\"\n\n\n@fmt_value.register\ndef _fmt_value_sort_key(op: ops.SortKey, *, aliases: Aliases) -> str:\n expr = fmt_value(op.expr, aliases=aliases)\n sort_direction = \" asc\" if op.ascending else \"desc\"\n return f\"{sort_direction}|{expr}\"\n\n\n@fmt_value.register\ndef _fmt_value_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a table is used in a value expression. An\n example is `table.count()`.\n \"\"\"\n return op.name\n\n\n@fmt_value.register\ndef _fmt_value_table_node(\n op: ops.TableNode, *, aliases: Aliases, **_: Any\n) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a table is used in a value expression. 
An\n example is `table.count()`.\n \"\"\"\n return f\"{aliases[op.table.op()]}\"\n\n\n@fmt_value.register\ndef _fmt_value_string_sql_like(\n op: ops.StringSQLLike, *, aliases: Aliases\n) -> str:\n expr = fmt_value(op.arg, aliases=aliases)\n pattern = fmt_value(op.pattern, aliases=aliases)\n prefix = \"I\" * isinstance(op, ops.StringSQLILike)\n return f\"{expr} {prefix}LIKE {pattern}\"\n\n\n@fmt_value.register\ndef _fmt_value_window(win: win.Window, *, aliases: Aliases) -> str:\n args = []\n for field, value in (\n (\"_group_by\", win._group_by),\n (\"_order_by\", win._order_by),\n (\"preceding\", win.preceding),\n (\"following\", win.following),\n (\"max_lookback\", win.max_lookback),\n (\"how\", win.how),\n ):\n disp_field = field.lstrip(\"_\")\n if value is not None:\n if isinstance(value, tuple):\n # don't show empty sequences\n if not value:\n continue\n elements = \", \".join(\n fmt_value(val, aliases=aliases) for val in value\n )\n formatted = f\"[{elements}]\"\n else:\n formatted = fmt_value(value, aliases=aliases)\n args.append(f\"{disp_field}={formatted}\")\n return f\"{win.__class__.__name__}({', '.join(args)})\"\n", "path": "ibis/expr/format.py" } ]
[ { "content": "from __future__ import annotations\n\nimport collections\nimport functools\nimport textwrap\nimport types\nfrom typing import Any, Callable, Deque, Iterable, Mapping, Tuple\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nimport ibis.expr.window as win\nimport ibis.util as util\n\nAliases = Mapping[ops.TableNode, int]\nDeps = Deque[Tuple[int, ops.TableNode]]\n\n\nclass Alias:\n __slots__ = (\"value\",)\n\n def __init__(self, value: int) -> None:\n self.value = value\n\n def __str__(self) -> str:\n return f\"r{self.value}\"\n\n\ndef fmt(expr: ir.Expr) -> str:\n \"\"\"Format `expr`.\n\n Main entry point for the `Expr.__repr__` implementation.\n\n Returns\n -------\n str\n Formatted expression\n \"\"\"\n *deps, root = util.toposort(util.to_op_dag(expr))\n deps = collections.deque(\n (Alias(alias), dep)\n for alias, dep in enumerate(\n dep for dep in deps if isinstance(dep, ops.TableNode)\n )\n )\n\n aliases = {dep: alias for alias, dep in deps}\n pieces = []\n\n while deps:\n alias, node = deps.popleft()\n formatted = fmt_table_op(node, aliases=aliases, deps=deps)\n pieces.append(f\"{alias} := {formatted}\")\n\n pieces.append(\n fmt_root(root, name=expr._safe_name, aliases=aliases, deps=deps)\n )\n depth = ibis.options.repr.depth or 0\n if depth and depth < len(pieces):\n return fmt_truncated(pieces, depth=depth)\n return \"\\n\\n\".join(pieces)\n\n\ndef fmt_truncated(\n pieces: Iterable[str],\n *,\n depth: int,\n sep: str = \"\\n\\n\",\n ellipsis: str = util.VERTICAL_ELLIPSIS,\n) -> str:\n if depth == 1:\n return pieces[-1]\n\n first_n = depth // 2\n last_m = depth - first_n\n return sep.join([*pieces[:first_n], ellipsis, *pieces[-last_m:]])\n\n\ndef selection_maxlen(expressions: Iterable[ir.ValueExpr]) -> int:\n \"\"\"Compute the length of the longest name of input expressions.\n\n Parameters\n ----------\n expressions\n Expressions whose name to compute the maximum length of\n\n Returns\n -------\n int\n Max length\n \"\"\"\n try:\n return max(\n len(name)\n for expr in expressions\n if (name := expr._safe_name) is not None\n )\n except ValueError:\n return 0\n\n\[email protected]\ndef fmt_root(op: ops.Node, *, aliases: Aliases, **_: Any) -> str:\n \"\"\"Fallback formatting implementation.\"\"\"\n raw_parts = fmt_fields(\n op,\n dict.fromkeys(op.argnames, fmt_value),\n aliases=aliases,\n )\n return f\"{op.__class__.__name__}\\n{raw_parts}\"\n\n\n@fmt_root.register\ndef _fmt_root_table_node(op: ops.TableNode, **kwargs: Any) -> str:\n return fmt_table_op(op, **kwargs)\n\n\n@fmt_root.register\ndef _fmt_root_value_op(\n op: ops.ValueOp, *, name: str, aliases: Aliases, **_: Any\n) -> str:\n value = fmt_value(op, aliases=aliases)\n prefix = f\"{name}: \" if name is not None else \"\"\n return f\"{prefix}{value}{type_info(op.to_expr().type())}\"\n\n\n@fmt_root.register\ndef _fmt_foot_sort_key(op: ops.SortKey, *, aliases: Aliases, **_: Any) -> str:\n return fmt_value(op, aliases=aliases)\n\n\[email protected]\ndef fmt_table_op(op: ops.TableNode, **_: Any) -> str:\n assert False, f\"`fmt_table_op` not implemented for operation: {type(op)}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n top = f\"{op.__class__.__name__}: {op.name}\"\n formatted_schema = fmt_schema(op.schema)\n return f\"{top}\\n{formatted_schema}\"\n\n\ndef fmt_schema(schema: sch.Schema) -> str:\n \"\"\"Format `schema`.\n\n Parameters\n ----------\n schema\n Ibis 
schema to format\n\n Returns\n -------\n str\n Formatted schema\n \"\"\"\n names = schema.names\n maxlen = max(map(len, names))\n cols = [f\"{name:<{maxlen}} {typ}\" for name, typ in schema.items()]\n depth = ibis.options.repr.table_columns\n if depth is not None and depth < len(cols):\n first_column_name = names[0]\n raw = fmt_truncated(\n cols,\n depth=depth,\n sep=\"\\n\",\n ellipsis=util.VERTICAL_ELLIPSIS.center(len(first_column_name)),\n )\n else:\n raw = \"\\n\".join(cols)\n\n return util.indent(raw, spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_query_result(op: ops.SQLQueryResult, **_: Any) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}\\n{util.indent(query, spaces=2)}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_view(op: ops.View, *, aliases: Aliases, **_: Any) -> str:\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}[{aliases[op.child.op()]}]: {op.name}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_view(\n op: ops.SQLStringView,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n components = [\n f\"{top}[{aliases[op.child.op()]}]: {op.name}\",\n util.indent(query, spaces=2),\n schema_field,\n ]\n return \"\\n\".join(components)\n\n\[email protected]\ndef fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n assert False, f\"join type {type(op)} not implemented\"\n\n\n@fmt_join.register(ops.Join)\ndef _fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n # format the operator and its relation inputs\n left = aliases[op.left.op()]\n right = aliases[op.right.op()]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n\n # format the join predicates\n # if only one, put it directly after the join on thes same line\n # if more than one put each on a separate line\n preds = op.predicates\n formatted_preds = [fmt_value(pred, aliases=aliases) for pred in preds]\n has_one_pred = len(preds) == 1\n sep = \" \" if has_one_pred else \"\\n\"\n joined_predicates = util.indent(\n \"\\n\".join(formatted_preds),\n spaces=2 * (not has_one_pred),\n )\n trailing_sep = \"\\n\" + \"\\n\" * (not has_one_pred)\n return f\"{top}{sep}{joined_predicates}\", trailing_sep\n\n\n@fmt_join.register(ops.AsOfJoin)\ndef _fmt_asof_join(op: ops.AsOfJoin, *, aliases: Aliases) -> tuple[str, str]:\n left = aliases[op.left.op()]\n right = aliases[op.right.op()]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n raw_parts = fmt_fields(\n op,\n dict(predicates=fmt_value, by=fmt_value, tolerance=fmt_value),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\", \"\\n\\n\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_join(\n op: ops.Join,\n *,\n aliases: Aliases,\n deps: Deps,\n **_: Any,\n) -> str:\n # first, format the current join operation\n result, join_sep = fmt_join(op, aliases=aliases)\n 
formatted_joins = [result, join_sep]\n\n # process until the first non-Join dependency is popped in other words\n # process all runs of joins\n alias, current = None, None\n if deps:\n alias, current = deps.popleft()\n\n while isinstance(current, ops.Join):\n # copy the alias so that mutations to the value aren't shared\n # format the `current` join\n formatted_join, join_sep = fmt_join(current, aliases=aliases)\n formatted_joins.append(f\"{alias} := {formatted_join}\")\n formatted_joins.append(join_sep)\n\n if not deps:\n break\n\n alias, current = deps.popleft()\n\n if current is not None and not isinstance(current, ops.Join):\n # the last node popped from `deps` isn't a join which means we\n # still need to process it, so we put it at the front of the queue\n deps.appendleft((alias, current))\n\n # we don't want the last trailing separator so remove it from the end\n formatted_joins.pop()\n return \"\".join(formatted_joins)\n\n\n@fmt_table_op.register\ndef _(op: ops.CrossJoin, *, aliases: Aliases, **_: Any) -> str:\n left = aliases[op.left.op()]\n right = aliases[op.right.op()]\n return f\"{op.__class__.__name__}[{left}, {right}]\"\n\n\ndef _fmt_set_op(\n op: ops.SetOp,\n *,\n aliases: Aliases,\n distinct: bool | None = None,\n) -> str:\n args = [str(aliases[op.left.op()]), str(aliases[op.right.op()])]\n if distinct is not None:\n args.append(f\"distinct={distinct}\")\n return f\"{op.__class__.__name__}[{', '.join(args)}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_set_op(op: ops.SetOp, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_union(op: ops.Union, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases, distinct=op.distinct)\n\n\n@fmt_table_op.register(ops.SelfReference)\n@fmt_table_op.register(ops.Distinct)\ndef _fmt_table_op_self_reference_distinct(\n op: ops.Distinct | ops.SelfReference,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n return f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_fillna(op: ops.FillNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n raw_parts = fmt_fields(op, dict(replacements=fmt_value), aliases=aliases)\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_dropna(op: ops.DropNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n how = f\"how: {op.how!r}\"\n raw_parts = fmt_fields(op, dict(subset=fmt_value), aliases=aliases)\n return f\"{top}\\n{util.indent(how, spaces=2)}\\n{raw_parts}\"\n\n\ndef fmt_fields(\n op: ops.TableNode,\n fields: Mapping[str, Callable[[Any, Aliases], str]],\n *,\n aliases: Aliases,\n) -> str:\n parts = []\n\n for field, formatter in fields.items():\n if exprs := [\n expr\n for expr in util.promote_list(getattr(op, field))\n if expr is not None\n ]:\n field_fmt = [formatter(expr, aliases=aliases) for expr in exprs]\n\n parts.append(f\"{field}:\")\n parts.append(util.indent(\"\\n\".join(field_fmt), spaces=2))\n\n return util.indent(\"\\n\".join(parts), spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_selection(\n op: ops.Selection, *, aliases: Aliases, **_: Any\n) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n selections=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.selections),\n ),\n predicates=fmt_value,\n sort_keys=fmt_value,\n 
),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_aggregation(\n op: ops.Aggregation, *, aliases: Aliases, **_: Any\n) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table.op()]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n metrics=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.metrics),\n ),\n by=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.by),\n ),\n having=fmt_value,\n predicates=fmt_value,\n sort_keys=fmt_value,\n ),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_limit(op: ops.Limit, *, aliases: Aliases, **_: Any) -> str:\n params = [str(aliases[op.table.op()]), f\"n={op.n:d}\"]\n if offset := op.offset:\n params.append(f\"offset={offset:d}\")\n return f\"{op.__class__.__name__}[{', '.join(params)}]\"\n\n\[email protected]\ndef fmt_selection_column(value_expr: ir.ValueExpr, **_: Any) -> str:\n assert False, (\n \"expression type not implemented for \"\n f\"fmt_selection_column: {type(value_expr)}\"\n )\n\n\ndef type_info(datatype: dt.DataType) -> str:\n \"\"\"Format `datatype` for display next to a column.\"\"\"\n return f\" # {datatype}\" if ibis.options.repr.show_types else \"\"\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_value_expr(\n expr: ir.ValueExpr, *, aliases: Aliases, maxlen: int = 0\n) -> str:\n raw_name = expr._safe_name\n assert raw_name is not None, (\n \"`_safe_name` property should never be None when formatting a \"\n \"selection column expression\"\n )\n name = f\"{raw_name}:\"\n # the additional 1 is for the colon\n aligned_name = f\"{name:<{maxlen + 1}}\"\n value = fmt_value(expr, aliases=aliases)\n return f\"{aligned_name} {value}{type_info(expr.type())}\"\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_table_expr(\n expr: ir.TableExpr, *, aliases: Aliases, **_: Any\n) -> str:\n return str(aliases[expr.op()])\n\n\n_BIN_OP_CHARS = {\n # comparison operations\n ops.Equals: \"==\",\n ops.NotEquals: \"!=\",\n ops.Less: \"<\",\n ops.LessEqual: \"<=\",\n ops.Greater: \">\",\n ops.GreaterEqual: \">=\",\n # binary operations\n ops.Add: \"+\",\n ops.TimeAdd: \"+\",\n ops.Subtract: \"-\",\n ops.Multiply: \"*\",\n ops.Divide: \"/\",\n ops.FloorDivide: \"//\",\n ops.Modulus: \"%\",\n ops.Power: \"**\",\n ops.And: \"&\",\n ops.Or: \"|\",\n ops.Xor: \"^\",\n}\n\n\[email protected]\ndef fmt_value(obj, **_: Any) -> str:\n \"\"\"Format a value expression or operation.\n\n [`repr`][repr] the object if we don't have a specific formatting rule.\n \"\"\"\n return repr(obj)\n\n\n@fmt_value.register\ndef _fmt_value_function_type(func: types.FunctionType, **_: Any) -> str:\n return func.__name__\n\n\n@fmt_value.register\ndef _fmt_value_expr(expr: ir.Expr, *, aliases: Aliases) -> str:\n \"\"\"Format a value expression.\n\n Forwards the call on to the specific operation dispatch rule.\n \"\"\"\n return fmt_value(expr.op(), aliases=aliases)\n\n\n@fmt_value.register\ndef _fmt_value_node(op: ops.Node, **_: Any) -> str:\n assert False, f\"`fmt_value` not implemented for operation: {type(op)}\"\n\n\n@fmt_value.register\ndef _fmt_value_binary_op(op: ops.BinaryOp, *, aliases: Aliases) -> str:\n left = fmt_value(op.left, aliases=aliases)\n right = fmt_value(op.right, aliases=aliases)\n op_char = _BIN_OP_CHARS[type(op)]\n return f\"{left} {op_char} {right}\"\n\n\n@fmt_value.register\ndef _fmt_value_negate(op: ops.Negate, *, aliases: Aliases) -> str:\n op_name = \"Not\" if 
isinstance(op.arg.type(), dt.Boolean) else \"Negate\"\n operand = fmt_value(op.arg, aliases=aliases)\n return f\"{op_name}({operand})\"\n\n\n@fmt_value.register\ndef _fmt_value_literal(op: ops.Literal, **_: Any) -> str:\n if isinstance(op.dtype, dt.Interval):\n return f\"{op.value} {op.dtype.unit}\"\n return repr(op.value)\n\n\n@fmt_value.register\ndef _fmt_value_datatype(datatype: dt.DataType, **_: Any) -> str:\n return str(datatype)\n\n\n@fmt_value.register\ndef _fmt_value_value_op(op: ops.ValueOp, *, aliases: Aliases) -> str:\n args = []\n # loop over argument names and original expression\n for argname, orig_expr in zip(op.argnames, op.args):\n # promote argument to a list, so that we don't accidentially repr\n # entire subtrees when all we want is the formatted argument value\n if exprs := [\n expr for expr in util.promote_list(orig_expr) if expr is not None\n ]:\n # format the individual argument values\n formatted_args = \", \".join(\n fmt_value(expr, aliases=aliases) for expr in exprs\n )\n # if the original argument was a non-string iterable, display it as\n # a list\n value = (\n f\"[{formatted_args}]\"\n if util.is_iterable(orig_expr)\n else formatted_args\n )\n # `arg` and `expr` are noisy, so we ignore printing them as a\n # special case\n if argname not in (\"arg\", \"expr\"):\n formatted = f\"{argname}={value}\"\n else:\n formatted = value\n args.append(formatted)\n\n return f\"{op.__class__.__name__}({', '.join(args)})\"\n\n\n@fmt_value.register\ndef _fmt_value_table_column(op: ops.TableColumn, *, aliases: Aliases) -> str:\n return f\"{aliases[op.table.op()]}.{op.name}\"\n\n\n@fmt_value.register\ndef _fmt_value_scalar_parameter(op: ops.ScalarParameter, **_: Any) -> str:\n return f\"$({op.dtype})\"\n\n\n@fmt_value.register\ndef _fmt_value_sort_key(op: ops.SortKey, *, aliases: Aliases) -> str:\n expr = fmt_value(op.expr, aliases=aliases)\n sort_direction = \" asc\" if op.ascending else \"desc\"\n return f\"{sort_direction}|{expr}\"\n\n\n@fmt_value.register\ndef _fmt_value_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a table is used in a value expression. An\n example is `table.count()`.\n \"\"\"\n return op.name\n\n\n@fmt_value.register\ndef _fmt_value_table_node(\n op: ops.TableNode, *, aliases: Aliases, **_: Any\n) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a table is used in a value expression. 
An\n example is `table.count()`.\n \"\"\"\n return f\"{aliases[op.table.op()]}\"\n\n\n@fmt_value.register\ndef _fmt_value_string_sql_like(\n op: ops.StringSQLLike, *, aliases: Aliases\n) -> str:\n expr = fmt_value(op.arg, aliases=aliases)\n pattern = fmt_value(op.pattern, aliases=aliases)\n prefix = \"I\" * isinstance(op, ops.StringSQLILike)\n return f\"{expr} {prefix}LIKE {pattern}\"\n\n\n@fmt_value.register\ndef _fmt_value_window(win: win.Window, *, aliases: Aliases) -> str:\n args = []\n for field, value in (\n (\"_group_by\", win._group_by),\n (\"_order_by\", win._order_by),\n (\"preceding\", win.preceding),\n (\"following\", win.following),\n (\"max_lookback\", win.max_lookback),\n (\"how\", win.how),\n ):\n disp_field = field.lstrip(\"_\")\n if value is not None:\n if isinstance(value, tuple):\n # don't show empty sequences\n if not value:\n continue\n elements = \", \".join(\n fmt_value(val, aliases=aliases) for val in value\n )\n formatted = f\"[{elements}]\"\n else:\n formatted = fmt_value(value, aliases=aliases)\n args.append(f\"{disp_field}={formatted}\")\n return f\"{win.__class__.__name__}({', '.join(args)})\"\n", "path": "ibis/expr/format.py" } ]
diff --git a/ibis/expr/format.py b/ibis/expr/format.py index a590de6455bb..abe0b7260d30 100644 --- a/ibis/expr/format.py +++ b/ibis/expr/format.py @@ -534,6 +534,8 @@ def _fmt_value_negate(op: ops.Negate, *, aliases: Aliases) -> str: @fmt_value.register def _fmt_value_literal(op: ops.Literal, **_: Any) -> str: + if isinstance(op.dtype, dt.Interval): + return f"{op.value} {op.dtype.unit}" return repr(op.value)
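With the `_fmt_value_literal` change above, interval literals now include their unit in the repr. A rough sketch of the intended behavior (assumes an ibis build containing this fix; the exact rendering may differ between versions):

```python
import ibis

print(repr(ibis.interval(1, unit="s")))  # now shows the unit, e.g. "1 s"
print(repr(ibis.interval(1, unit="h")))  # e.g. "1 h"
```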
feast-dev__feast-1742
Dependency PyYAML 5.3.* has known vulnerabilities ## Expected Behavior According to [CVE-2020-14343](https://nvd.nist.gov/vuln/detail/CVE-2020-14343): > A vulnerability was discovered in the PyYAML library in versions before 5.4, where it is susceptible to arbitrary code execution when it processes untrusted YAML files through the full_load method or with the FullLoader loader. Applications that use the library to process untrusted input may be vulnerable to this flaw. This flaw allows an attacker to execute arbitrary code on the system by abusing the python/object/new constructor. This flaw is due to an incomplete fix for CVE-2020-1747. See CVE-2020-14343. ## Current Behavior The Feast Python SDK pins `PyYAML==5.3.*`. This affects not only Feast but also any app that depends on it, since dependencies are shared. ## Steps to reproduce N/A ### Specifications N/A ## Possible Solution Bump the PyYAML requirement to `>=5.4`.
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=1.1.0\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.34.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nREDIS_REQUIRED = [\n \"redis-py-cluster==2.1.2\",\n]\n\nAWS_REQUIRED = [\n \"boto3==1.17.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n \"boto3==1.17.*\",\n]\n\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n 
r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py" } ]
[ { "content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=1.1.0\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML>=5.4.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.34.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nREDIS_REQUIRED = [\n \"redis-py-cluster==2.1.2\",\n]\n\nAWS_REQUIRED = [\n \"boto3==1.17.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n \"boto3==1.17.*\",\n]\n\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n 
r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py" } ]
diff --git a/sdk/python/setup.py b/sdk/python/setup.py index cae6c1d802b..1b8cfc0e687 100644 --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -52,7 +52,7 @@ "protobuf>=3.10", "pyarrow>=2.0.0", "pydantic>=1.0.0", - "PyYAML==5.3.*", + "PyYAML>=5.4.*", "tabulate==0.8.*", "tenacity>=7.*", "toml==0.10.*",
bokeh__bokeh-10106
[BUG] `cd sphinx; make serve` doesn't work

#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
Bokeh 2.0.2-76-ga417746c9

#### Description of expected behavior and the observed behavior
The page at https://docs.bokeh.org/en/latest/docs/dev_guide/documentation.html mentions that it's possible to run `make serve` to serve the documentation locally. But running it results in:
```
Exception in thread Thread-2:
Traceback (most recent call last):
  File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 917, in _bootstrap_inner
    self.run()
  File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 865, in run
    self._target(*self._args, **self._kwargs)
  File "docserver.py", line 43, in open_browser
    webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
  File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 78, in open
    if browser.open(url, new, autoraise):
  File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 251, in open
    "expected 0, 1, or 2, got %s" % new)
webbrowser.Error: Bad 'new' parameter to open(); expected 0, 1, or 2, got tab
```
Not sure where `"tab"` has come from, but it has been there forever.
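The error comes down to the `new` argument of `webbrowser.open`, which the Python standard library restricts to the integers 0 (reuse the same window), 1 (new window), or 2 (new tab if possible); the accompanying diff further down switches the call to `new=2`. A minimal, standalone sketch of the valid usage follows, with the URL serving only as a placeholder:

```python
import webbrowser

# Placeholder URL standing in for the docserver's index page.
URL = "http://localhost:5009/en/latest/index.html"

# Valid values for `new`:
#   0 - reuse the same browser window if possible
#   1 - open a new browser window
#   2 - open a new browser tab if possible (the value the fix uses)
webbrowser.open(URL, new=2)

# A non-integer value such as new="tab" raises:
#   webbrowser.Error: Bad 'new' parameter to open(); expected 0, 1, or 2, got tab
```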
[ { "content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py" } ]
[ { "content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=2)\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py" } ]
diff --git a/sphinx/docserver.py b/sphinx/docserver.py index 74c780c5fc5..fbfdd3ffff0 100644 --- a/sphinx/docserver.py +++ b/sphinx/docserver.py @@ -40,7 +40,7 @@ def send_docs(filename): def open_browser(): # Child process time.sleep(0.5) - webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab") + webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2) data = {}
getredash__redash-1118
When archiving a query, delete related alerts. Related: #731.
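The `Query.archive()` method in `redash/models.py` (dumped below) currently only flags the query as archived, clears its schedule, and removes dashboard widgets. The following is a hedged sketch of how the requested alert cleanup could look; it assumes the surrounding `Query` model and the `alerts` related_name that `Alert.query` already declares, and is an illustration rather than necessarily the exact change that was merged:

```python
# Sketch only: extends Query.archive() from redash/models.py so that alerts
# pointing at the query are removed together with its widgets.
def archive(self):
    self.is_archived = True
    self.schedule = None

    for vis in self.visualizations:
        for w in vis.widgets:
            w.delete_instance()

    # New step: delete related alerts; recursive=True lets peewee also remove
    # dependent rows such as alert subscriptions.
    for alert in self.alerts:
        alert.delete_instance(recursive=True)

    self.save()
```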
[ { "content": "import json\nfrom flask_login import UserMixin, AnonymousUserMixin\nimport hashlib\nimport logging\nimport os\nimport threading\nimport time\nimport datetime\nimport itertools\nfrom funcy import project\n\nimport peewee\nfrom passlib.apps import custom_app_context as pwd_context\nfrom playhouse.gfk import GFKField, BaseModel\nfrom playhouse.postgres_ext import ArrayField, DateTimeTZField\nfrom permissions import has_access, view_only\n\nfrom redash import utils, settings, redis_connection\nfrom redash.query_runner import get_query_runner, get_configuration_schema_for_query_runner_type\nfrom redash.destinations import get_destination, get_configuration_schema_for_destination_type\nfrom redash.metrics.database import MeteredPostgresqlExtDatabase, MeteredModel\nfrom redash.utils import generate_token\nfrom redash.utils.configuration import ConfigurationContainer\n\n\nclass Database(object):\n def __init__(self):\n self.database_config = dict(settings.DATABASE_CONFIG)\n self.database_config['register_hstore'] = False\n self.database_name = self.database_config.pop('name')\n self.database = MeteredPostgresqlExtDatabase(self.database_name, **self.database_config)\n self.app = None\n self.pid = os.getpid()\n\n def init_app(self, app):\n self.app = app\n self.register_handlers()\n\n def connect_db(self):\n self._check_pid()\n self.database.reset_metrics()\n self.database.connect()\n\n def close_db(self, exc):\n self._check_pid()\n if not self.database.is_closed():\n self.database.close()\n\n def _check_pid(self):\n current_pid = os.getpid()\n if self.pid != current_pid:\n logging.info(\"New pid detected (%d!=%d); resetting database lock.\", self.pid, current_pid)\n self.pid = os.getpid()\n self.database._conn_lock = threading.Lock()\n\n def register_handlers(self):\n self.app.before_request(self.connect_db)\n self.app.teardown_request(self.close_db)\n\n\ndb = Database()\n\n\n# Support for cast operation on database fields\[email protected]()\ndef cast(self, as_type):\n return peewee.Expression(self, '::', peewee.SQL(as_type))\n\n\nclass JSONField(peewee.TextField):\n def db_value(self, value):\n return json.dumps(value)\n\n def python_value(self, value):\n if not value:\n return value\n return json.loads(value)\n\n\nclass BaseModel(MeteredModel):\n class Meta:\n database = db.database\n\n @classmethod\n def get_by_id(cls, model_id):\n return cls.get(cls.id == model_id)\n\n def pre_save(self, created):\n pass\n\n def post_save(self, created):\n # Handler for post_save operations. Overriding if needed.\n pass\n\n def save(self, *args, **kwargs):\n pk_value = self._get_pk_value()\n created = kwargs.get('force_insert', False) or not bool(pk_value)\n self.pre_save(created)\n super(BaseModel, self).save(*args, **kwargs)\n self.post_save(created)\n\n def update_instance(self, **kwargs):\n for k, v in kwargs.items():\n # setattr(model_instance, field_name, field_obj.python_value(value))\n setattr(self, k, v)\n\n # We have to run pre-save before calculating dirty_fields. 
We end up running it twice,\n # but pre_save calls should be very quick so it's not big of an issue.\n # An alternative can be to recalculate dirty_fields, but it felt more error prone.\n self.pre_save(False)\n\n self.save(only=self.dirty_fields)\n\n\nclass ModelTimestampsMixin(BaseModel):\n updated_at = DateTimeTZField(default=datetime.datetime.now)\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n def pre_save(self, created):\n super(ModelTimestampsMixin, self).pre_save(created)\n\n self.updated_at = datetime.datetime.now()\n\n\nclass BelongsToOrgMixin(object):\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return cls.get(cls.id == object_id, cls.org == org)\n\n\nclass PermissionsCheckMixin(object):\n def has_permission(self, permission):\n return self.has_permissions((permission,))\n\n def has_permissions(self, permissions):\n has_permissions = reduce(lambda a, b: a and b,\n map(lambda permission: permission in self.permissions,\n permissions),\n True)\n\n return has_permissions\n\n\nclass AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):\n @property\n def permissions(self):\n return []\n\n\nclass ApiUser(UserMixin, PermissionsCheckMixin):\n def __init__(self, api_key, org, groups, name=None):\n self.object = None\n if isinstance(api_key, basestring):\n self.id = api_key\n self.name = name\n else:\n self.id = api_key.api_key\n self.name = \"ApiKey: {}\".format(api_key.id)\n self.object = api_key.object\n self.groups = groups\n self.org = org\n\n def __repr__(self):\n return u\"<{}>\".format(self.name)\n\n @property\n def permissions(self):\n return ['view_query']\n\n\nclass Organization(ModelTimestampsMixin, BaseModel):\n SETTING_GOOGLE_APPS_DOMAINS = 'google_apps_domains'\n SETTING_IS_PUBLIC = \"is_public\"\n\n id = peewee.PrimaryKeyField()\n name = peewee.CharField()\n slug = peewee.CharField(unique=True)\n settings = JSONField()\n\n class Meta:\n db_table = 'organizations'\n\n def __repr__(self):\n return u\"<Organization: {}, {}>\".format(self.id, self.name)\n\n # When Organization is used with LocalProxy (like the current_org helper), peewee doesn't recognize it as a Model\n # and might call int() on it. 
This method makes sure it works.\n def __int__(self):\n return self.id\n\n @classmethod\n def get_by_slug(cls, slug):\n return cls.get(cls.slug == slug)\n\n @property\n def default_group(self):\n return self.groups.where(Group.name=='default', Group.type==Group.BUILTIN_GROUP).first()\n\n @property\n def google_apps_domains(self):\n return self.settings.get(self.SETTING_GOOGLE_APPS_DOMAINS, [])\n\n @property\n def is_public(self):\n return self.settings.get(self.SETTING_IS_PUBLIC, False)\n\n @property\n def admin_group(self):\n return self.groups.where(Group.name=='admin', Group.type==Group.BUILTIN_GROUP).first()\n\n def has_user(self, email):\n return self.users.where(User.email==email).count() == 1\n\n\nclass Group(BaseModel, BelongsToOrgMixin):\n DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',\n 'view_query', 'view_source', 'execute_query', 'list_users', 'schedule_query',\n 'list_dashboards', 'list_alerts', 'list_data_sources']\n\n BUILTIN_GROUP = 'builtin'\n REGULAR_GROUP = 'regular'\n\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"groups\")\n type = peewee.CharField(default=REGULAR_GROUP)\n name = peewee.CharField(max_length=100)\n permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'groups'\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'permissions': self.permissions,\n 'type': self.type,\n 'created_at': self.created_at\n }\n\n @classmethod\n def all(cls, org):\n return cls.select().where(cls.org==org)\n\n @classmethod\n def members(cls, group_id):\n return User.select().where(peewee.SQL(\"%s = ANY(groups)\", group_id))\n\n @classmethod\n def find_by_name(cls, org, group_names):\n result = cls.select().where(cls.org == org, cls.name << group_names)\n return list(result)\n\n def __unicode__(self):\n return unicode(self.id)\n\n\nclass User(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin, UserMixin, PermissionsCheckMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"users\")\n name = peewee.CharField(max_length=320)\n email = peewee.CharField(max_length=320)\n password_hash = peewee.CharField(max_length=128, null=True)\n groups = ArrayField(peewee.IntegerField, null=True)\n api_key = peewee.CharField(max_length=40, unique=True)\n\n class Meta:\n db_table = 'users'\n\n indexes = (\n (('org', 'email'), True),\n )\n\n def __init__(self, *args, **kwargs):\n super(User, self).__init__(*args, **kwargs)\n\n def to_dict(self, with_api_key=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'email': self.email,\n 'gravatar_url': self.gravatar_url,\n 'groups': self.groups,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n if self.password_hash is None:\n d['auth_type'] = 'external'\n else:\n d['auth_type'] = 'password'\n\n if with_api_key:\n d['api_key'] = self.api_key\n\n return d\n\n def pre_save(self, created):\n super(User, self).pre_save(created)\n\n if not self.api_key:\n self.api_key = generate_token(40)\n\n @property\n def gravatar_url(self):\n email_md5 = hashlib.md5(self.email.lower()).hexdigest()\n return \"https://www.gravatar.com/avatar/%s?s=40\" % email_md5\n\n @property\n def permissions(self):\n # TODO: this should be cached.\n return list(itertools.chain(*[g.permissions for g in\n Group.select().where(Group.id << self.groups)]))\n\n @classmethod\n def get_by_email_and_org(cls, 
email, org):\n return cls.get(cls.email == email, cls.org == org)\n\n @classmethod\n def get_by_api_key_and_org(cls, api_key, org):\n return cls.get(cls.api_key == api_key, cls.org == org)\n\n @classmethod\n def all(cls, org):\n return cls.select().where(cls.org == org)\n\n @classmethod\n def find_by_email(cls, email):\n return cls.select().where(cls.email == email)\n\n def __unicode__(self):\n return u'%s (%s)' % (self.name, self.email)\n\n def hash_password(self, password):\n self.password_hash = pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return self.password_hash and pwd_context.verify(password, self.password_hash)\n\n def update_group_assignments(self, group_names):\n groups = Group.find_by_name(self.org, group_names)\n groups.append(self.org.default_group)\n self.groups = map(lambda g: g.id, groups)\n self.save()\n\n\nclass ConfigurationField(peewee.TextField):\n def db_value(self, value):\n return value.to_json()\n\n def python_value(self, value):\n return ConfigurationContainer.from_json(value)\n\n\nclass DataSource(BelongsToOrgMixin, BaseModel):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"data_sources\")\n name = peewee.CharField()\n type = peewee.CharField()\n options = ConfigurationField()\n queue_name = peewee.CharField(default=\"queries\")\n scheduled_queue_name = peewee.CharField(default=\"scheduled_queries\")\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'data_sources'\n\n indexes = (\n (('org', 'name'), True),\n )\n\n def to_dict(self, all=False, with_permissions=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'syntax': self.query_runner.syntax,\n 'paused': self.paused,\n 'pause_reason': self.pause_reason\n }\n\n if all:\n schema = get_configuration_schema_for_query_runner_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n d['queue_name'] = self.queue_name\n d['scheduled_queue_name'] = self.scheduled_queue_name\n d['groups'] = self.groups\n\n if with_permissions:\n d['view_only'] = self.data_source_groups.view_only\n\n return d\n\n def __unicode__(self):\n return self.name\n\n @classmethod\n def create_with_group(cls, *args, **kwargs):\n data_source = cls.create(*args, **kwargs)\n DataSourceGroup.create(data_source=data_source, group=data_source.org.default_group)\n return data_source\n\n def get_schema(self, refresh=False):\n key = \"data_source:schema:{}\".format(self.id)\n\n cache = None\n if not refresh:\n cache = redis_connection.get(key)\n\n if cache is None:\n query_runner = self.query_runner\n schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])\n\n redis_connection.set(key, json.dumps(schema))\n else:\n schema = json.loads(cache)\n\n return schema\n\n def _pause_key(self):\n return 'ds:{}:pause'.format(self.id)\n\n @property\n def paused(self):\n return redis_connection.exists(self._pause_key())\n\n @property\n def pause_reason(self):\n return redis_connection.get(self._pause_key())\n\n def pause(self, reason=None):\n redis_connection.set(self._pause_key(), reason)\n\n def resume(self):\n redis_connection.delete(self._pause_key())\n\n def add_group(self, group, view_only=False):\n dsg = DataSourceGroup.create(group=group, data_source=self, view_only=view_only)\n setattr(self, 'data_source_groups', dsg)\n\n def remove_group(self, group):\n DataSourceGroup.delete().where(DataSourceGroup.group==group, 
DataSourceGroup.data_source==self).execute()\n\n def update_group_permission(self, group, view_only):\n dsg = DataSourceGroup.get(DataSourceGroup.group==group, DataSourceGroup.data_source==self)\n dsg.view_only = view_only\n dsg.save()\n setattr(self, 'data_source_groups', dsg)\n\n @property\n def query_runner(self):\n return get_query_runner(self.type, self.options)\n\n @classmethod\n def all(cls, org, groups=None):\n data_sources = cls.select().where(cls.org==org).order_by(cls.id.asc())\n\n if groups:\n data_sources = data_sources.join(DataSourceGroup).where(DataSourceGroup.group << groups)\n\n return data_sources\n\n @property\n def groups(self):\n groups = DataSourceGroup.select().where(DataSourceGroup.data_source==self)\n return dict(map(lambda g: (g.group_id, g.view_only), groups))\n\n\nclass DataSourceGroup(BaseModel):\n data_source = peewee.ForeignKeyField(DataSource)\n group = peewee.ForeignKeyField(Group, related_name=\"data_sources\")\n view_only = peewee.BooleanField(default=False)\n\n class Meta:\n db_table = \"data_source_groups\"\n\n\nclass QueryResult(BaseModel, BelongsToOrgMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization)\n data_source = peewee.ForeignKeyField(DataSource)\n query_hash = peewee.CharField(max_length=32, index=True)\n query = peewee.TextField()\n data = peewee.TextField()\n runtime = peewee.FloatField()\n retrieved_at = DateTimeTZField()\n\n class Meta:\n db_table = 'query_results'\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'query_hash': self.query_hash,\n 'query': self.query,\n 'data': json.loads(self.data),\n 'data_source_id': self.data_source_id,\n 'runtime': self.runtime,\n 'retrieved_at': self.retrieved_at\n }\n\n @classmethod\n def unused(cls, days=7):\n age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)\n\n unused_results = cls.select().where(Query.id == None, cls.retrieved_at < age_threshold)\\\n .join(Query, join_type=peewee.JOIN_LEFT_OUTER)\n\n return unused_results\n\n @classmethod\n def get_latest(cls, data_source, query, max_age=0):\n query_hash = utils.gen_query_hash(query)\n\n if max_age == -1:\n query = cls.select().where(cls.query_hash == query_hash,\n cls.data_source == data_source).order_by(cls.retrieved_at.desc())\n else:\n query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source,\n peewee.SQL(\"retrieved_at + interval '%s second' >= now() at time zone 'utc'\",\n max_age)).order_by(cls.retrieved_at.desc())\n\n return query.first()\n\n @classmethod\n def store_result(cls, org_id, data_source_id, query_hash, query, data, run_time, retrieved_at):\n query_result = cls.create(org=org_id,\n query_hash=query_hash,\n query=query,\n runtime=run_time,\n data_source=data_source_id,\n retrieved_at=retrieved_at,\n data=data)\n\n logging.info(\"Inserted query (%s) data; id=%s\", query_hash, query_result.id)\n\n sql = \"UPDATE queries SET latest_query_data_id = %s WHERE query_hash = %s AND data_source_id = %s RETURNING id\"\n query_ids = [row[0] for row in db.database.execute_sql(sql, params=(query_result.id, query_hash, data_source_id))]\n\n # TODO: when peewee with update & returning support is released, we can get back to using this code:\n # updated_count = Query.update(latest_query_data=query_result).\\\n # where(Query.query_hash==query_hash, Query.data_source==data_source_id).\\\n # execute()\n\n logging.info(\"Updated %s queries with result (%s).\", len(query_ids), query_hash)\n\n return query_result, query_ids\n\n def __unicode__(self):\n return u\"%d 
| %s | %s\" % (self.id, self.query_hash, self.retrieved_at)\n\n @property\n def groups(self):\n return self.data_source.groups\n\n\ndef should_schedule_next(previous_iteration, now, schedule):\n if schedule.isdigit():\n ttl = int(schedule)\n next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)\n else:\n hour, minute = schedule.split(':')\n hour, minute = int(hour), int(minute)\n\n # The following logic is needed for cases like the following:\n # - The query scheduled to run at 23:59.\n # - The scheduler wakes up at 00:01.\n # - Using naive implementation of comparing timestamps, it will skip the execution.\n normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)\n if normalized_previous_iteration > previous_iteration:\n previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)\n\n next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)\n\n return now > next_iteration\n\n\nclass Query(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"queries\")\n data_source = peewee.ForeignKeyField(DataSource, null=True)\n latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)\n name = peewee.CharField(max_length=255)\n description = peewee.CharField(max_length=4096, null=True)\n query = peewee.TextField()\n query_hash = peewee.CharField(max_length=32)\n api_key = peewee.CharField(max_length=40)\n user = peewee.ForeignKeyField(User)\n last_modified_by = peewee.ForeignKeyField(User, null=True, related_name=\"modified_queries\")\n is_archived = peewee.BooleanField(default=False, index=True)\n schedule = peewee.CharField(max_length=10, null=True)\n options = JSONField(default={})\n\n class Meta:\n db_table = 'queries'\n\n def to_dict(self, with_stats=False, with_visualizations=False, with_user=True, with_last_modified_by=True):\n d = {\n 'id': self.id,\n 'latest_query_data_id': self._data.get('latest_query_data', None),\n 'name': self.name,\n 'description': self.description,\n 'query': self.query,\n 'query_hash': self.query_hash,\n 'schedule': self.schedule,\n 'api_key': self.api_key,\n 'is_archived': self.is_archived,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at,\n 'data_source_id': self.data_source_id,\n 'options': self.options\n }\n\n if with_user:\n d['user'] = self.user.to_dict()\n else:\n d['user_id'] = self.user_id\n\n if with_last_modified_by:\n d['last_modified_by'] = self.last_modified_by.to_dict() if self.last_modified_by is not None else None\n else:\n d['last_modified_by_id'] = self.last_modified_by_id\n\n if with_stats:\n d['retrieved_at'] = self.retrieved_at\n d['runtime'] = self.runtime\n\n if with_visualizations:\n d['visualizations'] = [vis.to_dict(with_query=False)\n for vis in self.visualizations]\n\n return d\n\n def archive(self):\n self.is_archived = True\n self.schedule = None\n\n for vis in self.visualizations:\n for w in vis.widgets:\n w.delete_instance()\n\n self.save()\n\n @classmethod\n def all_queries(cls, groups):\n q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\\\n .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\\\n .switch(Query).join(User)\\\n .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source))\\\n .where(Query.is_archived==False)\\\n .where(DataSourceGroup.group << groups)\\\n .group_by(Query.id, User.id, QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\\\n 
.order_by(cls.created_at.desc())\n\n return q\n\n @classmethod\n def outdated_queries(cls):\n queries = cls.select(cls, QueryResult.retrieved_at, DataSource)\\\n .join(QueryResult)\\\n .switch(Query).join(DataSource)\\\n .where(cls.schedule != None)\n\n now = utils.utcnow()\n outdated_queries = {}\n for query in queries:\n if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):\n key = \"{}:{}\".format(query.query_hash, query.data_source.id)\n outdated_queries[key] = query\n\n return outdated_queries.values()\n\n @classmethod\n def search(cls, term, groups):\n # TODO: This is very naive implementation of search, to be replaced with PostgreSQL full-text-search solution.\n\n where = (cls.name**u\"%{}%\".format(term)) | (cls.description**u\"%{}%\".format(term))\n\n if term.isdigit():\n where |= cls.id == term\n\n where &= cls.is_archived == False\n\n query_ids = cls.select(peewee.fn.Distinct(cls.id))\\\n .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)) \\\n .where(where) \\\n .where(DataSourceGroup.group << groups)\n\n return cls.select().where(cls.id << query_ids)\n\n\n @classmethod\n def recent(cls, groups, user_id=None, limit=20):\n query = cls.select(Query, User).where(Event.created_at > peewee.SQL(\"current_date - 7\")).\\\n join(Event, on=(Query.id == Event.object_id.cast('integer'))). \\\n join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)). \\\n switch(Query).join(User).\\\n where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\\\n where(~(Event.object_id >> None)).\\\n where(Event.object_type == 'query'). \\\n where(DataSourceGroup.group << groups).\\\n where(cls.is_archived == False).\\\n group_by(Event.object_id, Query.id, User.id).\\\n order_by(peewee.SQL(\"count(0) desc\"))\n\n if user_id:\n query = query.where(Event.user == user_id)\n\n query = query.limit(limit)\n\n return query\n\n def pre_save(self, created):\n super(Query, self).pre_save(created)\n self.query_hash = utils.gen_query_hash(self.query)\n self._set_api_key()\n\n if self.last_modified_by is None:\n self.last_modified_by = self.user\n\n def post_save(self, created):\n if created:\n self._create_default_visualizations()\n\n def _create_default_visualizations(self):\n table_visualization = Visualization(query=self, name=\"Table\",\n description='',\n type=\"TABLE\", options=\"{}\")\n table_visualization.save()\n\n def _set_api_key(self):\n if not self.api_key:\n self.api_key = hashlib.sha1(\n u''.join((str(time.time()), self.query, str(self.user_id), self.name)).encode('utf-8')).hexdigest()\n\n @property\n def runtime(self):\n return self.latest_query_data.runtime\n\n @property\n def retrieved_at(self):\n return self.latest_query_data.retrieved_at\n\n @property\n def groups(self):\n if self.data_source is None:\n return {}\n\n return self.data_source.groups\n\n def __unicode__(self):\n return unicode(self.id)\n\n\nclass Alert(ModelTimestampsMixin, BaseModel):\n UNKNOWN_STATE = 'unknown'\n OK_STATE = 'ok'\n TRIGGERED_STATE = 'triggered'\n\n id = peewee.PrimaryKeyField()\n name = peewee.CharField()\n query = peewee.ForeignKeyField(Query, related_name='alerts')\n user = peewee.ForeignKeyField(User, related_name='alerts')\n options = JSONField()\n state = peewee.CharField(default=UNKNOWN_STATE)\n last_triggered_at = DateTimeTZField(null=True)\n rearm = peewee.IntegerField(null=True)\n\n class Meta:\n db_table = 'alerts'\n\n @classmethod\n def all(cls, groups):\n return cls.select(Alert, User, Query)\\\n 
.join(Query)\\\n .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source))\\\n .where(DataSourceGroup.group << groups)\\\n .switch(Alert)\\\n .join(User)\\\n .group_by(Alert, User, Query)\n\n @classmethod\n def get_by_id_and_org(cls, id, org):\n return cls.select(Alert, User, Query).join(Query).switch(Alert).join(User).where(cls.id==id, Query.org==org).get()\n\n def to_dict(self, full=True):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'options': self.options,\n 'state': self.state,\n 'last_triggered_at': self.last_triggered_at,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at,\n 'rearm': self.rearm\n }\n\n if full:\n d['query'] = self.query.to_dict()\n d['user'] = self.user.to_dict()\n else:\n d['query_id'] = self.query_id\n d['user_id'] = self.user_id\n\n return d\n\n def evaluate(self):\n data = json.loads(self.query.latest_query_data.data)\n # todo: safe guard for empty\n value = data['rows'][0][self.options['column']]\n op = self.options['op']\n\n if op == 'greater than' and value > self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'less than' and value < self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'equals' and value == self.options['value']:\n new_state = self.TRIGGERED_STATE\n else:\n new_state = self.OK_STATE\n\n return new_state\n\n def subscribers(self):\n return User.select().join(AlertSubscription).where(AlertSubscription.alert==self)\n\n @property\n def groups(self):\n return self.query.groups\n\n\nclass Dashboard(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"dashboards\")\n slug = peewee.CharField(max_length=140, index=True)\n name = peewee.CharField(max_length=100)\n user = peewee.ForeignKeyField(User)\n layout = peewee.TextField()\n dashboard_filters_enabled = peewee.BooleanField(default=False)\n is_archived = peewee.BooleanField(default=False, index=True)\n\n class Meta:\n db_table = 'dashboards'\n\n def to_dict(self, with_widgets=False, user=None):\n layout = json.loads(self.layout)\n\n if with_widgets:\n widget_list = Widget.select(Widget, Visualization, Query, User)\\\n .where(Widget.dashboard == self.id)\\\n .join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\\\n .join(Query, join_type=peewee.JOIN_LEFT_OUTER)\\\n .join(User, join_type=peewee.JOIN_LEFT_OUTER)\n\n widgets = {}\n\n for w in widget_list:\n if w.visualization_id is None:\n widgets[w.id] = w.to_dict()\n elif user and has_access(w.visualization.query.groups, user, view_only):\n widgets[w.id] = w.to_dict()\n else:\n widgets[w.id] = project(w.to_dict(),\n ('id', 'width', 'dashboard_id', 'options', 'created_at', 'updated_at'))\n widgets[w.id]['restricted'] = True\n\n # The following is a workaround for cases when the widget object gets deleted without the dashboard layout\n # updated. 
This happens for users with old databases that didn't have a foreign key relationship between\n # visualizations and widgets.\n # It's temporary until better solution is implemented (we probably should move the position information\n # to the widget).\n widgets_layout = []\n for row in layout:\n new_row = []\n for widget_id in row:\n widget = widgets.get(widget_id, None)\n if widget:\n new_row.append(widget)\n\n widgets_layout.append(new_row)\n else:\n widgets_layout = None\n\n return {\n 'id': self.id,\n 'slug': self.slug,\n 'name': self.name,\n 'user_id': self.user_id,\n 'layout': layout,\n 'dashboard_filters_enabled': self.dashboard_filters_enabled,\n 'widgets': widgets_layout,\n 'is_archived': self.is_archived,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n @classmethod\n def all(cls, org, groups, user_id):\n query = cls.select().\\\n join(Widget, peewee.JOIN_LEFT_OUTER, on=(Dashboard.id == Widget.dashboard)). \\\n join(Visualization, peewee.JOIN_LEFT_OUTER, on=(Widget.visualization == Visualization.id)). \\\n join(Query, peewee.JOIN_LEFT_OUTER, on=(Visualization.query == Query.id)). \\\n join(DataSourceGroup, peewee.JOIN_LEFT_OUTER, on=(Query.data_source == DataSourceGroup.data_source)). \\\n where(Dashboard.is_archived == False). \\\n where((DataSourceGroup.group << groups) |\n (Dashboard.user == user_id) |\n (~(Widget.dashboard >> None) & (Widget.visualization >> None))). \\\n where(Dashboard.org == org). \\\n group_by(Dashboard.id)\n\n return query\n\n @classmethod\n def recent(cls, org, groups, user_id, for_user=False, limit=20):\n query = cls.select().where(Event.created_at > peewee.SQL(\"current_date - 7\")). \\\n join(Event, peewee.JOIN_LEFT_OUTER, on=(Dashboard.id == Event.object_id.cast('integer'))). \\\n join(Widget, peewee.JOIN_LEFT_OUTER, on=(Dashboard.id == Widget.dashboard)). \\\n join(Visualization, peewee.JOIN_LEFT_OUTER, on=(Widget.visualization == Visualization.id)). \\\n join(Query, peewee.JOIN_LEFT_OUTER, on=(Visualization.query == Query.id)). \\\n join(DataSourceGroup, peewee.JOIN_LEFT_OUTER, on=(Query.data_source == DataSourceGroup.data_source)). \\\n where(Event.action << ('edit', 'view')). \\\n where(~(Event.object_id >> None)). \\\n where(Event.object_type == 'dashboard'). \\\n where(Dashboard.is_archived == False). \\\n where(Dashboard.org == org). \\\n where((DataSourceGroup.group << groups) |\n (Dashboard.user == user_id) |\n (~(Widget.dashboard >> None) & (Widget.visualization >> None))). \\\n group_by(Event.object_id, Dashboard.id). 
\\\n order_by(peewee.SQL(\"count(0) desc\"))\n\n if for_user:\n query = query.where(Event.user == user_id)\n\n query = query.limit(limit)\n\n return query\n\n @classmethod\n def get_by_slug_and_org(cls, slug, org):\n return cls.get(cls.slug == slug, cls.org==org)\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = utils.slugify(self.name)\n\n tries = 1\n while self.select().where(Dashboard.slug == self.slug).first() is not None:\n self.slug = utils.slugify(self.name) + \"_{0}\".format(tries)\n tries += 1\n\n super(Dashboard, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u\"%s=%s\" % (self.id, self.name)\n\n\nclass Visualization(ModelTimestampsMixin, BaseModel):\n id = peewee.PrimaryKeyField()\n type = peewee.CharField(max_length=100)\n query = peewee.ForeignKeyField(Query, related_name='visualizations')\n name = peewee.CharField(max_length=255)\n description = peewee.CharField(max_length=4096, null=True)\n options = peewee.TextField()\n\n class Meta:\n db_table = 'visualizations'\n\n def to_dict(self, with_query=True):\n d = {\n 'id': self.id,\n 'type': self.type,\n 'name': self.name,\n 'description': self.description,\n 'options': json.loads(self.options),\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n if with_query:\n d['query'] = self.query.to_dict()\n\n return d\n\n @classmethod\n def get_by_id_and_org(cls, visualization_id, org):\n return cls.select(Visualization, Query).join(Query).where(cls.id == visualization_id,\n Query.org == org).get()\n\n def __unicode__(self):\n return u\"%s %s\" % (self.id, self.type)\n\n\nclass Widget(ModelTimestampsMixin, BaseModel):\n id = peewee.PrimaryKeyField()\n visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)\n text = peewee.TextField(null=True)\n width = peewee.IntegerField()\n options = peewee.TextField()\n dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)\n\n # unused; kept for backward compatability:\n type = peewee.CharField(max_length=100, null=True)\n query_id = peewee.IntegerField(null=True)\n\n class Meta:\n db_table = 'widgets'\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'width': self.width,\n 'options': json.loads(self.options),\n 'dashboard_id': self.dashboard_id,\n 'text': self.text,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n if self.visualization and self.visualization.id:\n d['visualization'] = self.visualization.to_dict()\n\n return d\n\n def __unicode__(self):\n return u\"%s\" % self.id\n\n @classmethod\n def get_by_id_and_org(cls, widget_id, org):\n return cls.select(cls, Dashboard).join(Dashboard).where(cls.id == widget_id, Dashboard.org == org).get()\n\n def delete_instance(self, *args, **kwargs):\n layout = json.loads(self.dashboard.layout)\n layout = map(lambda row: filter(lambda w: w != self.id, row), layout)\n layout = filter(lambda row: len(row) > 0, layout)\n self.dashboard.layout = json.dumps(layout)\n self.dashboard.save()\n super(Widget, self).delete_instance(*args, **kwargs)\n\n\nclass Event(BaseModel):\n org = peewee.ForeignKeyField(Organization, related_name=\"events\")\n user = peewee.ForeignKeyField(User, related_name=\"events\", null=True)\n action = peewee.CharField()\n object_type = peewee.CharField()\n object_id = peewee.CharField(null=True)\n additional_properties = peewee.TextField(null=True)\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'events'\n\n def __unicode__(self):\n return u\"%s,%s,%s,%s\" % 
(self.user_id, self.action, self.object_type, self.object_id)\n\n @classmethod\n def record(cls, event):\n org = event.pop('org_id')\n user = event.pop('user_id', None)\n action = event.pop('action')\n object_type = event.pop('object_type')\n object_id = event.pop('object_id', None)\n\n created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))\n additional_properties = json.dumps(event)\n\n event = cls.create(org=org, user=user, action=action, object_type=object_type, object_id=object_id,\n additional_properties=additional_properties, created_at=created_at)\n\n return event\n\n\nclass ApiKey(ModelTimestampsMixin, BaseModel):\n org = peewee.ForeignKeyField(Organization)\n api_key = peewee.CharField(index=True, default=lambda: generate_token(40))\n active = peewee.BooleanField(default=True)\n object_type = peewee.CharField()\n object_id = peewee.IntegerField()\n object = GFKField('object_type', 'object_id')\n created_by = peewee.ForeignKeyField(User, null=True)\n\n class Meta:\n db_table = 'api_keys'\n indexes = (\n (('object_type', 'object_id'), False),\n )\n\n @classmethod\n def get_by_api_key(cls, api_key):\n return cls.get(cls.api_key==api_key, cls.active==True)\n\n @classmethod\n def get_by_object(cls, object):\n return cls.select().where(cls.object_type==object._meta.db_table, cls.object_id==object.id, cls.active==True).first()\n\n @classmethod\n def create_for_object(cls, object, user):\n return cls.create(org=user.org, object=object, created_by=user)\n\n\nclass NotificationDestination(BelongsToOrgMixin, BaseModel):\n\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"notification_destinations\")\n user = peewee.ForeignKeyField(User, related_name=\"notification_destinations\")\n name = peewee.CharField()\n type = peewee.CharField()\n options = ConfigurationField()\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'notification_destinations'\n\n indexes = (\n (('org', 'name'), True),\n )\n\n def to_dict(self, all=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'icon': self.destination.icon()\n }\n\n if all:\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n\n return d\n\n def __unicode__(self):\n return self.name\n\n @property\n def destination(self):\n return get_destination(self.type, self.options)\n\n @classmethod\n def all(cls, org):\n notification_destinations = cls.select().where(cls.org==org).order_by(cls.id.asc())\n\n return notification_destinations\n\n def notify(self, alert, query, user, new_state, app, host):\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n return self.destination.notify(alert, query, user, new_state,\n app, host, self.options)\n\n\nclass AlertSubscription(ModelTimestampsMixin, BaseModel):\n user = peewee.ForeignKeyField(User)\n destination = peewee.ForeignKeyField(NotificationDestination, null=True)\n alert = peewee.ForeignKeyField(Alert, related_name=\"subscriptions\")\n\n class Meta:\n db_table = 'alert_subscriptions'\n\n indexes = (\n (('destination', 'alert'), True),\n )\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'user': self.user.to_dict(),\n 'alert_id': self.alert_id\n }\n\n if self.destination:\n d['destination'] = self.destination.to_dict()\n\n return d\n\n @classmethod\n def all(cls, alert_id):\n return AlertSubscription.select(AlertSubscription, 
User).join(User).where(AlertSubscription.alert==alert_id)\n\n def notify(self, alert, query, user, new_state, app, host):\n if self.destination:\n return self.destination.notify(alert, query, user, new_state,\n app, host)\n else:\n # User email subscription, so create an email destination object\n config = {'email': self.user.email}\n schema = get_configuration_schema_for_destination_type('email')\n options = ConfigurationContainer(json.dumps(config), schema)\n destination = get_destination('email', options)\n return destination.notify(alert, query, user, new_state,\n app, host, options)\n\n\nall_models = (Organization, Group, DataSource, DataSourceGroup, User, QueryResult, Query, Alert, Dashboard, Visualization, Widget, Event, NotificationDestination, AlertSubscription, ApiKey)\n\n\ndef init_db():\n default_org = Organization.create(name=\"Default\", slug='default', settings={})\n admin_group = Group.create(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)\n default_group = Group.create(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)\n\n return default_org, admin_group, default_group\n\n\ndef create_db(create_tables, drop_tables):\n db.connect_db()\n\n for model in all_models:\n if drop_tables and model.table_exists():\n model.drop_table(cascade=True)\n\n if create_tables and not model.table_exists():\n model.create_table()\n\n db.close_db(None)\n", "path": "redash/models.py" } ]
[ { "content": "import json\nfrom flask_login import UserMixin, AnonymousUserMixin\nimport hashlib\nimport logging\nimport os\nimport threading\nimport time\nimport datetime\nimport itertools\nfrom funcy import project\n\nimport peewee\nfrom passlib.apps import custom_app_context as pwd_context\nfrom playhouse.gfk import GFKField, BaseModel\nfrom playhouse.postgres_ext import ArrayField, DateTimeTZField\nfrom permissions import has_access, view_only\n\nfrom redash import utils, settings, redis_connection\nfrom redash.query_runner import get_query_runner, get_configuration_schema_for_query_runner_type\nfrom redash.destinations import get_destination, get_configuration_schema_for_destination_type\nfrom redash.metrics.database import MeteredPostgresqlExtDatabase, MeteredModel\nfrom redash.utils import generate_token\nfrom redash.utils.configuration import ConfigurationContainer\n\n\nclass Database(object):\n def __init__(self):\n self.database_config = dict(settings.DATABASE_CONFIG)\n self.database_config['register_hstore'] = False\n self.database_name = self.database_config.pop('name')\n self.database = MeteredPostgresqlExtDatabase(self.database_name, **self.database_config)\n self.app = None\n self.pid = os.getpid()\n\n def init_app(self, app):\n self.app = app\n self.register_handlers()\n\n def connect_db(self):\n self._check_pid()\n self.database.reset_metrics()\n self.database.connect()\n\n def close_db(self, exc):\n self._check_pid()\n if not self.database.is_closed():\n self.database.close()\n\n def _check_pid(self):\n current_pid = os.getpid()\n if self.pid != current_pid:\n logging.info(\"New pid detected (%d!=%d); resetting database lock.\", self.pid, current_pid)\n self.pid = os.getpid()\n self.database._conn_lock = threading.Lock()\n\n def register_handlers(self):\n self.app.before_request(self.connect_db)\n self.app.teardown_request(self.close_db)\n\n\ndb = Database()\n\n\n# Support for cast operation on database fields\[email protected]()\ndef cast(self, as_type):\n return peewee.Expression(self, '::', peewee.SQL(as_type))\n\n\nclass JSONField(peewee.TextField):\n def db_value(self, value):\n return json.dumps(value)\n\n def python_value(self, value):\n if not value:\n return value\n return json.loads(value)\n\n\nclass BaseModel(MeteredModel):\n class Meta:\n database = db.database\n\n @classmethod\n def get_by_id(cls, model_id):\n return cls.get(cls.id == model_id)\n\n def pre_save(self, created):\n pass\n\n def post_save(self, created):\n # Handler for post_save operations. Overriding if needed.\n pass\n\n def save(self, *args, **kwargs):\n pk_value = self._get_pk_value()\n created = kwargs.get('force_insert', False) or not bool(pk_value)\n self.pre_save(created)\n super(BaseModel, self).save(*args, **kwargs)\n self.post_save(created)\n\n def update_instance(self, **kwargs):\n for k, v in kwargs.items():\n # setattr(model_instance, field_name, field_obj.python_value(value))\n setattr(self, k, v)\n\n # We have to run pre-save before calculating dirty_fields. 
We end up running it twice,\n # but pre_save calls should be very quick so it's not big of an issue.\n # An alternative can be to recalculate dirty_fields, but it felt more error prone.\n self.pre_save(False)\n\n self.save(only=self.dirty_fields)\n\n\nclass ModelTimestampsMixin(BaseModel):\n updated_at = DateTimeTZField(default=datetime.datetime.now)\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n def pre_save(self, created):\n super(ModelTimestampsMixin, self).pre_save(created)\n\n self.updated_at = datetime.datetime.now()\n\n\nclass BelongsToOrgMixin(object):\n @classmethod\n def get_by_id_and_org(cls, object_id, org):\n return cls.get(cls.id == object_id, cls.org == org)\n\n\nclass PermissionsCheckMixin(object):\n def has_permission(self, permission):\n return self.has_permissions((permission,))\n\n def has_permissions(self, permissions):\n has_permissions = reduce(lambda a, b: a and b,\n map(lambda permission: permission in self.permissions,\n permissions),\n True)\n\n return has_permissions\n\n\nclass AnonymousUser(AnonymousUserMixin, PermissionsCheckMixin):\n @property\n def permissions(self):\n return []\n\n\nclass ApiUser(UserMixin, PermissionsCheckMixin):\n def __init__(self, api_key, org, groups, name=None):\n self.object = None\n if isinstance(api_key, basestring):\n self.id = api_key\n self.name = name\n else:\n self.id = api_key.api_key\n self.name = \"ApiKey: {}\".format(api_key.id)\n self.object = api_key.object\n self.groups = groups\n self.org = org\n\n def __repr__(self):\n return u\"<{}>\".format(self.name)\n\n @property\n def permissions(self):\n return ['view_query']\n\n\nclass Organization(ModelTimestampsMixin, BaseModel):\n SETTING_GOOGLE_APPS_DOMAINS = 'google_apps_domains'\n SETTING_IS_PUBLIC = \"is_public\"\n\n id = peewee.PrimaryKeyField()\n name = peewee.CharField()\n slug = peewee.CharField(unique=True)\n settings = JSONField()\n\n class Meta:\n db_table = 'organizations'\n\n def __repr__(self):\n return u\"<Organization: {}, {}>\".format(self.id, self.name)\n\n # When Organization is used with LocalProxy (like the current_org helper), peewee doesn't recognize it as a Model\n # and might call int() on it. 
This method makes sure it works.\n def __int__(self):\n return self.id\n\n @classmethod\n def get_by_slug(cls, slug):\n return cls.get(cls.slug == slug)\n\n @property\n def default_group(self):\n return self.groups.where(Group.name=='default', Group.type==Group.BUILTIN_GROUP).first()\n\n @property\n def google_apps_domains(self):\n return self.settings.get(self.SETTING_GOOGLE_APPS_DOMAINS, [])\n\n @property\n def is_public(self):\n return self.settings.get(self.SETTING_IS_PUBLIC, False)\n\n @property\n def admin_group(self):\n return self.groups.where(Group.name=='admin', Group.type==Group.BUILTIN_GROUP).first()\n\n def has_user(self, email):\n return self.users.where(User.email==email).count() == 1\n\n\nclass Group(BaseModel, BelongsToOrgMixin):\n DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query',\n 'view_query', 'view_source', 'execute_query', 'list_users', 'schedule_query',\n 'list_dashboards', 'list_alerts', 'list_data_sources']\n\n BUILTIN_GROUP = 'builtin'\n REGULAR_GROUP = 'regular'\n\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"groups\")\n type = peewee.CharField(default=REGULAR_GROUP)\n name = peewee.CharField(max_length=100)\n permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS)\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'groups'\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'permissions': self.permissions,\n 'type': self.type,\n 'created_at': self.created_at\n }\n\n @classmethod\n def all(cls, org):\n return cls.select().where(cls.org==org)\n\n @classmethod\n def members(cls, group_id):\n return User.select().where(peewee.SQL(\"%s = ANY(groups)\", group_id))\n\n @classmethod\n def find_by_name(cls, org, group_names):\n result = cls.select().where(cls.org == org, cls.name << group_names)\n return list(result)\n\n def __unicode__(self):\n return unicode(self.id)\n\n\nclass User(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin, UserMixin, PermissionsCheckMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"users\")\n name = peewee.CharField(max_length=320)\n email = peewee.CharField(max_length=320)\n password_hash = peewee.CharField(max_length=128, null=True)\n groups = ArrayField(peewee.IntegerField, null=True)\n api_key = peewee.CharField(max_length=40, unique=True)\n\n class Meta:\n db_table = 'users'\n\n indexes = (\n (('org', 'email'), True),\n )\n\n def __init__(self, *args, **kwargs):\n super(User, self).__init__(*args, **kwargs)\n\n def to_dict(self, with_api_key=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'email': self.email,\n 'gravatar_url': self.gravatar_url,\n 'groups': self.groups,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n if self.password_hash is None:\n d['auth_type'] = 'external'\n else:\n d['auth_type'] = 'password'\n\n if with_api_key:\n d['api_key'] = self.api_key\n\n return d\n\n def pre_save(self, created):\n super(User, self).pre_save(created)\n\n if not self.api_key:\n self.api_key = generate_token(40)\n\n @property\n def gravatar_url(self):\n email_md5 = hashlib.md5(self.email.lower()).hexdigest()\n return \"https://www.gravatar.com/avatar/%s?s=40\" % email_md5\n\n @property\n def permissions(self):\n # TODO: this should be cached.\n return list(itertools.chain(*[g.permissions for g in\n Group.select().where(Group.id << self.groups)]))\n\n @classmethod\n def get_by_email_and_org(cls, 
email, org):\n return cls.get(cls.email == email, cls.org == org)\n\n @classmethod\n def get_by_api_key_and_org(cls, api_key, org):\n return cls.get(cls.api_key == api_key, cls.org == org)\n\n @classmethod\n def all(cls, org):\n return cls.select().where(cls.org == org)\n\n @classmethod\n def find_by_email(cls, email):\n return cls.select().where(cls.email == email)\n\n def __unicode__(self):\n return u'%s (%s)' % (self.name, self.email)\n\n def hash_password(self, password):\n self.password_hash = pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return self.password_hash and pwd_context.verify(password, self.password_hash)\n\n def update_group_assignments(self, group_names):\n groups = Group.find_by_name(self.org, group_names)\n groups.append(self.org.default_group)\n self.groups = map(lambda g: g.id, groups)\n self.save()\n\n\nclass ConfigurationField(peewee.TextField):\n def db_value(self, value):\n return value.to_json()\n\n def python_value(self, value):\n return ConfigurationContainer.from_json(value)\n\n\nclass DataSource(BelongsToOrgMixin, BaseModel):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"data_sources\")\n name = peewee.CharField()\n type = peewee.CharField()\n options = ConfigurationField()\n queue_name = peewee.CharField(default=\"queries\")\n scheduled_queue_name = peewee.CharField(default=\"scheduled_queries\")\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'data_sources'\n\n indexes = (\n (('org', 'name'), True),\n )\n\n def to_dict(self, all=False, with_permissions=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'syntax': self.query_runner.syntax,\n 'paused': self.paused,\n 'pause_reason': self.pause_reason\n }\n\n if all:\n schema = get_configuration_schema_for_query_runner_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n d['queue_name'] = self.queue_name\n d['scheduled_queue_name'] = self.scheduled_queue_name\n d['groups'] = self.groups\n\n if with_permissions:\n d['view_only'] = self.data_source_groups.view_only\n\n return d\n\n def __unicode__(self):\n return self.name\n\n @classmethod\n def create_with_group(cls, *args, **kwargs):\n data_source = cls.create(*args, **kwargs)\n DataSourceGroup.create(data_source=data_source, group=data_source.org.default_group)\n return data_source\n\n def get_schema(self, refresh=False):\n key = \"data_source:schema:{}\".format(self.id)\n\n cache = None\n if not refresh:\n cache = redis_connection.get(key)\n\n if cache is None:\n query_runner = self.query_runner\n schema = sorted(query_runner.get_schema(get_stats=refresh), key=lambda t: t['name'])\n\n redis_connection.set(key, json.dumps(schema))\n else:\n schema = json.loads(cache)\n\n return schema\n\n def _pause_key(self):\n return 'ds:{}:pause'.format(self.id)\n\n @property\n def paused(self):\n return redis_connection.exists(self._pause_key())\n\n @property\n def pause_reason(self):\n return redis_connection.get(self._pause_key())\n\n def pause(self, reason=None):\n redis_connection.set(self._pause_key(), reason)\n\n def resume(self):\n redis_connection.delete(self._pause_key())\n\n def add_group(self, group, view_only=False):\n dsg = DataSourceGroup.create(group=group, data_source=self, view_only=view_only)\n setattr(self, 'data_source_groups', dsg)\n\n def remove_group(self, group):\n DataSourceGroup.delete().where(DataSourceGroup.group==group, 
DataSourceGroup.data_source==self).execute()\n\n def update_group_permission(self, group, view_only):\n dsg = DataSourceGroup.get(DataSourceGroup.group==group, DataSourceGroup.data_source==self)\n dsg.view_only = view_only\n dsg.save()\n setattr(self, 'data_source_groups', dsg)\n\n @property\n def query_runner(self):\n return get_query_runner(self.type, self.options)\n\n @classmethod\n def all(cls, org, groups=None):\n data_sources = cls.select().where(cls.org==org).order_by(cls.id.asc())\n\n if groups:\n data_sources = data_sources.join(DataSourceGroup).where(DataSourceGroup.group << groups)\n\n return data_sources\n\n @property\n def groups(self):\n groups = DataSourceGroup.select().where(DataSourceGroup.data_source==self)\n return dict(map(lambda g: (g.group_id, g.view_only), groups))\n\n\nclass DataSourceGroup(BaseModel):\n data_source = peewee.ForeignKeyField(DataSource)\n group = peewee.ForeignKeyField(Group, related_name=\"data_sources\")\n view_only = peewee.BooleanField(default=False)\n\n class Meta:\n db_table = \"data_source_groups\"\n\n\nclass QueryResult(BaseModel, BelongsToOrgMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization)\n data_source = peewee.ForeignKeyField(DataSource)\n query_hash = peewee.CharField(max_length=32, index=True)\n query = peewee.TextField()\n data = peewee.TextField()\n runtime = peewee.FloatField()\n retrieved_at = DateTimeTZField()\n\n class Meta:\n db_table = 'query_results'\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'query_hash': self.query_hash,\n 'query': self.query,\n 'data': json.loads(self.data),\n 'data_source_id': self.data_source_id,\n 'runtime': self.runtime,\n 'retrieved_at': self.retrieved_at\n }\n\n @classmethod\n def unused(cls, days=7):\n age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)\n\n unused_results = cls.select().where(Query.id == None, cls.retrieved_at < age_threshold)\\\n .join(Query, join_type=peewee.JOIN_LEFT_OUTER)\n\n return unused_results\n\n @classmethod\n def get_latest(cls, data_source, query, max_age=0):\n query_hash = utils.gen_query_hash(query)\n\n if max_age == -1:\n query = cls.select().where(cls.query_hash == query_hash,\n cls.data_source == data_source).order_by(cls.retrieved_at.desc())\n else:\n query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source,\n peewee.SQL(\"retrieved_at + interval '%s second' >= now() at time zone 'utc'\",\n max_age)).order_by(cls.retrieved_at.desc())\n\n return query.first()\n\n @classmethod\n def store_result(cls, org_id, data_source_id, query_hash, query, data, run_time, retrieved_at):\n query_result = cls.create(org=org_id,\n query_hash=query_hash,\n query=query,\n runtime=run_time,\n data_source=data_source_id,\n retrieved_at=retrieved_at,\n data=data)\n\n logging.info(\"Inserted query (%s) data; id=%s\", query_hash, query_result.id)\n\n sql = \"UPDATE queries SET latest_query_data_id = %s WHERE query_hash = %s AND data_source_id = %s RETURNING id\"\n query_ids = [row[0] for row in db.database.execute_sql(sql, params=(query_result.id, query_hash, data_source_id))]\n\n # TODO: when peewee with update & returning support is released, we can get back to using this code:\n # updated_count = Query.update(latest_query_data=query_result).\\\n # where(Query.query_hash==query_hash, Query.data_source==data_source_id).\\\n # execute()\n\n logging.info(\"Updated %s queries with result (%s).\", len(query_ids), query_hash)\n\n return query_result, query_ids\n\n def __unicode__(self):\n return u\"%d 
| %s | %s\" % (self.id, self.query_hash, self.retrieved_at)\n\n @property\n def groups(self):\n return self.data_source.groups\n\n\ndef should_schedule_next(previous_iteration, now, schedule):\n if schedule.isdigit():\n ttl = int(schedule)\n next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)\n else:\n hour, minute = schedule.split(':')\n hour, minute = int(hour), int(minute)\n\n # The following logic is needed for cases like the following:\n # - The query scheduled to run at 23:59.\n # - The scheduler wakes up at 00:01.\n # - Using naive implementation of comparing timestamps, it will skip the execution.\n normalized_previous_iteration = previous_iteration.replace(hour=hour, minute=minute)\n if normalized_previous_iteration > previous_iteration:\n previous_iteration = normalized_previous_iteration - datetime.timedelta(days=1)\n\n next_iteration = (previous_iteration + datetime.timedelta(days=1)).replace(hour=hour, minute=minute)\n\n return now > next_iteration\n\n\nclass Query(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"queries\")\n data_source = peewee.ForeignKeyField(DataSource, null=True)\n latest_query_data = peewee.ForeignKeyField(QueryResult, null=True)\n name = peewee.CharField(max_length=255)\n description = peewee.CharField(max_length=4096, null=True)\n query = peewee.TextField()\n query_hash = peewee.CharField(max_length=32)\n api_key = peewee.CharField(max_length=40)\n user = peewee.ForeignKeyField(User)\n last_modified_by = peewee.ForeignKeyField(User, null=True, related_name=\"modified_queries\")\n is_archived = peewee.BooleanField(default=False, index=True)\n schedule = peewee.CharField(max_length=10, null=True)\n options = JSONField(default={})\n\n class Meta:\n db_table = 'queries'\n\n def to_dict(self, with_stats=False, with_visualizations=False, with_user=True, with_last_modified_by=True):\n d = {\n 'id': self.id,\n 'latest_query_data_id': self._data.get('latest_query_data', None),\n 'name': self.name,\n 'description': self.description,\n 'query': self.query,\n 'query_hash': self.query_hash,\n 'schedule': self.schedule,\n 'api_key': self.api_key,\n 'is_archived': self.is_archived,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at,\n 'data_source_id': self.data_source_id,\n 'options': self.options\n }\n\n if with_user:\n d['user'] = self.user.to_dict()\n else:\n d['user_id'] = self.user_id\n\n if with_last_modified_by:\n d['last_modified_by'] = self.last_modified_by.to_dict() if self.last_modified_by is not None else None\n else:\n d['last_modified_by_id'] = self.last_modified_by_id\n\n if with_stats:\n d['retrieved_at'] = self.retrieved_at\n d['runtime'] = self.runtime\n\n if with_visualizations:\n d['visualizations'] = [vis.to_dict(with_query=False)\n for vis in self.visualizations]\n\n return d\n\n def archive(self):\n self.is_archived = True\n self.schedule = None\n\n for vis in self.visualizations:\n for w in vis.widgets:\n w.delete_instance()\n\n for alert in self.alerts:\n alert.delete_instance(recursive=True)\n\n self.save()\n\n @classmethod\n def all_queries(cls, groups):\n q = Query.select(Query, User, QueryResult.retrieved_at, QueryResult.runtime)\\\n .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER)\\\n .switch(Query).join(User)\\\n .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source))\\\n .where(Query.is_archived==False)\\\n .where(DataSourceGroup.group << groups)\\\n .group_by(Query.id, User.id, 
QueryResult.id, QueryResult.retrieved_at, QueryResult.runtime)\\\n .order_by(cls.created_at.desc())\n\n return q\n\n @classmethod\n def outdated_queries(cls):\n queries = cls.select(cls, QueryResult.retrieved_at, DataSource)\\\n .join(QueryResult)\\\n .switch(Query).join(DataSource)\\\n .where(cls.schedule != None)\n\n now = utils.utcnow()\n outdated_queries = {}\n for query in queries:\n if should_schedule_next(query.latest_query_data.retrieved_at, now, query.schedule):\n key = \"{}:{}\".format(query.query_hash, query.data_source.id)\n outdated_queries[key] = query\n\n return outdated_queries.values()\n\n @classmethod\n def search(cls, term, groups):\n # TODO: This is very naive implementation of search, to be replaced with PostgreSQL full-text-search solution.\n\n where = (cls.name**u\"%{}%\".format(term)) | (cls.description**u\"%{}%\".format(term))\n\n if term.isdigit():\n where |= cls.id == term\n\n where &= cls.is_archived == False\n\n query_ids = cls.select(peewee.fn.Distinct(cls.id))\\\n .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)) \\\n .where(where) \\\n .where(DataSourceGroup.group << groups)\n\n return cls.select().where(cls.id << query_ids)\n\n\n @classmethod\n def recent(cls, groups, user_id=None, limit=20):\n query = cls.select(Query, User).where(Event.created_at > peewee.SQL(\"current_date - 7\")).\\\n join(Event, on=(Query.id == Event.object_id.cast('integer'))). \\\n join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source)). \\\n switch(Query).join(User).\\\n where(Event.action << ('edit', 'execute', 'edit_name', 'edit_description', 'view_source')).\\\n where(~(Event.object_id >> None)).\\\n where(Event.object_type == 'query'). \\\n where(DataSourceGroup.group << groups).\\\n where(cls.is_archived == False).\\\n group_by(Event.object_id, Query.id, User.id).\\\n order_by(peewee.SQL(\"count(0) desc\"))\n\n if user_id:\n query = query.where(Event.user == user_id)\n\n query = query.limit(limit)\n\n return query\n\n def pre_save(self, created):\n super(Query, self).pre_save(created)\n self.query_hash = utils.gen_query_hash(self.query)\n self._set_api_key()\n\n if self.last_modified_by is None:\n self.last_modified_by = self.user\n\n def post_save(self, created):\n if created:\n self._create_default_visualizations()\n\n def _create_default_visualizations(self):\n table_visualization = Visualization(query=self, name=\"Table\",\n description='',\n type=\"TABLE\", options=\"{}\")\n table_visualization.save()\n\n def _set_api_key(self):\n if not self.api_key:\n self.api_key = hashlib.sha1(\n u''.join((str(time.time()), self.query, str(self.user_id), self.name)).encode('utf-8')).hexdigest()\n\n @property\n def runtime(self):\n return self.latest_query_data.runtime\n\n @property\n def retrieved_at(self):\n return self.latest_query_data.retrieved_at\n\n @property\n def groups(self):\n if self.data_source is None:\n return {}\n\n return self.data_source.groups\n\n def __unicode__(self):\n return unicode(self.id)\n\n\nclass Alert(ModelTimestampsMixin, BaseModel):\n UNKNOWN_STATE = 'unknown'\n OK_STATE = 'ok'\n TRIGGERED_STATE = 'triggered'\n\n id = peewee.PrimaryKeyField()\n name = peewee.CharField()\n query = peewee.ForeignKeyField(Query, related_name='alerts')\n user = peewee.ForeignKeyField(User, related_name='alerts')\n options = JSONField()\n state = peewee.CharField(default=UNKNOWN_STATE)\n last_triggered_at = DateTimeTZField(null=True)\n rearm = peewee.IntegerField(null=True)\n\n class Meta:\n db_table = 'alerts'\n\n @classmethod\n 
def all(cls, groups):\n return cls.select(Alert, User, Query)\\\n .join(Query)\\\n .join(DataSourceGroup, on=(Query.data_source==DataSourceGroup.data_source))\\\n .where(DataSourceGroup.group << groups)\\\n .switch(Alert)\\\n .join(User)\\\n .group_by(Alert, User, Query)\n\n @classmethod\n def get_by_id_and_org(cls, id, org):\n return cls.select(Alert, User, Query).join(Query).switch(Alert).join(User).where(cls.id==id, Query.org==org).get()\n\n def to_dict(self, full=True):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'options': self.options,\n 'state': self.state,\n 'last_triggered_at': self.last_triggered_at,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at,\n 'rearm': self.rearm\n }\n\n if full:\n d['query'] = self.query.to_dict()\n d['user'] = self.user.to_dict()\n else:\n d['query_id'] = self.query_id\n d['user_id'] = self.user_id\n\n return d\n\n def evaluate(self):\n data = json.loads(self.query.latest_query_data.data)\n # todo: safe guard for empty\n value = data['rows'][0][self.options['column']]\n op = self.options['op']\n\n if op == 'greater than' and value > self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'less than' and value < self.options['value']:\n new_state = self.TRIGGERED_STATE\n elif op == 'equals' and value == self.options['value']:\n new_state = self.TRIGGERED_STATE\n else:\n new_state = self.OK_STATE\n\n return new_state\n\n def subscribers(self):\n return User.select().join(AlertSubscription).where(AlertSubscription.alert==self)\n\n @property\n def groups(self):\n return self.query.groups\n\n\nclass Dashboard(ModelTimestampsMixin, BaseModel, BelongsToOrgMixin):\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"dashboards\")\n slug = peewee.CharField(max_length=140, index=True)\n name = peewee.CharField(max_length=100)\n user = peewee.ForeignKeyField(User)\n layout = peewee.TextField()\n dashboard_filters_enabled = peewee.BooleanField(default=False)\n is_archived = peewee.BooleanField(default=False, index=True)\n\n class Meta:\n db_table = 'dashboards'\n\n def to_dict(self, with_widgets=False, user=None):\n layout = json.loads(self.layout)\n\n if with_widgets:\n widget_list = Widget.select(Widget, Visualization, Query, User)\\\n .where(Widget.dashboard == self.id)\\\n .join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\\\n .join(Query, join_type=peewee.JOIN_LEFT_OUTER)\\\n .join(User, join_type=peewee.JOIN_LEFT_OUTER)\n\n widgets = {}\n\n for w in widget_list:\n if w.visualization_id is None:\n widgets[w.id] = w.to_dict()\n elif user and has_access(w.visualization.query.groups, user, view_only):\n widgets[w.id] = w.to_dict()\n else:\n widgets[w.id] = project(w.to_dict(),\n ('id', 'width', 'dashboard_id', 'options', 'created_at', 'updated_at'))\n widgets[w.id]['restricted'] = True\n\n # The following is a workaround for cases when the widget object gets deleted without the dashboard layout\n # updated. 
This happens for users with old databases that didn't have a foreign key relationship between\n # visualizations and widgets.\n # It's temporary until better solution is implemented (we probably should move the position information\n # to the widget).\n widgets_layout = []\n for row in layout:\n new_row = []\n for widget_id in row:\n widget = widgets.get(widget_id, None)\n if widget:\n new_row.append(widget)\n\n widgets_layout.append(new_row)\n else:\n widgets_layout = None\n\n return {\n 'id': self.id,\n 'slug': self.slug,\n 'name': self.name,\n 'user_id': self.user_id,\n 'layout': layout,\n 'dashboard_filters_enabled': self.dashboard_filters_enabled,\n 'widgets': widgets_layout,\n 'is_archived': self.is_archived,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n @classmethod\n def all(cls, org, groups, user_id):\n query = cls.select().\\\n join(Widget, peewee.JOIN_LEFT_OUTER, on=(Dashboard.id == Widget.dashboard)). \\\n join(Visualization, peewee.JOIN_LEFT_OUTER, on=(Widget.visualization == Visualization.id)). \\\n join(Query, peewee.JOIN_LEFT_OUTER, on=(Visualization.query == Query.id)). \\\n join(DataSourceGroup, peewee.JOIN_LEFT_OUTER, on=(Query.data_source == DataSourceGroup.data_source)). \\\n where(Dashboard.is_archived == False). \\\n where((DataSourceGroup.group << groups) |\n (Dashboard.user == user_id) |\n (~(Widget.dashboard >> None) & (Widget.visualization >> None))). \\\n where(Dashboard.org == org). \\\n group_by(Dashboard.id)\n\n return query\n\n @classmethod\n def recent(cls, org, groups, user_id, for_user=False, limit=20):\n query = cls.select().where(Event.created_at > peewee.SQL(\"current_date - 7\")). \\\n join(Event, peewee.JOIN_LEFT_OUTER, on=(Dashboard.id == Event.object_id.cast('integer'))). \\\n join(Widget, peewee.JOIN_LEFT_OUTER, on=(Dashboard.id == Widget.dashboard)). \\\n join(Visualization, peewee.JOIN_LEFT_OUTER, on=(Widget.visualization == Visualization.id)). \\\n join(Query, peewee.JOIN_LEFT_OUTER, on=(Visualization.query == Query.id)). \\\n join(DataSourceGroup, peewee.JOIN_LEFT_OUTER, on=(Query.data_source == DataSourceGroup.data_source)). \\\n where(Event.action << ('edit', 'view')). \\\n where(~(Event.object_id >> None)). \\\n where(Event.object_type == 'dashboard'). \\\n where(Dashboard.is_archived == False). \\\n where(Dashboard.org == org). \\\n where((DataSourceGroup.group << groups) |\n (Dashboard.user == user_id) |\n (~(Widget.dashboard >> None) & (Widget.visualization >> None))). \\\n group_by(Event.object_id, Dashboard.id). 
\\\n order_by(peewee.SQL(\"count(0) desc\"))\n\n if for_user:\n query = query.where(Event.user == user_id)\n\n query = query.limit(limit)\n\n return query\n\n @classmethod\n def get_by_slug_and_org(cls, slug, org):\n return cls.get(cls.slug == slug, cls.org==org)\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = utils.slugify(self.name)\n\n tries = 1\n while self.select().where(Dashboard.slug == self.slug).first() is not None:\n self.slug = utils.slugify(self.name) + \"_{0}\".format(tries)\n tries += 1\n\n super(Dashboard, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u\"%s=%s\" % (self.id, self.name)\n\n\nclass Visualization(ModelTimestampsMixin, BaseModel):\n id = peewee.PrimaryKeyField()\n type = peewee.CharField(max_length=100)\n query = peewee.ForeignKeyField(Query, related_name='visualizations')\n name = peewee.CharField(max_length=255)\n description = peewee.CharField(max_length=4096, null=True)\n options = peewee.TextField()\n\n class Meta:\n db_table = 'visualizations'\n\n def to_dict(self, with_query=True):\n d = {\n 'id': self.id,\n 'type': self.type,\n 'name': self.name,\n 'description': self.description,\n 'options': json.loads(self.options),\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n if with_query:\n d['query'] = self.query.to_dict()\n\n return d\n\n @classmethod\n def get_by_id_and_org(cls, visualization_id, org):\n return cls.select(Visualization, Query).join(Query).where(cls.id == visualization_id,\n Query.org == org).get()\n\n def __unicode__(self):\n return u\"%s %s\" % (self.id, self.type)\n\n\nclass Widget(ModelTimestampsMixin, BaseModel):\n id = peewee.PrimaryKeyField()\n visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True)\n text = peewee.TextField(null=True)\n width = peewee.IntegerField()\n options = peewee.TextField()\n dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True)\n\n # unused; kept for backward compatability:\n type = peewee.CharField(max_length=100, null=True)\n query_id = peewee.IntegerField(null=True)\n\n class Meta:\n db_table = 'widgets'\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'width': self.width,\n 'options': json.loads(self.options),\n 'dashboard_id': self.dashboard_id,\n 'text': self.text,\n 'updated_at': self.updated_at,\n 'created_at': self.created_at\n }\n\n if self.visualization and self.visualization.id:\n d['visualization'] = self.visualization.to_dict()\n\n return d\n\n def __unicode__(self):\n return u\"%s\" % self.id\n\n @classmethod\n def get_by_id_and_org(cls, widget_id, org):\n return cls.select(cls, Dashboard).join(Dashboard).where(cls.id == widget_id, Dashboard.org == org).get()\n\n def delete_instance(self, *args, **kwargs):\n layout = json.loads(self.dashboard.layout)\n layout = map(lambda row: filter(lambda w: w != self.id, row), layout)\n layout = filter(lambda row: len(row) > 0, layout)\n self.dashboard.layout = json.dumps(layout)\n self.dashboard.save()\n super(Widget, self).delete_instance(*args, **kwargs)\n\n\nclass Event(BaseModel):\n org = peewee.ForeignKeyField(Organization, related_name=\"events\")\n user = peewee.ForeignKeyField(User, related_name=\"events\", null=True)\n action = peewee.CharField()\n object_type = peewee.CharField()\n object_id = peewee.CharField(null=True)\n additional_properties = peewee.TextField(null=True)\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'events'\n\n def __unicode__(self):\n return u\"%s,%s,%s,%s\" % 
(self.user_id, self.action, self.object_type, self.object_id)\n\n @classmethod\n def record(cls, event):\n org = event.pop('org_id')\n user = event.pop('user_id', None)\n action = event.pop('action')\n object_type = event.pop('object_type')\n object_id = event.pop('object_id', None)\n\n created_at = datetime.datetime.utcfromtimestamp(event.pop('timestamp'))\n additional_properties = json.dumps(event)\n\n event = cls.create(org=org, user=user, action=action, object_type=object_type, object_id=object_id,\n additional_properties=additional_properties, created_at=created_at)\n\n return event\n\n\nclass ApiKey(ModelTimestampsMixin, BaseModel):\n org = peewee.ForeignKeyField(Organization)\n api_key = peewee.CharField(index=True, default=lambda: generate_token(40))\n active = peewee.BooleanField(default=True)\n object_type = peewee.CharField()\n object_id = peewee.IntegerField()\n object = GFKField('object_type', 'object_id')\n created_by = peewee.ForeignKeyField(User, null=True)\n\n class Meta:\n db_table = 'api_keys'\n indexes = (\n (('object_type', 'object_id'), False),\n )\n\n @classmethod\n def get_by_api_key(cls, api_key):\n return cls.get(cls.api_key==api_key, cls.active==True)\n\n @classmethod\n def get_by_object(cls, object):\n return cls.select().where(cls.object_type==object._meta.db_table, cls.object_id==object.id, cls.active==True).first()\n\n @classmethod\n def create_for_object(cls, object, user):\n return cls.create(org=user.org, object=object, created_by=user)\n\n\nclass NotificationDestination(BelongsToOrgMixin, BaseModel):\n\n id = peewee.PrimaryKeyField()\n org = peewee.ForeignKeyField(Organization, related_name=\"notification_destinations\")\n user = peewee.ForeignKeyField(User, related_name=\"notification_destinations\")\n name = peewee.CharField()\n type = peewee.CharField()\n options = ConfigurationField()\n created_at = DateTimeTZField(default=datetime.datetime.now)\n\n class Meta:\n db_table = 'notification_destinations'\n\n indexes = (\n (('org', 'name'), True),\n )\n\n def to_dict(self, all=False):\n d = {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'icon': self.destination.icon()\n }\n\n if all:\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n d['options'] = self.options.to_dict(mask_secrets=True)\n\n return d\n\n def __unicode__(self):\n return self.name\n\n @property\n def destination(self):\n return get_destination(self.type, self.options)\n\n @classmethod\n def all(cls, org):\n notification_destinations = cls.select().where(cls.org==org).order_by(cls.id.asc())\n\n return notification_destinations\n\n def notify(self, alert, query, user, new_state, app, host):\n schema = get_configuration_schema_for_destination_type(self.type)\n self.options.set_schema(schema)\n return self.destination.notify(alert, query, user, new_state,\n app, host, self.options)\n\n\nclass AlertSubscription(ModelTimestampsMixin, BaseModel):\n user = peewee.ForeignKeyField(User)\n destination = peewee.ForeignKeyField(NotificationDestination, null=True)\n alert = peewee.ForeignKeyField(Alert, related_name=\"subscriptions\")\n\n class Meta:\n db_table = 'alert_subscriptions'\n\n indexes = (\n (('destination', 'alert'), True),\n )\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'user': self.user.to_dict(),\n 'alert_id': self.alert_id\n }\n\n if self.destination:\n d['destination'] = self.destination.to_dict()\n\n return d\n\n @classmethod\n def all(cls, alert_id):\n return AlertSubscription.select(AlertSubscription, 
User).join(User).where(AlertSubscription.alert==alert_id)\n\n def notify(self, alert, query, user, new_state, app, host):\n if self.destination:\n return self.destination.notify(alert, query, user, new_state,\n app, host)\n else:\n # User email subscription, so create an email destination object\n config = {'email': self.user.email}\n schema = get_configuration_schema_for_destination_type('email')\n options = ConfigurationContainer(json.dumps(config), schema)\n destination = get_destination('email', options)\n return destination.notify(alert, query, user, new_state,\n app, host, options)\n\n\nall_models = (Organization, Group, DataSource, DataSourceGroup, User, QueryResult, Query, Alert, Dashboard, Visualization, Widget, Event, NotificationDestination, AlertSubscription, ApiKey)\n\n\ndef init_db():\n default_org = Organization.create(name=\"Default\", slug='default', settings={})\n admin_group = Group.create(name='admin', permissions=['admin', 'super_admin'], org=default_org, type=Group.BUILTIN_GROUP)\n default_group = Group.create(name='default', permissions=Group.DEFAULT_PERMISSIONS, org=default_org, type=Group.BUILTIN_GROUP)\n\n return default_org, admin_group, default_group\n\n\ndef create_db(create_tables, drop_tables):\n db.connect_db()\n\n for model in all_models:\n if drop_tables and model.table_exists():\n model.drop_table(cascade=True)\n\n if create_tables and not model.table_exists():\n model.create_table()\n\n db.close_db(None)\n", "path": "redash/models.py" } ]
diff --git a/rd_ui/app/views/query.html b/rd_ui/app/views/query.html index 9be65037af..812d37076d 100644 --- a/rd_ui/app/views/query.html +++ b/rd_ui/app/views/query.html @@ -8,7 +8,7 @@ <h4 class="modal-title">Query Archive</h4> </div> <div class="modal-body"> Are you sure you want to archive this query? - <br/> All dashboard widgets created with its visualizations will be deleted. + <br/> All alerts and dashboard widgets created with its visualizations will be deleted. </div> <div class="modal-footer"> <button type="button" class="btn btn-default" data-dismiss="modal">No</button> diff --git a/redash/models.py b/redash/models.py index 5cd51abf21..9b9071d0fd 100644 --- a/redash/models.py +++ b/redash/models.py @@ -641,6 +641,9 @@ def archive(self): for w in vis.widgets: w.delete_instance() + for alert in self.alerts: + alert.delete_instance(recursive=True) + self.save() @classmethod diff --git a/tests/factories.py b/tests/factories.py index b3dfae46a4..c91c584293 100644 --- a/tests/factories.py +++ b/tests/factories.py @@ -185,6 +185,14 @@ def create_alert(self, **kwargs): args.update(**kwargs) return alert_factory.create(**args) + def create_alert_subscription(self, **kwargs): + args = { + 'user': self.user + } + + args.update(**kwargs) + return alert_subscription_factory.create(**args) + def create_data_source(self, **kwargs): args = { 'org': self.org @@ -274,6 +282,3 @@ def create_api_key(self, **kwargs): def create_destination(self, **kwargs): return destination_factory.create(**kwargs) - - def create_alert_subscription(self, **kwargs): - return alert_subscription_factory.create(**kwargs) diff --git a/tests/test_models.py b/tests/test_models.py index 12c5d5ae24..3304613391 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -276,6 +276,16 @@ def test_removes_scheduling(self): self.assertEqual(None, query.schedule) + def test_deletes_alerts(self): + subscription = self.factory.create_alert_subscription() + query = subscription.alert.query + + query.archive() + + self.assertRaises(models.Alert.DoesNotExist, models.Alert.get_by_id, subscription.alert.id) + self.assertRaises(models.AlertSubscription.DoesNotExist, models.AlertSubscription.get_by_id, subscription.id) + + class DataSourceTest(BaseTestCase): def test_get_schema(self): return_value = [{'name': 'table', 'columns': []}]
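A reading aid for the diff above, not part of the change itself: `Query.archive()` now calls `alert.delete_instance(recursive=True)`, and peewee's recursive delete also removes rows that hold a non-nullable foreign key to the deleted row, which is why the new test expects the `AlertSubscription` to disappear along with the `Alert`. Below is a standalone sketch of that semantics using throwaway models; none of these names or values come from redash.

```python
# Standalone sketch of peewee's recursive delete semantics (not redash code).
import peewee

db = peewee.SqliteDatabase(":memory:")


class Alert(peewee.Model):
    name = peewee.CharField()

    class Meta:
        database = db


class AlertSubscription(peewee.Model):
    # Non-nullable FK: recursive delete removes the row outright.
    # (Nullable FKs would instead be set to NULL unless delete_nullable=True.)
    alert = peewee.ForeignKeyField(Alert)

    class Meta:
        database = db


db.create_tables([Alert, AlertSubscription])

alert = Alert.create(name="example")
AlertSubscription.create(alert=alert)

# Deleting the parent with recursive=True also deletes the subscription row.
alert.delete_instance(recursive=True)
print(Alert.select().count(), AlertSubscription.select().count())  # 0 0
```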
apache__airflow-14774
[Smart sensor] Runtime error: dictionary changed size during iteration

**What happened**:

Smart Sensor TI crashes with a RuntimeError. Here are the logs:

```
RuntimeError: dictionary changed size during iteration
  File "airflow/sentry.py", line 159, in wrapper
    return func(task_instance, *args, session=session, **kwargs)
  File "airflow/models/taskinstance.py", line 1112, in _run_raw_task
    self._prepare_and_execute_task_with_callbacks(context, task)
  File "airflow/models/taskinstance.py", line 1285, in _prepare_and_execute_task_with_callbacks
    result = self._execute_task(context, task_copy)
  File "airflow/models/taskinstance.py", line 1315, in _execute_task
    result = task_copy.execute(context=context)
  File "airflow/sensors/smart_sensor.py", line 736, in execute
    self.flush_cached_sensor_poke_results()
  File "airflow/sensors/smart_sensor.py", line 681, in flush_cached_sensor_poke_results
    for ti_key, sensor_exception in self.cached_sensor_exceptions.items():
```

**What you expected to happen**:

Smart sensor should always execute without any runtime error.

**How to reproduce it**:

I haven't been able to reproduce it consistently since it sometimes works and sometimes errors.

**Anything else we need to know**:

It's a really noisy error in Sentry. In just 4 days, 3.8k events were reported in Sentry.
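The traceback points at `flush_cached_sensor_poke_results` popping entries from `self.cached_sensor_exceptions` while iterating over it. Here is a minimal, self-contained sketch of that failure mode and of the usual mitigation (iterating over a snapshot of the dict); the dictionary keys and values are made-up stand-ins, not Airflow data. The same guard appears in the `flush_cached_sensor_poke_results` shown in the smart_sensor.py listings below, which iterate over `.copy().items()`.

```python
# Minimal sketch (not Airflow code): popping from a dict while iterating over
# it raises "RuntimeError: dictionary changed size during iteration".
cached_sensor_exceptions = {
    ("dag_a", "task_1"): "infra failure",   # hypothetical ti_key -> exception info
    ("dag_b", "task_2"): "poke timeout",
}

try:
    for ti_key, exc in cached_sensor_exceptions.items():
        cached_sensor_exceptions.pop(ti_key, None)  # mutates the dict mid-iteration
except RuntimeError as err:
    print(err)  # dictionary changed size during iteration

# Mitigation: iterate over a shallow copy so the original dict can be
# mutated safely inside the loop.
for ti_key, exc in cached_sensor_exceptions.copy().items():
    cached_sensor_exceptions.pop(ti_key, None)
```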
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport datetime\nimport json\nimport logging\nimport traceback\nfrom logging.config import DictConfigurator # type: ignore\nfrom time import sleep\n\nfrom sqlalchemy import and_, or_, tuple_\n\nfrom airflow.exceptions import AirflowException, AirflowTaskTimeout\nfrom airflow.models import BaseOperator, SensorInstance, SkipMixin, TaskInstance\nfrom airflow.settings import LOGGING_CLASS_PATH\nfrom airflow.stats import Stats\nfrom airflow.utils import helpers, timezone\nfrom airflow.utils.email import send_email\nfrom airflow.utils.log.logging_mixin import set_context\nfrom airflow.utils.module_loading import import_string\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.session import provide_session\nfrom airflow.utils.state import PokeState, State\nfrom airflow.utils.timeout import timeout\n\nconfig = import_string(LOGGING_CLASS_PATH)\nhandler_config = config['handlers']['task']\ntry:\n formatter_config = config['formatters'][handler_config['formatter']]\nexcept Exception as err: # pylint: disable=broad-except\n formatter_config = None\n print(err)\ndictConfigurator = DictConfigurator(config)\n\n\nclass SensorWork:\n \"\"\"\n This class stores a sensor work with decoded context value. It is only used\n inside of smart sensor. 
Create a sensor work based on sensor instance record.\n A sensor work object has the following attributes:\n `dag_id`: sensor_instance dag_id.\n `task_id`: sensor_instance task_id.\n `execution_date`: sensor_instance execution_date.\n `try_number`: sensor_instance try_number\n `poke_context`: Decoded poke_context for the sensor task.\n `execution_context`: Decoded execution_context.\n `hashcode`: This is the signature of poking job.\n `operator`: The sensor operator class.\n `op_classpath`: The sensor operator class path\n `encoded_poke_context`: The raw data from sensor_instance poke_context column.\n `log`: The sensor work logger which will mock the corresponding task instance log.\n\n :param si: The sensor_instance ORM object.\n \"\"\"\n\n def __init__(self, si):\n self.dag_id = si.dag_id\n self.task_id = si.task_id\n self.execution_date = si.execution_date\n self.try_number = si.try_number\n\n self.poke_context = json.loads(si.poke_context) if si.poke_context else {}\n self.execution_context = json.loads(si.execution_context) if si.execution_context else {}\n try:\n self.log = self._get_sensor_logger(si)\n except Exception as e: # pylint: disable=broad-except\n self.log = None\n print(e)\n self.hashcode = si.hashcode\n self.start_date = si.start_date\n self.operator = si.operator\n self.op_classpath = si.op_classpath\n self.encoded_poke_context = si.poke_context\n\n def __eq__(self, other):\n if not isinstance(other, SensorWork):\n return NotImplemented\n\n return (\n self.dag_id == other.dag_id\n and self.task_id == other.task_id\n and self.execution_date == other.execution_date\n and self.try_number == other.try_number\n )\n\n @staticmethod\n def create_new_task_handler():\n \"\"\"\n Create task log handler for a sensor work.\n :return: log handler\n \"\"\"\n from airflow.utils.log.secrets_masker import _secrets_masker # noqa\n\n handler_config_copy = {k: handler_config[k] for k in handler_config}\n del handler_config_copy['filters']\n\n formatter_config_copy = {k: formatter_config[k] for k in formatter_config}\n handler = dictConfigurator.configure_handler(handler_config_copy)\n formatter = dictConfigurator.configure_formatter(formatter_config_copy)\n handler.setFormatter(formatter)\n\n # We want to share the _global_ filterer instance, not create a new one\n handler.addFilter(_secrets_masker())\n return handler\n\n def _get_sensor_logger(self, si):\n \"\"\"Return logger for a sensor instance object.\"\"\"\n # The created log_id is used inside of smart sensor as the key to fetch\n # the corresponding in memory log handler.\n si.raw = False # Otherwise set_context will fail\n log_id = \"-\".join(\n [si.dag_id, si.task_id, si.execution_date.strftime(\"%Y_%m_%dT%H_%M_%S_%f\"), str(si.try_number)]\n )\n logger = logging.getLogger('airflow.task' + '.' 
+ log_id)\n\n if len(logger.handlers) == 0:\n handler = self.create_new_task_handler()\n logger.addHandler(handler)\n set_context(logger, si)\n\n line_break = \"-\" * 120\n logger.info(line_break)\n logger.info(\n \"Processing sensor task %s in smart sensor service on host: %s\", self.ti_key, get_hostname()\n )\n logger.info(line_break)\n return logger\n\n def close_sensor_logger(self):\n \"\"\"Close log handler for a sensor work.\"\"\"\n for handler in self.log.handlers:\n try:\n handler.close()\n except Exception as e: # pylint: disable=broad-except\n print(e)\n\n @property\n def ti_key(self):\n \"\"\"Key for the task instance that maps to the sensor work.\"\"\"\n return self.dag_id, self.task_id, self.execution_date\n\n @property\n def cache_key(self):\n \"\"\"Key used to query in smart sensor for cached sensor work.\"\"\"\n return self.operator, self.encoded_poke_context\n\n\nclass CachedPokeWork:\n \"\"\"\n Wrapper class for the poke work inside smart sensor. It saves\n the sensor_task used to poke and recent poke result state.\n state: poke state.\n sensor_task: The cached object for executing the poke function.\n last_poke_time: The latest time this cached work being called.\n to_flush: If we should flush the cached work.\n \"\"\"\n\n def __init__(self):\n self.state = None\n self.sensor_task = None\n self.last_poke_time = None\n self.to_flush = False\n\n def set_state(self, state):\n \"\"\"\n Set state for cached poke work.\n :param state: The sensor_instance state.\n \"\"\"\n self.state = state\n self.last_poke_time = timezone.utcnow()\n\n def clear_state(self):\n \"\"\"Clear state for cached poke work.\"\"\"\n self.state = None\n\n def set_to_flush(self):\n \"\"\"Mark this poke work to be popped from cached dict after current loop.\"\"\"\n self.to_flush = True\n\n def is_expired(self):\n \"\"\"\n The cached task object expires if there is no poke for 20 minutes.\n :return: Boolean\n \"\"\"\n return self.to_flush or (timezone.utcnow() - self.last_poke_time).total_seconds() > 1200\n\n\nclass SensorExceptionInfo:\n \"\"\"\n Hold sensor exception information and the type of exception. For possible transient\n infra failure, give the task more chance to retry before fail it.\n \"\"\"\n\n def __init__(\n self,\n exception_info,\n is_infra_failure=False,\n infra_failure_retry_window=datetime.timedelta(minutes=130),\n ):\n self._exception_info = exception_info\n self._is_infra_failure = is_infra_failure\n self._infra_failure_retry_window = infra_failure_retry_window\n\n self._infra_failure_timeout = None\n self.set_infra_failure_timeout()\n self.fail_current_run = self.should_fail_current_run()\n\n def set_latest_exception(self, exception_info, is_infra_failure=False):\n \"\"\"\n This function set the latest exception information for sensor exception. If the exception\n implies an infra failure, this function will check the recorded infra failure timeout\n which was set at the first infra failure exception arrives. 
There is a 6 hours window\n for retry without failing current run.\n\n :param exception_info: Details of the exception information.\n :param is_infra_failure: If current exception was caused by transient infra failure.\n There is a retry window _infra_failure_retry_window that the smart sensor will\n retry poke function without failing current task run.\n \"\"\"\n self._exception_info = exception_info\n self._is_infra_failure = is_infra_failure\n\n self.set_infra_failure_timeout()\n self.fail_current_run = self.should_fail_current_run()\n\n def set_infra_failure_timeout(self):\n \"\"\"\n Set the time point when the sensor should be failed if it kept getting infra\n failure.\n :return:\n \"\"\"\n # Only set the infra_failure_timeout if there is no existing one\n if not self._is_infra_failure:\n self._infra_failure_timeout = None\n elif self._infra_failure_timeout is None:\n self._infra_failure_timeout = timezone.utcnow() + self._infra_failure_retry_window\n\n def should_fail_current_run(self):\n \"\"\"\n :return: Should the sensor fail\n :type: boolean\n \"\"\"\n return not self.is_infra_failure or timezone.utcnow() > self._infra_failure_timeout\n\n @property\n def exception_info(self):\n \"\"\":return: exception msg.\"\"\"\n return self._exception_info\n\n @property\n def is_infra_failure(self):\n \"\"\"\n\n :return: If the exception is an infra failure\n :type: boolean\n \"\"\"\n return self._is_infra_failure\n\n def is_expired(self):\n \"\"\"\n :return: If current exception need to be kept.\n :type: boolean\n \"\"\"\n if not self._is_infra_failure:\n return True\n return timezone.utcnow() > self._infra_failure_timeout + datetime.timedelta(minutes=30)\n\n\nclass SmartSensorOperator(BaseOperator, SkipMixin):\n \"\"\"\n Smart sensor operators are derived from this class.\n\n Smart Sensor operators keep refresh a dictionary by visiting DB.\n Taking qualified active sensor tasks. Different from sensor operator,\n Smart sensor operators poke for all sensor tasks in the dictionary at\n a time interval. 
When a criteria is met or fail by time out, it update\n all sensor task state in task_instance table\n\n :param soft_fail: Set to true to mark the task as SKIPPED on failure\n :type soft_fail: bool\n :param poke_interval: Time in seconds that the job should wait in\n between each tries.\n :type poke_interval: int\n :param smart_sensor_timeout: Time, in seconds before the internal sensor\n job times out if poke_timeout is not defined.\n :type smart_sensor_timeout: float\n :param shard_min: shard code lower bound (inclusive)\n :type shard_min: int\n :param shard_max: shard code upper bound (exclusive)\n :type shard_max: int\n :param poke_timeout: Time, in seconds before the task times out and fails.\n :type poke_timeout: float\n \"\"\"\n\n ui_color = '#e6f1f2'\n\n def __init__(\n self,\n poke_interval=180,\n smart_sensor_timeout=60 * 60 * 24 * 7,\n soft_fail=False,\n shard_min=0,\n shard_max=100000,\n poke_timeout=6.0,\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n # super(SmartSensorOperator, self).__init__(*args, **kwargs)\n self.poke_interval = poke_interval\n self.soft_fail = soft_fail\n self.timeout = smart_sensor_timeout\n self._validate_input_values()\n self.hostname = \"\"\n\n self.sensor_works = []\n self.cached_dedup_works = {}\n self.cached_sensor_exceptions = {}\n\n self.max_tis_per_query = 50\n self.shard_min = shard_min\n self.shard_max = shard_max\n self.poke_timeout = poke_timeout\n\n def _validate_input_values(self):\n if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:\n raise AirflowException(\"The poke_interval must be a non-negative number\")\n if not isinstance(self.timeout, (int, float)) or self.timeout < 0:\n raise AirflowException(\"The timeout must be a non-negative number\")\n\n @provide_session\n def _load_sensor_works(self, session=None):\n \"\"\"\n Refresh sensor instances need to be handled by this operator. Create smart sensor\n internal object based on the information persisted in the sensor_instance table.\n\n \"\"\"\n SI = SensorInstance\n with Stats.timer() as timer:\n query = (\n session.query(SI)\n .filter(SI.state == State.SENSING)\n .filter(SI.shardcode < self.shard_max, SI.shardcode >= self.shard_min)\n )\n tis = query.all()\n\n self.log.info(\"Performance query %s tis, time: %.3f\", len(tis), timer.duration)\n\n # Query without checking dagrun state might keep some failed dag_run tasks alive.\n # Join with DagRun table will be very slow based on the number of sensor tasks we\n # need to handle. 
We query all smart tasks in this operator\n # and expect scheduler correct the states in _change_state_for_tis_without_dagrun()\n\n sensor_works = []\n for ti in tis:\n try:\n sensor_works.append(SensorWork(ti))\n except Exception: # pylint: disable=broad-except\n self.log.exception(\"Exception at creating sensor work for ti %s\", ti.key)\n\n self.log.info(\"%d tasks detected.\", len(sensor_works))\n\n new_sensor_works = [x for x in sensor_works if x not in self.sensor_works]\n\n self._update_ti_hostname(new_sensor_works)\n\n self.sensor_works = sensor_works\n\n @provide_session\n def _update_ti_hostname(self, sensor_works, session=None):\n \"\"\"\n Update task instance hostname for new sensor works.\n\n :param sensor_works: Smart sensor internal object for a sensor task.\n :param session: The sqlalchemy session.\n \"\"\"\n TI = TaskInstance\n\n def update_ti_hostname_with_count(count, sensor_works):\n # Using or_ instead of in_ here to prevent from full table scan.\n if session.bind.dialect.name == 'mssql':\n ti_filter = or_(\n and_(\n TI.dag_id == ti_key.dag_id,\n TI.task_id == ti_key.task_id,\n TI.execution_date == ti_key.execution_date,\n )\n for ti_key in sensor_works\n )\n else:\n ti_keys = [(x.dag_id, x.task_id, x.execution_date) for x in sensor_works]\n ti_filter = or_(\n tuple_(TI.dag_id, TI.task_id, TI.execution_date) == ti_key for ti_key in ti_keys\n )\n tis = session.query(TI).filter(ti_filter).all()\n\n for ti in tis:\n ti.hostname = self.hostname\n session.commit()\n\n return count + len(sensor_works)\n\n count = helpers.reduce_in_chunks(\n update_ti_hostname_with_count, sensor_works, 0, self.max_tis_per_query\n )\n if count:\n self.log.info(\"Updated hostname on %s tis.\", count)\n\n @provide_session\n def _mark_multi_state(self, operator, poke_hash, encoded_poke_context, state, session=None):\n \"\"\"\n Mark state for multiple tasks in the task_instance table to a new state if they have\n the same signature as the poke_hash.\n\n :param operator: The sensor's operator class name.\n :param poke_hash: The hash code generated from sensor's poke context.\n :param encoded_poke_context: The raw encoded poke_context.\n :param state: Set multiple sensor tasks to this state.\n :param session: The sqlalchemy session.\n \"\"\"\n\n def mark_state(ti, sensor_instance):\n ti.state = state\n sensor_instance.state = state\n if state in State.finished:\n ti.end_date = end_date\n ti.set_duration()\n\n SI = SensorInstance\n TI = TaskInstance\n\n count_marked = 0\n try:\n query_result = (\n session.query(TI, SI)\n .join(\n TI,\n and_(\n TI.dag_id == SI.dag_id,\n TI.task_id == SI.task_id,\n TI.execution_date == SI.execution_date,\n ),\n )\n .filter(SI.state == State.SENSING)\n .filter(SI.hashcode == poke_hash)\n .filter(SI.operator == operator)\n .with_for_update()\n .all()\n )\n\n end_date = timezone.utcnow()\n for ti, sensor_instance in query_result:\n if sensor_instance.poke_context != encoded_poke_context:\n continue\n\n ti.hostname = self.hostname\n if ti.state == State.SENSING:\n mark_state(ti=ti, sensor_instance=sensor_instance)\n count_marked += 1\n else:\n # ti.state != State.SENSING\n sensor_instance.state = ti.state\n\n session.commit()\n\n except Exception: # pylint: disable=broad-except\n self.log.warning(\n \"Exception _mark_multi_state in smart sensor for hashcode %s\",\n str(poke_hash), # cast to str in advance for highlighting\n exc_info=True,\n )\n self.log.info(\"Marked %s tasks out of %s to state %s\", count_marked, len(query_result), state)\n\n @provide_session\n def 
_retry_or_fail_task(self, sensor_work, error, session=None):\n \"\"\"\n Change single task state for sensor task. For final state, set the end_date.\n Since smart sensor take care all retries in one process. Failed sensor tasks\n logically experienced all retries and the try_number should be set to max_tries.\n\n :param sensor_work: The sensor_work with exception.\n :type sensor_work: SensorWork\n :param error: The error message for this sensor_work.\n :type error: str.\n :param session: The sqlalchemy session.\n \"\"\"\n\n def email_alert(task_instance, error_info):\n try:\n subject, html_content, _ = task_instance.get_email_subject_content(error_info)\n email = sensor_work.execution_context.get('email')\n\n send_email(email, subject, html_content)\n except Exception: # pylint: disable=broad-except\n sensor_work.log.warning(\"Exception alerting email.\", exc_info=True)\n\n def handle_failure(sensor_work, ti):\n if sensor_work.execution_context.get('retries') and ti.try_number <= ti.max_tries:\n # retry\n ti.state = State.UP_FOR_RETRY\n if sensor_work.execution_context.get('email_on_retry') and sensor_work.execution_context.get(\n 'email'\n ):\n sensor_work.log.info(\"%s sending email alert for retry\", sensor_work.ti_key)\n email_alert(ti, error)\n else:\n ti.state = State.FAILED\n if sensor_work.execution_context.get(\n 'email_on_failure'\n ) and sensor_work.execution_context.get('email'):\n sensor_work.log.info(\"%s sending email alert for failure\", sensor_work.ti_key)\n email_alert(ti, error)\n\n try:\n dag_id, task_id, execution_date = sensor_work.ti_key\n TI = TaskInstance\n SI = SensorInstance\n sensor_instance = (\n session.query(SI)\n .filter(SI.dag_id == dag_id, SI.task_id == task_id, SI.execution_date == execution_date)\n .with_for_update()\n .first()\n )\n\n if sensor_instance.hashcode != sensor_work.hashcode:\n # Return without setting state\n return\n\n ti = (\n session.query(TI)\n .filter(TI.dag_id == dag_id, TI.task_id == task_id, TI.execution_date == execution_date)\n .with_for_update()\n .first()\n )\n\n if ti:\n if ti.state == State.SENSING:\n ti.hostname = self.hostname\n handle_failure(sensor_work, ti)\n\n sensor_instance.state = State.FAILED\n ti.end_date = timezone.utcnow()\n ti.set_duration()\n else:\n sensor_instance.state = ti.state\n session.merge(sensor_instance)\n session.merge(ti)\n session.commit()\n\n sensor_work.log.info(\n \"Task %s got an error: %s. Set the state to failed. Exit.\", str(sensor_work.ti_key), error\n )\n sensor_work.close_sensor_logger()\n\n except AirflowException:\n sensor_work.log.warning(\"Exception on failing %s\", sensor_work.ti_key, exc_info=True)\n\n def _check_and_handle_ti_timeout(self, sensor_work):\n \"\"\"\n Check if a sensor task in smart sensor is timeout. 
Could be either sensor operator timeout\n or general operator execution_timeout.\n\n :param sensor_work: SensorWork\n \"\"\"\n task_timeout = sensor_work.execution_context.get('timeout', self.timeout)\n task_execution_timeout = sensor_work.execution_context.get('execution_timeout')\n if task_execution_timeout:\n task_timeout = min(task_timeout, task_execution_timeout)\n\n if (timezone.utcnow() - sensor_work.start_date).total_seconds() > task_timeout:\n error = \"Sensor Timeout\"\n sensor_work.log.exception(error)\n self._retry_or_fail_task(sensor_work, error)\n\n def _handle_poke_exception(self, sensor_work):\n \"\"\"\n Fail task if accumulated exceptions exceeds retries.\n\n :param sensor_work: SensorWork\n \"\"\"\n sensor_exception = self.cached_sensor_exceptions.get(sensor_work.cache_key)\n error = sensor_exception.exception_info\n sensor_work.log.exception(\"Handling poke exception: %s\", error)\n\n if sensor_exception.fail_current_run:\n if sensor_exception.is_infra_failure:\n sensor_work.log.exception(\n \"Task %s failed by infra failure in smart sensor.\", sensor_work.ti_key\n )\n # There is a risk for sensor object cached in smart sensor keep throwing\n # exception and cause an infra failure. To make sure the sensor tasks after\n # retry will not fall into same object and have endless infra failure,\n # we mark the sensor task after an infra failure so that it can be popped\n # before next poke loop.\n cache_key = sensor_work.cache_key\n self.cached_dedup_works[cache_key].set_to_flush()\n else:\n sensor_work.log.exception(\"Task %s failed by exceptions.\", sensor_work.ti_key)\n self._retry_or_fail_task(sensor_work, error)\n else:\n sensor_work.log.info(\"Exception detected, retrying without failing current run.\")\n self._check_and_handle_ti_timeout(sensor_work)\n\n def _process_sensor_work_with_cached_state(self, sensor_work, state):\n if state == PokeState.LANDED:\n sensor_work.log.info(\"Task %s succeeded\", str(sensor_work.ti_key))\n sensor_work.close_sensor_logger()\n\n if state == PokeState.NOT_LANDED:\n # Handle timeout if connection valid but not landed yet\n self._check_and_handle_ti_timeout(sensor_work)\n elif state == PokeState.POKE_EXCEPTION:\n self._handle_poke_exception(sensor_work)\n\n def _execute_sensor_work(self, sensor_work):\n ti_key = sensor_work.ti_key\n log = sensor_work.log or self.log\n log.info(\"Sensing ti: %s\", str(ti_key))\n log.info(\"Poking with arguments: %s\", sensor_work.encoded_poke_context)\n\n cache_key = sensor_work.cache_key\n if cache_key not in self.cached_dedup_works:\n # create an empty cached_work for a new cache_key\n self.cached_dedup_works[cache_key] = CachedPokeWork()\n\n cached_work = self.cached_dedup_works[cache_key]\n\n if cached_work.state is not None:\n # Have a valid cached state, don't poke twice in certain time interval\n self._process_sensor_work_with_cached_state(sensor_work, cached_work.state)\n return\n\n try:\n with timeout(seconds=self.poke_timeout):\n if self.poke(sensor_work):\n # Got a landed signal, mark all tasks waiting for this partition\n cached_work.set_state(PokeState.LANDED)\n\n self._mark_multi_state(\n sensor_work.operator,\n sensor_work.hashcode,\n sensor_work.encoded_poke_context,\n State.SUCCESS,\n )\n\n log.info(\"Task %s succeeded\", str(ti_key))\n sensor_work.close_sensor_logger()\n else:\n # Not landed yet. 
Handle possible timeout\n cached_work.set_state(PokeState.NOT_LANDED)\n self._check_and_handle_ti_timeout(sensor_work)\n\n self.cached_sensor_exceptions.pop(cache_key, None)\n except Exception as e: # pylint: disable=broad-except\n # The retry_infra_failure decorator inside hive_hooks will raise exception with\n # is_infra_failure == True. Long poking timeout here is also considered an infra\n # failure. Other exceptions should fail.\n is_infra_failure = getattr(e, 'is_infra_failure', False) or isinstance(e, AirflowTaskTimeout)\n exception_info = traceback.format_exc()\n cached_work.set_state(PokeState.POKE_EXCEPTION)\n\n if cache_key in self.cached_sensor_exceptions:\n self.cached_sensor_exceptions[cache_key].set_latest_exception(\n exception_info, is_infra_failure=is_infra_failure\n )\n else:\n self.cached_sensor_exceptions[cache_key] = SensorExceptionInfo(\n exception_info, is_infra_failure=is_infra_failure\n )\n\n self._handle_poke_exception(sensor_work)\n\n def flush_cached_sensor_poke_results(self):\n \"\"\"Flush outdated cached sensor states saved in previous loop.\"\"\"\n for key, cached_work in self.cached_dedup_works.copy().items():\n if cached_work.is_expired():\n self.cached_dedup_works.pop(key, None)\n else:\n cached_work.state = None\n\n for ti_key, sensor_exception in self.cached_sensor_exceptions.copy().items():\n if sensor_exception.fail_current_run or sensor_exception.is_expired():\n self.cached_sensor_exceptions.pop(ti_key, None)\n\n def poke(self, sensor_work):\n \"\"\"\n Function that the sensors defined while deriving this class should\n override.\n\n \"\"\"\n cached_work = self.cached_dedup_works[sensor_work.cache_key]\n if not cached_work.sensor_task:\n init_args = dict(list(sensor_work.poke_context.items()) + [('task_id', sensor_work.task_id)])\n operator_class = import_string(sensor_work.op_classpath)\n cached_work.sensor_task = operator_class(**init_args)\n\n return cached_work.sensor_task.poke(sensor_work.poke_context)\n\n def _emit_loop_stats(self):\n try:\n count_poke = 0\n count_poke_success = 0\n count_poke_exception = 0\n count_exception_failures = 0\n count_infra_failure = 0\n for cached_work in self.cached_dedup_works.values():\n if cached_work.state is None:\n continue\n count_poke += 1\n if cached_work.state == PokeState.LANDED:\n count_poke_success += 1\n elif cached_work.state == PokeState.POKE_EXCEPTION:\n count_poke_exception += 1\n for cached_exception in self.cached_sensor_exceptions.values():\n if cached_exception.is_infra_failure and cached_exception.fail_current_run:\n count_infra_failure += 1\n if cached_exception.fail_current_run:\n count_exception_failures += 1\n\n Stats.gauge(\"smart_sensor_operator.poked_tasks\", count_poke)\n Stats.gauge(\"smart_sensor_operator.poked_success\", count_poke_success)\n Stats.gauge(\"smart_sensor_operator.poked_exception\", count_poke_exception)\n Stats.gauge(\"smart_sensor_operator.exception_failures\", count_exception_failures)\n Stats.gauge(\"smart_sensor_operator.infra_failures\", count_infra_failure)\n except Exception: # pylint: disable=broad-except\n self.log.exception(\"Exception at getting loop stats %s\")\n\n def execute(self, context):\n started_at = timezone.utcnow()\n\n self.hostname = get_hostname()\n while True:\n poke_start_time = timezone.utcnow()\n\n self.flush_cached_sensor_poke_results()\n\n self._load_sensor_works()\n self.log.info(\"Loaded %s sensor_works\", len(self.sensor_works))\n Stats.gauge(\"smart_sensor_operator.loaded_tasks\", len(self.sensor_works))\n\n for sensor_work in 
self.sensor_works:\n self._execute_sensor_work(sensor_work)\n\n duration = (timezone.utcnow() - poke_start_time).total_seconds()\n\n self.log.info(\"Taking %s to execute %s tasks.\", duration, len(self.sensor_works))\n\n Stats.timing(\"smart_sensor_operator.loop_duration\", duration)\n Stats.gauge(\"smart_sensor_operator.executed_tasks\", len(self.sensor_works))\n self._emit_loop_stats()\n\n if duration < self.poke_interval:\n sleep(self.poke_interval - duration)\n if (timezone.utcnow() - started_at).total_seconds() > self.timeout:\n self.log.info(\"Time is out for smart sensor.\")\n return\n\n def on_kill(self):\n pass\n\n\nif __name__ == '__main__':\n SmartSensorOperator(task_id='test').execute({})\n", "path": "airflow/sensors/smart_sensor.py" } ]
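For readers skimming the listing above, the `SmartSensorOperator` is configured entirely through the constructor arguments documented in its docstring (`poke_interval`, `smart_sensor_timeout`, `shard_min`, `shard_max`, `poke_timeout`). The sketch below only illustrates that parameter surface inside a DAG; the DAG id, schedule, and dates are invented placeholders, and in practice smart sensors are enabled through Airflow configuration rather than a hand-written DAG like this.

```python
# Illustrative only: DAG id, schedule, and shard/timing values are placeholders
# mirroring the constructor arguments documented in SmartSensorOperator.
from datetime import datetime, timedelta

from airflow import DAG
from airflow.sensors.smart_sensor import SmartSensorOperator

with DAG(
    dag_id="smart_sensor_group_shard_0",        # hypothetical DAG id
    start_date=datetime(2021, 1, 1),
    schedule_interval=timedelta(minutes=5),     # hypothetical schedule
    catchup=False,
) as dag:
    SmartSensorOperator(
        task_id="smart_sensor_task",
        poke_interval=180,                 # seconds between poke loops
        smart_sensor_timeout=60 * 60 * 6,  # overall timeout for this service task
        shard_min=0,                       # shardcode lower bound (inclusive)
        shard_max=100000,                  # shardcode upper bound (exclusive)
        poke_timeout=6.0,                  # per-poke timeout in seconds
    )
```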
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport datetime\nimport json\nimport logging\nimport traceback\nfrom logging.config import DictConfigurator # type: ignore\nfrom time import sleep\n\nfrom sqlalchemy import and_, or_, tuple_\n\nfrom airflow.exceptions import AirflowException, AirflowTaskTimeout\nfrom airflow.models import BaseOperator, SensorInstance, SkipMixin, TaskInstance\nfrom airflow.settings import LOGGING_CLASS_PATH\nfrom airflow.stats import Stats\nfrom airflow.utils import helpers, timezone\nfrom airflow.utils.email import send_email\nfrom airflow.utils.log.logging_mixin import set_context\nfrom airflow.utils.module_loading import import_string\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.session import provide_session\nfrom airflow.utils.state import PokeState, State\nfrom airflow.utils.timeout import timeout\n\nconfig = import_string(LOGGING_CLASS_PATH)\nhandler_config = config['handlers']['task']\ntry:\n formatter_config = config['formatters'][handler_config['formatter']]\nexcept Exception as err: # pylint: disable=broad-except\n formatter_config = None\n print(err)\ndictConfigurator = DictConfigurator(config)\n\n\nclass SensorWork:\n \"\"\"\n This class stores a sensor work with decoded context value. It is only used\n inside of smart sensor. 
Create a sensor work based on sensor instance record.\n A sensor work object has the following attributes:\n `dag_id`: sensor_instance dag_id.\n `task_id`: sensor_instance task_id.\n `execution_date`: sensor_instance execution_date.\n `try_number`: sensor_instance try_number\n `poke_context`: Decoded poke_context for the sensor task.\n `execution_context`: Decoded execution_context.\n `hashcode`: This is the signature of poking job.\n `operator`: The sensor operator class.\n `op_classpath`: The sensor operator class path\n `encoded_poke_context`: The raw data from sensor_instance poke_context column.\n `log`: The sensor work logger which will mock the corresponding task instance log.\n\n :param si: The sensor_instance ORM object.\n \"\"\"\n\n def __init__(self, si):\n self.dag_id = si.dag_id\n self.task_id = si.task_id\n self.execution_date = si.execution_date\n self.try_number = si.try_number\n\n self.poke_context = json.loads(si.poke_context) if si.poke_context else {}\n self.execution_context = json.loads(si.execution_context) if si.execution_context else {}\n try:\n self.log = self._get_sensor_logger(si)\n except Exception as e: # pylint: disable=broad-except\n self.log = None\n print(e)\n self.hashcode = si.hashcode\n self.start_date = si.start_date\n self.operator = si.operator\n self.op_classpath = si.op_classpath\n self.encoded_poke_context = si.poke_context\n\n def __eq__(self, other):\n if not isinstance(other, SensorWork):\n return NotImplemented\n\n return (\n self.dag_id == other.dag_id\n and self.task_id == other.task_id\n and self.execution_date == other.execution_date\n and self.try_number == other.try_number\n )\n\n @staticmethod\n def create_new_task_handler():\n \"\"\"\n Create task log handler for a sensor work.\n :return: log handler\n \"\"\"\n from airflow.utils.log.secrets_masker import _secrets_masker # noqa\n\n handler_config_copy = {k: handler_config[k] for k in handler_config}\n del handler_config_copy['filters']\n\n formatter_config_copy = {k: formatter_config[k] for k in formatter_config}\n handler = dictConfigurator.configure_handler(handler_config_copy)\n formatter = dictConfigurator.configure_formatter(formatter_config_copy)\n handler.setFormatter(formatter)\n\n # We want to share the _global_ filterer instance, not create a new one\n handler.addFilter(_secrets_masker())\n return handler\n\n def _get_sensor_logger(self, si):\n \"\"\"Return logger for a sensor instance object.\"\"\"\n # The created log_id is used inside of smart sensor as the key to fetch\n # the corresponding in memory log handler.\n si.raw = False # Otherwise set_context will fail\n log_id = \"-\".join(\n [si.dag_id, si.task_id, si.execution_date.strftime(\"%Y_%m_%dT%H_%M_%S_%f\"), str(si.try_number)]\n )\n logger = logging.getLogger('airflow.task' + '.' 
+ log_id)\n\n if len(logger.handlers) == 0:\n handler = self.create_new_task_handler()\n logger.addHandler(handler)\n set_context(logger, si)\n\n line_break = \"-\" * 120\n logger.info(line_break)\n logger.info(\n \"Processing sensor task %s in smart sensor service on host: %s\", self.ti_key, get_hostname()\n )\n logger.info(line_break)\n return logger\n\n def close_sensor_logger(self):\n \"\"\"Close log handler for a sensor work.\"\"\"\n for handler in self.log.handlers:\n try:\n handler.close()\n except Exception as e: # pylint: disable=broad-except\n print(e)\n\n @property\n def ti_key(self):\n \"\"\"Key for the task instance that maps to the sensor work.\"\"\"\n return self.dag_id, self.task_id, self.execution_date\n\n @property\n def cache_key(self):\n \"\"\"Key used to query in smart sensor for cached sensor work.\"\"\"\n return self.operator, self.encoded_poke_context\n\n\nclass CachedPokeWork:\n \"\"\"\n Wrapper class for the poke work inside smart sensor. It saves\n the sensor_task used to poke and recent poke result state.\n state: poke state.\n sensor_task: The cached object for executing the poke function.\n last_poke_time: The latest time this cached work being called.\n to_flush: If we should flush the cached work.\n \"\"\"\n\n def __init__(self):\n self.state = None\n self.sensor_task = None\n self.last_poke_time = None\n self.to_flush = False\n\n def set_state(self, state):\n \"\"\"\n Set state for cached poke work.\n :param state: The sensor_instance state.\n \"\"\"\n self.state = state\n self.last_poke_time = timezone.utcnow()\n\n def clear_state(self):\n \"\"\"Clear state for cached poke work.\"\"\"\n self.state = None\n\n def set_to_flush(self):\n \"\"\"Mark this poke work to be popped from cached dict after current loop.\"\"\"\n self.to_flush = True\n\n def is_expired(self):\n \"\"\"\n The cached task object expires if there is no poke for 20 minutes.\n :return: Boolean\n \"\"\"\n return self.to_flush or (timezone.utcnow() - self.last_poke_time).total_seconds() > 1200\n\n\nclass SensorExceptionInfo:\n \"\"\"\n Hold sensor exception information and the type of exception. For possible transient\n infra failure, give the task more chance to retry before fail it.\n \"\"\"\n\n def __init__(\n self,\n exception_info,\n is_infra_failure=False,\n infra_failure_retry_window=datetime.timedelta(minutes=130),\n ):\n self._exception_info = exception_info\n self._is_infra_failure = is_infra_failure\n self._infra_failure_retry_window = infra_failure_retry_window\n\n self._infra_failure_timeout = None\n self.set_infra_failure_timeout()\n self.fail_current_run = self.should_fail_current_run()\n\n def set_latest_exception(self, exception_info, is_infra_failure=False):\n \"\"\"\n This function set the latest exception information for sensor exception. If the exception\n implies an infra failure, this function will check the recorded infra failure timeout\n which was set at the first infra failure exception arrives. 
There is a 6 hours window\n for retry without failing current run.\n\n :param exception_info: Details of the exception information.\n :param is_infra_failure: If current exception was caused by transient infra failure.\n There is a retry window _infra_failure_retry_window that the smart sensor will\n retry poke function without failing current task run.\n \"\"\"\n self._exception_info = exception_info\n self._is_infra_failure = is_infra_failure\n\n self.set_infra_failure_timeout()\n self.fail_current_run = self.should_fail_current_run()\n\n def set_infra_failure_timeout(self):\n \"\"\"\n Set the time point when the sensor should be failed if it kept getting infra\n failure.\n :return:\n \"\"\"\n # Only set the infra_failure_timeout if there is no existing one\n if not self._is_infra_failure:\n self._infra_failure_timeout = None\n elif self._infra_failure_timeout is None:\n self._infra_failure_timeout = timezone.utcnow() + self._infra_failure_retry_window\n\n def should_fail_current_run(self):\n \"\"\"\n :return: Should the sensor fail\n :type: boolean\n \"\"\"\n return not self.is_infra_failure or timezone.utcnow() > self._infra_failure_timeout\n\n @property\n def exception_info(self):\n \"\"\":return: exception msg.\"\"\"\n return self._exception_info\n\n @property\n def is_infra_failure(self):\n \"\"\"\n\n :return: If the exception is an infra failure\n :type: boolean\n \"\"\"\n return self._is_infra_failure\n\n def is_expired(self):\n \"\"\"\n :return: If current exception need to be kept.\n :type: boolean\n \"\"\"\n if not self._is_infra_failure:\n return True\n return timezone.utcnow() > self._infra_failure_timeout + datetime.timedelta(minutes=30)\n\n\nclass SmartSensorOperator(BaseOperator, SkipMixin):\n \"\"\"\n Smart sensor operators are derived from this class.\n\n Smart Sensor operators keep refresh a dictionary by visiting DB.\n Taking qualified active sensor tasks. Different from sensor operator,\n Smart sensor operators poke for all sensor tasks in the dictionary at\n a time interval. 
When a criteria is met or fail by time out, it update\n all sensor task state in task_instance table\n\n :param soft_fail: Set to true to mark the task as SKIPPED on failure\n :type soft_fail: bool\n :param poke_interval: Time in seconds that the job should wait in\n between each tries.\n :type poke_interval: int\n :param smart_sensor_timeout: Time, in seconds before the internal sensor\n job times out if poke_timeout is not defined.\n :type smart_sensor_timeout: float\n :param shard_min: shard code lower bound (inclusive)\n :type shard_min: int\n :param shard_max: shard code upper bound (exclusive)\n :type shard_max: int\n :param poke_timeout: Time, in seconds before the task times out and fails.\n :type poke_timeout: float\n \"\"\"\n\n ui_color = '#e6f1f2'\n\n def __init__(\n self,\n poke_interval=180,\n smart_sensor_timeout=60 * 60 * 24 * 7,\n soft_fail=False,\n shard_min=0,\n shard_max=100000,\n poke_timeout=6.0,\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n # super(SmartSensorOperator, self).__init__(*args, **kwargs)\n self.poke_interval = poke_interval\n self.soft_fail = soft_fail\n self.timeout = smart_sensor_timeout\n self._validate_input_values()\n self.hostname = \"\"\n\n self.sensor_works = []\n self.cached_dedup_works = {}\n self.cached_sensor_exceptions = {}\n\n self.max_tis_per_query = 50\n self.shard_min = shard_min\n self.shard_max = shard_max\n self.poke_timeout = poke_timeout\n\n def _validate_input_values(self):\n if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:\n raise AirflowException(\"The poke_interval must be a non-negative number\")\n if not isinstance(self.timeout, (int, float)) or self.timeout < 0:\n raise AirflowException(\"The timeout must be a non-negative number\")\n\n @provide_session\n def _load_sensor_works(self, session=None):\n \"\"\"\n Refresh sensor instances need to be handled by this operator. Create smart sensor\n internal object based on the information persisted in the sensor_instance table.\n\n \"\"\"\n SI = SensorInstance\n with Stats.timer() as timer:\n query = (\n session.query(SI)\n .filter(SI.state == State.SENSING)\n .filter(SI.shardcode < self.shard_max, SI.shardcode >= self.shard_min)\n )\n tis = query.all()\n\n self.log.info(\"Performance query %s tis, time: %.3f\", len(tis), timer.duration)\n\n # Query without checking dagrun state might keep some failed dag_run tasks alive.\n # Join with DagRun table will be very slow based on the number of sensor tasks we\n # need to handle. 
We query all smart tasks in this operator\n # and expect scheduler correct the states in _change_state_for_tis_without_dagrun()\n\n sensor_works = []\n for ti in tis:\n try:\n sensor_works.append(SensorWork(ti))\n except Exception: # pylint: disable=broad-except\n self.log.exception(\"Exception at creating sensor work for ti %s\", ti.key)\n\n self.log.info(\"%d tasks detected.\", len(sensor_works))\n\n new_sensor_works = [x for x in sensor_works if x not in self.sensor_works]\n\n self._update_ti_hostname(new_sensor_works)\n\n self.sensor_works = sensor_works\n\n @provide_session\n def _update_ti_hostname(self, sensor_works, session=None):\n \"\"\"\n Update task instance hostname for new sensor works.\n\n :param sensor_works: Smart sensor internal object for a sensor task.\n :param session: The sqlalchemy session.\n \"\"\"\n TI = TaskInstance\n\n def update_ti_hostname_with_count(count, sensor_works):\n # Using or_ instead of in_ here to prevent from full table scan.\n if session.bind.dialect.name == 'mssql':\n ti_filter = or_(\n and_(\n TI.dag_id == ti_key.dag_id,\n TI.task_id == ti_key.task_id,\n TI.execution_date == ti_key.execution_date,\n )\n for ti_key in sensor_works\n )\n else:\n ti_keys = [(x.dag_id, x.task_id, x.execution_date) for x in sensor_works]\n ti_filter = or_(\n tuple_(TI.dag_id, TI.task_id, TI.execution_date) == ti_key for ti_key in ti_keys\n )\n tis = session.query(TI).filter(ti_filter).all()\n\n for ti in tis:\n ti.hostname = self.hostname\n session.commit()\n\n return count + len(sensor_works)\n\n count = helpers.reduce_in_chunks(\n update_ti_hostname_with_count, sensor_works, 0, self.max_tis_per_query\n )\n if count:\n self.log.info(\"Updated hostname on %s tis.\", count)\n\n @provide_session\n def _mark_multi_state(self, operator, poke_hash, encoded_poke_context, state, session=None):\n \"\"\"\n Mark state for multiple tasks in the task_instance table to a new state if they have\n the same signature as the poke_hash.\n\n :param operator: The sensor's operator class name.\n :param poke_hash: The hash code generated from sensor's poke context.\n :param encoded_poke_context: The raw encoded poke_context.\n :param state: Set multiple sensor tasks to this state.\n :param session: The sqlalchemy session.\n \"\"\"\n\n def mark_state(ti, sensor_instance):\n ti.state = state\n sensor_instance.state = state\n if state in State.finished:\n ti.end_date = end_date\n ti.set_duration()\n\n SI = SensorInstance\n TI = TaskInstance\n\n count_marked = 0\n query_result = []\n try:\n query_result = (\n session.query(TI, SI)\n .join(\n TI,\n and_(\n TI.dag_id == SI.dag_id,\n TI.task_id == SI.task_id,\n TI.execution_date == SI.execution_date,\n ),\n )\n .filter(SI.state == State.SENSING)\n .filter(SI.hashcode == poke_hash)\n .filter(SI.operator == operator)\n .with_for_update()\n .all()\n )\n\n end_date = timezone.utcnow()\n for ti, sensor_instance in query_result:\n if sensor_instance.poke_context != encoded_poke_context:\n continue\n\n ti.hostname = self.hostname\n if ti.state == State.SENSING:\n mark_state(ti=ti, sensor_instance=sensor_instance)\n count_marked += 1\n else:\n # ti.state != State.SENSING\n sensor_instance.state = ti.state\n\n session.commit()\n\n except Exception: # pylint: disable=broad-except\n self.log.warning(\n \"Exception _mark_multi_state in smart sensor for hashcode %s\",\n str(poke_hash), # cast to str in advance for highlighting\n exc_info=True,\n )\n self.log.info(\"Marked %s tasks out of %s to state %s\", count_marked, len(query_result), state)\n\n 
@provide_session\n def _retry_or_fail_task(self, sensor_work, error, session=None):\n \"\"\"\n Change single task state for sensor task. For final state, set the end_date.\n Since smart sensor take care all retries in one process. Failed sensor tasks\n logically experienced all retries and the try_number should be set to max_tries.\n\n :param sensor_work: The sensor_work with exception.\n :type sensor_work: SensorWork\n :param error: The error message for this sensor_work.\n :type error: str.\n :param session: The sqlalchemy session.\n \"\"\"\n\n def email_alert(task_instance, error_info):\n try:\n subject, html_content, _ = task_instance.get_email_subject_content(error_info)\n email = sensor_work.execution_context.get('email')\n\n send_email(email, subject, html_content)\n except Exception: # pylint: disable=broad-except\n sensor_work.log.warning(\"Exception alerting email.\", exc_info=True)\n\n def handle_failure(sensor_work, ti):\n if sensor_work.execution_context.get('retries') and ti.try_number <= ti.max_tries:\n # retry\n ti.state = State.UP_FOR_RETRY\n if sensor_work.execution_context.get('email_on_retry') and sensor_work.execution_context.get(\n 'email'\n ):\n sensor_work.log.info(\"%s sending email alert for retry\", sensor_work.ti_key)\n email_alert(ti, error)\n else:\n ti.state = State.FAILED\n if sensor_work.execution_context.get(\n 'email_on_failure'\n ) and sensor_work.execution_context.get('email'):\n sensor_work.log.info(\"%s sending email alert for failure\", sensor_work.ti_key)\n email_alert(ti, error)\n\n try:\n dag_id, task_id, execution_date = sensor_work.ti_key\n TI = TaskInstance\n SI = SensorInstance\n sensor_instance = (\n session.query(SI)\n .filter(SI.dag_id == dag_id, SI.task_id == task_id, SI.execution_date == execution_date)\n .with_for_update()\n .first()\n )\n\n if sensor_instance.hashcode != sensor_work.hashcode:\n # Return without setting state\n return\n\n ti = (\n session.query(TI)\n .filter(TI.dag_id == dag_id, TI.task_id == task_id, TI.execution_date == execution_date)\n .with_for_update()\n .first()\n )\n\n if ti:\n if ti.state == State.SENSING:\n ti.hostname = self.hostname\n handle_failure(sensor_work, ti)\n\n sensor_instance.state = State.FAILED\n ti.end_date = timezone.utcnow()\n ti.set_duration()\n else:\n sensor_instance.state = ti.state\n session.merge(sensor_instance)\n session.merge(ti)\n session.commit()\n\n sensor_work.log.info(\n \"Task %s got an error: %s. Set the state to failed. Exit.\", str(sensor_work.ti_key), error\n )\n sensor_work.close_sensor_logger()\n\n except AirflowException:\n sensor_work.log.warning(\"Exception on failing %s\", sensor_work.ti_key, exc_info=True)\n\n def _check_and_handle_ti_timeout(self, sensor_work):\n \"\"\"\n Check if a sensor task in smart sensor is timeout. 
Could be either sensor operator timeout\n or general operator execution_timeout.\n\n :param sensor_work: SensorWork\n \"\"\"\n task_timeout = sensor_work.execution_context.get('timeout', self.timeout)\n task_execution_timeout = sensor_work.execution_context.get('execution_timeout')\n if task_execution_timeout:\n task_timeout = min(task_timeout, task_execution_timeout)\n\n if (timezone.utcnow() - sensor_work.start_date).total_seconds() > task_timeout:\n error = \"Sensor Timeout\"\n sensor_work.log.exception(error)\n self._retry_or_fail_task(sensor_work, error)\n\n def _handle_poke_exception(self, sensor_work):\n \"\"\"\n Fail task if accumulated exceptions exceeds retries.\n\n :param sensor_work: SensorWork\n \"\"\"\n sensor_exception = self.cached_sensor_exceptions.get(sensor_work.cache_key)\n error = sensor_exception.exception_info\n sensor_work.log.exception(\"Handling poke exception: %s\", error)\n\n if sensor_exception.fail_current_run:\n if sensor_exception.is_infra_failure:\n sensor_work.log.exception(\n \"Task %s failed by infra failure in smart sensor.\", sensor_work.ti_key\n )\n # There is a risk for sensor object cached in smart sensor keep throwing\n # exception and cause an infra failure. To make sure the sensor tasks after\n # retry will not fall into same object and have endless infra failure,\n # we mark the sensor task after an infra failure so that it can be popped\n # before next poke loop.\n cache_key = sensor_work.cache_key\n self.cached_dedup_works[cache_key].set_to_flush()\n else:\n sensor_work.log.exception(\"Task %s failed by exceptions.\", sensor_work.ti_key)\n self._retry_or_fail_task(sensor_work, error)\n else:\n sensor_work.log.info(\"Exception detected, retrying without failing current run.\")\n self._check_and_handle_ti_timeout(sensor_work)\n\n def _process_sensor_work_with_cached_state(self, sensor_work, state):\n if state == PokeState.LANDED:\n sensor_work.log.info(\"Task %s succeeded\", str(sensor_work.ti_key))\n sensor_work.close_sensor_logger()\n\n if state == PokeState.NOT_LANDED:\n # Handle timeout if connection valid but not landed yet\n self._check_and_handle_ti_timeout(sensor_work)\n elif state == PokeState.POKE_EXCEPTION:\n self._handle_poke_exception(sensor_work)\n\n def _execute_sensor_work(self, sensor_work):\n ti_key = sensor_work.ti_key\n log = sensor_work.log or self.log\n log.info(\"Sensing ti: %s\", str(ti_key))\n log.info(\"Poking with arguments: %s\", sensor_work.encoded_poke_context)\n\n cache_key = sensor_work.cache_key\n if cache_key not in self.cached_dedup_works:\n # create an empty cached_work for a new cache_key\n self.cached_dedup_works[cache_key] = CachedPokeWork()\n\n cached_work = self.cached_dedup_works[cache_key]\n\n if cached_work.state is not None:\n # Have a valid cached state, don't poke twice in certain time interval\n self._process_sensor_work_with_cached_state(sensor_work, cached_work.state)\n return\n\n try:\n with timeout(seconds=self.poke_timeout):\n if self.poke(sensor_work):\n # Got a landed signal, mark all tasks waiting for this partition\n cached_work.set_state(PokeState.LANDED)\n\n self._mark_multi_state(\n sensor_work.operator,\n sensor_work.hashcode,\n sensor_work.encoded_poke_context,\n State.SUCCESS,\n )\n\n log.info(\"Task %s succeeded\", str(ti_key))\n sensor_work.close_sensor_logger()\n else:\n # Not landed yet. 
Handle possible timeout\n cached_work.set_state(PokeState.NOT_LANDED)\n self._check_and_handle_ti_timeout(sensor_work)\n\n self.cached_sensor_exceptions.pop(cache_key, None)\n except Exception as e: # pylint: disable=broad-except\n # The retry_infra_failure decorator inside hive_hooks will raise exception with\n # is_infra_failure == True. Long poking timeout here is also considered an infra\n # failure. Other exceptions should fail.\n is_infra_failure = getattr(e, 'is_infra_failure', False) or isinstance(e, AirflowTaskTimeout)\n exception_info = traceback.format_exc()\n cached_work.set_state(PokeState.POKE_EXCEPTION)\n\n if cache_key in self.cached_sensor_exceptions:\n self.cached_sensor_exceptions[cache_key].set_latest_exception(\n exception_info, is_infra_failure=is_infra_failure\n )\n else:\n self.cached_sensor_exceptions[cache_key] = SensorExceptionInfo(\n exception_info, is_infra_failure=is_infra_failure\n )\n\n self._handle_poke_exception(sensor_work)\n\n def flush_cached_sensor_poke_results(self):\n \"\"\"Flush outdated cached sensor states saved in previous loop.\"\"\"\n for key, cached_work in self.cached_dedup_works.copy().items():\n if cached_work.is_expired():\n self.cached_dedup_works.pop(key, None)\n else:\n cached_work.state = None\n\n for ti_key, sensor_exception in self.cached_sensor_exceptions.copy().items():\n if sensor_exception.fail_current_run or sensor_exception.is_expired():\n self.cached_sensor_exceptions.pop(ti_key, None)\n\n def poke(self, sensor_work):\n \"\"\"\n Function that the sensors defined while deriving this class should\n override.\n\n \"\"\"\n cached_work = self.cached_dedup_works[sensor_work.cache_key]\n if not cached_work.sensor_task:\n init_args = dict(list(sensor_work.poke_context.items()) + [('task_id', sensor_work.task_id)])\n operator_class = import_string(sensor_work.op_classpath)\n cached_work.sensor_task = operator_class(**init_args)\n\n return cached_work.sensor_task.poke(sensor_work.poke_context)\n\n def _emit_loop_stats(self):\n try:\n count_poke = 0\n count_poke_success = 0\n count_poke_exception = 0\n count_exception_failures = 0\n count_infra_failure = 0\n for cached_work in self.cached_dedup_works.values():\n if cached_work.state is None:\n continue\n count_poke += 1\n if cached_work.state == PokeState.LANDED:\n count_poke_success += 1\n elif cached_work.state == PokeState.POKE_EXCEPTION:\n count_poke_exception += 1\n for cached_exception in self.cached_sensor_exceptions.values():\n if cached_exception.is_infra_failure and cached_exception.fail_current_run:\n count_infra_failure += 1\n if cached_exception.fail_current_run:\n count_exception_failures += 1\n\n Stats.gauge(\"smart_sensor_operator.poked_tasks\", count_poke)\n Stats.gauge(\"smart_sensor_operator.poked_success\", count_poke_success)\n Stats.gauge(\"smart_sensor_operator.poked_exception\", count_poke_exception)\n Stats.gauge(\"smart_sensor_operator.exception_failures\", count_exception_failures)\n Stats.gauge(\"smart_sensor_operator.infra_failures\", count_infra_failure)\n except Exception: # pylint: disable=broad-except\n self.log.exception(\"Exception at getting loop stats %s\")\n\n def execute(self, context):\n started_at = timezone.utcnow()\n\n self.hostname = get_hostname()\n while True:\n poke_start_time = timezone.utcnow()\n\n self.flush_cached_sensor_poke_results()\n\n self._load_sensor_works()\n self.log.info(\"Loaded %s sensor_works\", len(self.sensor_works))\n Stats.gauge(\"smart_sensor_operator.loaded_tasks\", len(self.sensor_works))\n\n for sensor_work in 
self.sensor_works:\n self._execute_sensor_work(sensor_work)\n\n duration = (timezone.utcnow() - poke_start_time).total_seconds()\n\n self.log.info(\"Taking %s to execute %s tasks.\", duration, len(self.sensor_works))\n\n Stats.timing(\"smart_sensor_operator.loop_duration\", duration)\n Stats.gauge(\"smart_sensor_operator.executed_tasks\", len(self.sensor_works))\n self._emit_loop_stats()\n\n if duration < self.poke_interval:\n sleep(self.poke_interval - duration)\n if (timezone.utcnow() - started_at).total_seconds() > self.timeout:\n self.log.info(\"Time is out for smart sensor.\")\n return\n\n def on_kill(self):\n pass\n\n\nif __name__ == '__main__':\n SmartSensorOperator(task_id='test').execute({})\n", "path": "airflow/sensors/smart_sensor.py" } ]
diff --git a/airflow/sensors/smart_sensor.py b/airflow/sensors/smart_sensor.py
index 9d0a28c65ae00..6c1c16b98f78c 100644
--- a/airflow/sensors/smart_sensor.py
+++ b/airflow/sensors/smart_sensor.py
@@ -446,6 +446,7 @@ def mark_state(ti, sensor_instance):
         TI = TaskInstance
 
         count_marked = 0
+        query_result = []
         try:
             query_result = (
                 session.query(TI, SI)
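A note on the one-line pr_diff above: judging from the `_mark_multi_state` code in the files above, the added `query_result = []` before the `try:` block keeps the summary log call that follows the broad `except` handler (`len(query_result)`) from hitting an unbound local variable when the query itself raises. Below is a minimal sketch of that pattern, assuming nothing beyond the structure visible above; `run_query`, `failing_query`, and the two function names are hypothetical and used only for illustration.

```python
# Sketch of the failure mode the added initialisation guards against:
# a name bound only inside `try` stays unbound if the query raises first.


def mark_multi_state_before(run_query):
    count_marked = 0
    try:
        query_result = run_query()      # raises, so query_result is never bound
        count_marked = len(query_result)
    except Exception:
        pass                            # swallowed, like the broad except in the operator
    # UnboundLocalError here whenever run_query() raised:
    return "Marked %s tasks out of %s" % (count_marked, len(query_result))


def mark_multi_state_after(run_query):
    count_marked = 0
    query_result = []                   # the line added by the diff
    try:
        query_result = run_query()
        count_marked = len(query_result)
    except Exception:
        pass
    return "Marked %s tasks out of %s" % (count_marked, len(query_result))


def failing_query():
    raise RuntimeError("database unavailable")


# mark_multi_state_before(failing_query) -> UnboundLocalError
print(mark_multi_state_after(failing_query))  # -> "Marked 0 tasks out of 0"
```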
tensorflow__tensor2tensor-198
wmt_encs_tokens_32k - Datagen error
I am trying to start the "datagen" step for the "wmt_encs_tokens_32k" problem and I receive the following error:

```
INFO:tensorflow:Generating problems:
  * wmt_encs_tokens_32k
INFO:tensorflow:Generating training data for wmt_encs_tokens_32k.
Traceback (most recent call last):
  File "/home/ahmed/tensorflow/bin/t2t-datagen", line 290, in <module>
    tf.app.run()
  File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 48, in run
    _sys.exit(main(_sys.argv[:1] + flags_passthrough))
  File "/home/ahmed/tensorflow/bin/t2t-datagen", line 239, in main
    generate_data_for_registered_problem(problem)
  File "/home/ahmed/tensorflow/bin/t2t-datagen", line 286, in generate_data_for_registered_problem
    task_id=task_id)
  File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/problem.py", line 383, in generate_data
    self.train_generator(data_dir, tmp_dir, True),
  File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/wmt.py", line 589, in train_generator
    data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size,
  File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/problem.py", line 371, in vocab_file
    return "%s.%d" % (self.vocab_name, self.targeted_vocab_size)
  File "/home/ahmed/tensorflow/local/lib/python2.7/site-packages/tensor2tensor/data_generators/problem.py", line 343, in targeted_vocab_size
    raise NotImplementedError()  # Not needed if self.is_character_level.
NotImplementedError
```
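Reading this traceback together with the `before` version of `wmt.py` further below: `train_generator` asks for `self.vocab_file`, which reads `self.targeted_vocab_size`, and the base `Text2TextProblem` implementation of that property raises `NotImplementedError`. In the `before` code, the token-level `WMTEnCsTokens32k` class overrides a property named `target_vocab_size` (missing the "ed"), so the failing base default is still the one that runs. Below is a minimal, self-contained sketch of that mechanism using simplified stand-in classes; the `*Sketch` names are illustrative, not tensor2tensor's API.

```python
# Simplified stand-ins for problem.Text2TextProblem and the WMT En-Cs problem
# class; only the property-lookup behaviour from the traceback is reproduced.


class Text2TextProblemSketch:
    @property
    def targeted_vocab_size(self):
        raise NotImplementedError()  # not needed if the problem is character level

    @property
    def vocab_name(self):
        return "vocab.encs"

    @property
    def vocab_file(self):
        # Same shape as problem.py line 371 in the traceback.
        return "%s.%d" % (self.vocab_name, self.targeted_vocab_size)


class WMTEnCsTokens32kSketch(Text2TextProblemSketch):
    @property
    def target_vocab_size(self):  # "target", not "targeted": base default still runs
        return 2 ** 15


try:
    print(WMTEnCsTokens32kSketch().vocab_file)
except NotImplementedError:
    print("NotImplementedError, matching the datagen traceback above")
```

One likely fix, consistent with how the other token-level problems in the same file are written, would be renaming the override to `targeted_vocab_size` so that `vocab_file` resolves to "vocab.encs.32768"; the actual patch is in the `after` version of the record.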
[ { "content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for WMT data-sets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import wsj_parsing\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\ntf.flags.DEFINE_string(\"ende_bpe_path\", \"\", \"Path to BPE files in tmp_dir.\"\n \"Download from https://drive.google.com/open?\"\n \"id=0B_bZck-ksdkpM25jRUN2X2UxMm8\")\n\nFLAGS = tf.flags.FLAGS\n\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n\nclass WMTProblem(problem.Text2TextProblem):\n \"\"\"Base class for WMT problems.\"\"\"\n\n @property\n def is_character_level(self):\n return False\n\n @property\n def num_shards(self):\n return 100\n\n @property\n def vocab_name(self):\n return \"vocab.endefr\"\n\n @property\n def use_subword_tokenizer(self):\n return True\n\n\n# Generic generators used later for multiple problems.\n\n\ndef character_generator(source_path, target_path, character_vocab, eos=None):\n \"\"\"Generator for sequence-to-sequence tasks that just uses characters.\n\n This generator assumes the files at source_path and target_path have\n the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n where inputs are characters from the source lines converted to integers,\n and targets are characters from the target lines, also converted to integers.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n character_vocab: a TextEncoder to encode the characters.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from characters in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = character_vocab.encode(source.strip()) + eos_list\n target_ints = character_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\ndef tabbed_generator(source_path, source_vocab, target_vocab, eos=None):\n r\"\"\"Generator for sequence-to-sequence tasks using tabbed files.\n\n Tokens are derived from text files where each line contains both\n a source and a target string. The two strings are separated by a tab\n character ('\\t'). 
It yields dictionaries of \"inputs\" and \"targets\" where\n inputs are characters from the source lines converted to integers, and\n targets are characters from the target lines, also converted to integers.\n\n Args:\n source_path: path to the file with source and target sentences.\n source_vocab: a SubwordTextEncoder to encode the source string.\n target_vocab: a SubwordTextEncoder to encode the target string.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from characters in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n for line in source_file:\n if line and \"\\t\" in line:\n parts = line.split(\"\\t\", maxsplit=1)\n source, target = parts[0].strip(), parts[1].strip()\n source_ints = source_vocab.encode(source) + eos_list\n target_ints = target_vocab.encode(target) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n\n\ndef token_generator(source_path, target_path, token_vocab, eos=None):\n \"\"\"Generator for sequence-to-sequence tasks that uses tokens.\n\n This generator assumes the files at source_path and target_path have\n the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n where inputs are token ids from the \" \"-split source (and target, resp.) lines\n converted to integers using the token_map.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n token_vocab: text_encoder.TextEncoder object.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from tokens in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = token_vocab.encode(source.strip()) + eos_list\n target_ints = token_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\ndef bi_vocabs_token_generator(source_path,\n target_path,\n source_token_vocab,\n target_token_vocab,\n eos=None):\n \"\"\"Generator for sequence-to-sequence tasks that uses tokens.\n\n This generator assumes the files at source_path and target_path have\n the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n where inputs are token ids from the \" \"-split source (and target, resp.) 
lines\n converted to integers using the token_map.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n source_token_vocab: text_encoder.TextEncoder object.\n target_token_vocab: text_encoder.TextEncoder object.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from tokens in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = source_token_vocab.encode(source.strip()) + eos_list\n target_ints = target_token_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\n# Data-set URLs.\n\n\n_ENDE_TRAIN_DATASETS = [\n [\n \"http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz\", # pylint: disable=line-too-long\n (\"training-parallel-nc-v11/news-commentary-v11.de-en.en\",\n \"training-parallel-nc-v11/news-commentary-v11.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.de-en.en\", \"commoncrawl.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.de-en.en\", \"training/europarl-v7.de-en.de\")\n ],\n]\n_ENDE_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.de\")\n ],\n]\n\n_ENFR_TRAIN_DATASETS = [\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.fr-en.en\", \"commoncrawl.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.fr-en.en\", \"training/europarl-v7.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz\",\n (\"training/news-commentary-v9.fr-en.en\",\n \"training/news-commentary-v9.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt10/training-giga-fren.tar\",\n (\"giga-fren.release2.fixed.en.gz\", \"giga-fren.release2.fixed.fr.gz\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-un.tgz\",\n (\"un/undoc.2000.fr-en.en\", \"un/undoc.2000.fr-en.fr\")\n ],\n]\n_ENFR_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.fr\")\n ],\n]\n\n_ZHEN_TRAIN_DATASETS = [[(\"http://data.statmt.org/wmt17/translation-task/\"\n \"training-parallel-nc-v12.tgz\"),\n (\"training/news-commentary-v12.zh-en.zh\",\n \"training/news-commentary-v12.zh-en.en\")]]\n\n_ZHEN_TEST_DATASETS = [[\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newsdev2017-zhen-src.zh\", \"dev/newsdev2017-zhen-ref.en\")\n]]\n\n# For Macedonian-English the SETimes corpus\n# from http://nlp.ffzg.hr/resources/corpora/setimes/ is used.\n# The original dataset has 207,777 parallel sentences.\n# For training the first 205,777 sentences are used.\n_MKEN_TRAIN_DATASETS = [[\n \"https://github.com/stefan-it/nmt-mk-en/raw/master/data/setimes.mk-en.train.tgz\", # pylint: disable=line-too-long\n (\"train.mk\", \"train.en\")\n]]\n\n# For development 1000 parallel sentences are used.\n_MKEN_TEST_DATASETS = [[\n 
\"https://github.com/stefan-it/nmt-mk-en/raw/master/data/setimes.mk-en.dev.tgz\", # pylint: disable=line-too-long\n (\"dev.mk\", \"dev.en\")\n]]\n\n# English-Czech datasets\n_ENCS_TRAIN_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v11.tgz\", # pylint: disable=line-too-long\n (\"training-parallel-nc-v11/news-commentary-v11.cs-en.en\",\n \"training-parallel-nc-v11/news-commentary-v11.cs-en.cs\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.cs-en.en\", \"commoncrawl.cs-en.cs\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.cs-en.en\", \"training/europarl-v7.cs-en.cs\")\n ],\n]\n_ENCS_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.cs\")\n ],\n]\n\n\n# Generators.\n\n\ndef _get_wmt_ende_dataset(directory, filename):\n \"\"\"Extract the WMT en-de corpus `filename` to directory unless it's there.\"\"\"\n train_path = os.path.join(directory, filename)\n if not (tf.gfile.Exists(train_path + \".de\") and\n tf.gfile.Exists(train_path + \".en\")):\n # We expect that this file has been downloaded from:\n # https://drive.google.com/open?id=0B_bZck-ksdkpM25jRUN2X2UxMm8 and placed\n # in `directory`.\n corpus_file = os.path.join(directory, FLAGS.ende_bpe_path)\n with tarfile.open(corpus_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(directory)\n return train_path\n\n\ndef ende_bpe_token_generator(data_dir, tmp_dir, train):\n \"\"\"Instance of token generator for the WMT en->de task, training set.\"\"\"\n dataset_path = (\"train.tok.clean.bpe.32000\"\n if train else \"newstest2013.tok.bpe.32000\")\n train_path = _get_wmt_ende_dataset(tmp_dir, dataset_path)\n token_tmp_path = os.path.join(tmp_dir, \"vocab.bpe.32000\")\n token_path = os.path.join(data_dir, \"vocab.bpe.32000\")\n tf.gfile.Copy(token_tmp_path, token_path, overwrite=True)\n token_vocab = text_encoder.TokenTextEncoder(vocab_filename=token_path)\n return token_generator(train_path + \".en\", train_path + \".de\", token_vocab,\n EOS)\n\n\ndef _compile_data(tmp_dir, datasets, filename):\n \"\"\"Concatenate all `datasets` and save to `filename`.\"\"\"\n filename = os.path.join(tmp_dir, filename)\n with tf.gfile.GFile(filename + \".lang1\", mode=\"w\") as lang1_resfile:\n with tf.gfile.GFile(filename + \".lang2\", mode=\"w\") as lang2_resfile:\n for dataset in datasets:\n url = dataset[0]\n compressed_filename = os.path.basename(url)\n compressed_filepath = os.path.join(tmp_dir, compressed_filename)\n\n lang1_filename, lang2_filename = dataset[1]\n lang1_filepath = os.path.join(tmp_dir, lang1_filename)\n lang2_filepath = os.path.join(tmp_dir, lang2_filename)\n\n if not os.path.exists(compressed_filepath):\n generator_utils.maybe_download(tmp_dir, compressed_filename, url)\n if not (os.path.exists(lang1_filepath) and\n os.path.exists(lang2_filepath)):\n # For .tar.gz and .tgz files, we read compressed.\n mode = \"r:gz\" if compressed_filepath.endswith(\"gz\") else \"r\"\n with tarfile.open(compressed_filepath, mode) as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n if lang1_filepath.endswith(\".gz\"):\n new_filepath = lang1_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang1_filepath, new_filepath)\n lang1_filepath = new_filepath\n if lang2_filepath.endswith(\".gz\"):\n new_filepath = lang2_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang2_filepath, new_filepath)\n lang2_filepath = new_filepath\n 
with tf.gfile.GFile(lang1_filepath, mode=\"r\") as lang1_file:\n with tf.gfile.GFile(lang2_filepath, mode=\"r\") as lang2_file:\n line1, line2 = lang1_file.readline(), lang2_file.readline()\n while line1 or line2:\n lang1_resfile.write(line1.strip() + \"\\n\")\n lang2_resfile.write(line2.strip() + \"\\n\")\n line1, line2 = lang1_file.readline(), lang2_file.readline()\n\n return filename\n\n\[email protected]_problem(\"wmt_ende_tokens_8k\")\nclass WMTEnDeTokens8k(WMTProblem):\n \"\"\"Problem spec for WMT En-De translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n def train_generator(self, data_dir, tmp_dir, train):\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size)\n datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.DE_TOK\n\n\[email protected]_problem(\"wmt_ende_tokens_32k\")\nclass WMTEnDeTokens32k(WMTEnDeTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\[email protected]_problem(\"wmt_ende_characters\")\nclass WMTEnDeCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-De translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n def train_generator(self, tmp_dir, train):\n character_vocab = text_encoder.ByteTextEncoder()\n datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_chr_%s\" % tag)\n return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n character_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.DE_CHR\n\n\[email protected]_problem(\"wmt_zhen_tokens_8k\")\nclass WMTZhEnTokens8k(WMTProblem):\n \"\"\"Problem spec for WMT Zh-En translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n def train_generator(self, data_dir, tmp_dir, train):\n source_vocab_size = self.targeted_vocab_size\n target_vocab_size = self.targeted_vocab_size\n datasets = _ZHEN_TRAIN_DATASETS if train else _ZHEN_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in datasets]\n target_datasets = [[item[0], [item[1][1]]] for item in datasets]\n source_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.zh.%d\" % source_vocab_size, source_vocab_size,\n source_datasets)\n target_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.en.%d\" % target_vocab_size, target_vocab_size,\n target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_zhen_tok_%s\" % tag)\n return bi_vocabs_token_generator(data_path + \".lang1\", data_path + \".lang2\",\n source_vocab, target_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.ZH_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n def feature_encoders(self, data_dir):\n vocab_size = self.targeted_vocab_size\n source_vocab_filename = os.path.join(data_dir,\n \"vocab.zh.%d\" % vocab_size)\n target_vocab_filename 
= os.path.join(data_dir,\n \"vocab.en.%d\" % vocab_size)\n source_token = text_encoder.SubwordTextEncoder(source_vocab_filename)\n target_token = text_encoder.SubwordTextEncoder(target_vocab_filename)\n return {\n \"inputs\": source_token,\n \"targets\": target_token,\n }\n\n\[email protected]_problem(\"wmt_zhen_tokens_32k\")\nclass WMTZhEnTokens32k(WMTZhEnTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\[email protected]_problem(\"wmt_enfr_tokens_8k\")\nclass WMTEnFrTokens8k(WMTProblem):\n \"\"\"Problem spec for WMT En-Fr translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n def train_generator(self, data_dir, tmp_dir, train):\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size)\n datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_enfr_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.FR_TOK\n\n\[email protected]_problem(\"wmt_enfr_tokens_32k\")\nclass WMTEnFrTokens32k(WMTEnFrTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\[email protected]_problem(\"wmt_enfr_characters\")\nclass WMTEnFrCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-Fr translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n def train_generator(self, data_dir, tmp_dir, train):\n character_vocab = text_encoder.ByteTextEncoder()\n datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_enfr_chr_%s\" % tag)\n return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n character_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.FR_CHR\n\n\[email protected]_problem(\"setimes_mken_tokens_32k\")\nclass SETimesMkEnTokens32k(WMTProblem):\n \"\"\"Problem spec for SETimes Mk-En translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def vocab_name(self):\n return \"vocab.mken\"\n\n def train_generator(self, data_dir, tmp_dir, train):\n datasets = _MKEN_TRAIN_DATASETS if train else _MKEN_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in datasets]\n target_datasets = [[item[0], [item[1][1]]] for item in datasets]\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size,\n source_datasets + target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"setimes_mken_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.MK_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n\[email protected]_problem(\"wmt_encs_tokens_32k\")\nclass WMTEnCsTokens32k(WMTProblem):\n \"\"\"Problem spec for WMT English-Czech translation.\"\"\"\n\n @property\n def target_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def vocab_name(self):\n return \"vocab.encs\"\n\n def 
train_generator(self, data_dir, tmp_dir, train):\n datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in datasets]\n target_datasets = [[item[0], [item[1][1]]] for item in datasets]\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size,\n source_datasets + target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_encs_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.CS_TOK\n\n\[email protected]_problem(\"wmt_encs_characters\")\nclass WMTEnCsCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-Cs character-based translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n def train_generator(self, data_dir, tmp_dir, train):\n character_vocab = text_encoder.ByteTextEncoder()\n datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_encs_chr_%s\" % tag)\n return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n character_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.CS_CHR\n\n\ndef tabbed_parsing_token_generator(data_dir, tmp_dir, train, prefix,\n source_vocab_size, target_vocab_size):\n \"\"\"Generate source and target data from a single file.\"\"\"\n source_vocab = generator_utils.get_or_generate_tabbed_vocab(\n data_dir, tmp_dir, \"parsing_train.pairs\", 0,\n prefix + \"_source.vocab.%d\" % source_vocab_size, source_vocab_size)\n target_vocab = generator_utils.get_or_generate_tabbed_vocab(\n data_dir, tmp_dir, \"parsing_train.pairs\", 1,\n prefix + \"_target.vocab.%d\" % target_vocab_size, target_vocab_size)\n filename = \"parsing_%s\" % (\"train\" if train else \"dev\")\n pair_filepath = os.path.join(tmp_dir, filename + \".pairs\")\n return tabbed_generator(pair_filepath, source_vocab, target_vocab, EOS)\n\n\ndef tabbed_parsing_character_generator(tmp_dir, train):\n \"\"\"Generate source and target data from a single file.\"\"\"\n character_vocab = text_encoder.ByteTextEncoder()\n filename = \"parsing_%s\" % (\"train\" if train else \"dev\")\n pair_filepath = os.path.join(tmp_dir, filename + \".pairs\")\n return tabbed_generator(pair_filepath, character_vocab, character_vocab, EOS)\n\n\ndef parsing_token_generator(data_dir, tmp_dir, train, vocab_size):\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.endefr.%d\" % vocab_size, vocab_size)\n filename = \"%s_%s.trees\" % (FLAGS.parsing_path, \"train\" if train else \"dev\")\n tree_filepath = os.path.join(tmp_dir, filename)\n return wsj_parsing.token_generator(tree_filepath, symbolizer_vocab,\n symbolizer_vocab, EOS)\n", "path": "tensor2tensor/data_generators/wmt.py" } ]
[ { "content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for WMT data-sets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import wsj_parsing\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\ntf.flags.DEFINE_string(\"ende_bpe_path\", \"\", \"Path to BPE files in tmp_dir.\"\n \"Download from https://drive.google.com/open?\"\n \"id=0B_bZck-ksdkpM25jRUN2X2UxMm8\")\n\nFLAGS = tf.flags.FLAGS\n\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n\nclass WMTProblem(problem.Text2TextProblem):\n \"\"\"Base class for WMT problems.\"\"\"\n\n @property\n def is_character_level(self):\n return False\n\n @property\n def num_shards(self):\n return 100\n\n @property\n def vocab_name(self):\n return \"vocab.endefr\"\n\n @property\n def use_subword_tokenizer(self):\n return True\n\n\n# Generic generators used later for multiple problems.\n\n\ndef character_generator(source_path, target_path, character_vocab, eos=None):\n \"\"\"Generator for sequence-to-sequence tasks that just uses characters.\n\n This generator assumes the files at source_path and target_path have\n the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n where inputs are characters from the source lines converted to integers,\n and targets are characters from the target lines, also converted to integers.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n character_vocab: a TextEncoder to encode the characters.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from characters in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = character_vocab.encode(source.strip()) + eos_list\n target_ints = character_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\ndef tabbed_generator(source_path, source_vocab, target_vocab, eos=None):\n r\"\"\"Generator for sequence-to-sequence tasks using tabbed files.\n\n Tokens are derived from text files where each line contains both\n a source and a target string. The two strings are separated by a tab\n character ('\\t'). 
It yields dictionaries of \"inputs\" and \"targets\" where\n inputs are characters from the source lines converted to integers, and\n targets are characters from the target lines, also converted to integers.\n\n Args:\n source_path: path to the file with source and target sentences.\n source_vocab: a SubwordTextEncoder to encode the source string.\n target_vocab: a SubwordTextEncoder to encode the target string.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from characters in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n for line in source_file:\n if line and \"\\t\" in line:\n parts = line.split(\"\\t\", maxsplit=1)\n source, target = parts[0].strip(), parts[1].strip()\n source_ints = source_vocab.encode(source) + eos_list\n target_ints = target_vocab.encode(target) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n\n\ndef token_generator(source_path, target_path, token_vocab, eos=None):\n \"\"\"Generator for sequence-to-sequence tasks that uses tokens.\n\n This generator assumes the files at source_path and target_path have\n the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n where inputs are token ids from the \" \"-split source (and target, resp.) lines\n converted to integers using the token_map.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n token_vocab: text_encoder.TextEncoder object.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from tokens in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = token_vocab.encode(source.strip()) + eos_list\n target_ints = token_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\ndef bi_vocabs_token_generator(source_path,\n target_path,\n source_token_vocab,\n target_token_vocab,\n eos=None):\n \"\"\"Generator for sequence-to-sequence tasks that uses tokens.\n\n This generator assumes the files at source_path and target_path have\n the same number of lines and yields dictionaries of \"inputs\" and \"targets\"\n where inputs are token ids from the \" \"-split source (and target, resp.) 
lines\n converted to integers using the token_map.\n\n Args:\n source_path: path to the file with source sentences.\n target_path: path to the file with target sentences.\n source_token_vocab: text_encoder.TextEncoder object.\n target_token_vocab: text_encoder.TextEncoder object.\n eos: integer to append at the end of each sequence (default: None).\n\n Yields:\n A dictionary {\"inputs\": source-line, \"targets\": target-line} where\n the lines are integer lists converted from tokens in the file lines.\n \"\"\"\n eos_list = [] if eos is None else [eos]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target:\n source_ints = source_token_vocab.encode(source.strip()) + eos_list\n target_ints = target_token_vocab.encode(target.strip()) + eos_list\n yield {\"inputs\": source_ints, \"targets\": target_ints}\n source, target = source_file.readline(), target_file.readline()\n\n\n# Data-set URLs.\n\n\n_ENDE_TRAIN_DATASETS = [\n [\n \"http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz\", # pylint: disable=line-too-long\n (\"training-parallel-nc-v11/news-commentary-v11.de-en.en\",\n \"training-parallel-nc-v11/news-commentary-v11.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.de-en.en\", \"commoncrawl.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.de-en.en\", \"training/europarl-v7.de-en.de\")\n ],\n]\n_ENDE_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.de\")\n ],\n]\n\n_ENFR_TRAIN_DATASETS = [\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.fr-en.en\", \"commoncrawl.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.fr-en.en\", \"training/europarl-v7.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz\",\n (\"training/news-commentary-v9.fr-en.en\",\n \"training/news-commentary-v9.fr-en.fr\")\n ],\n [\n \"http://www.statmt.org/wmt10/training-giga-fren.tar\",\n (\"giga-fren.release2.fixed.en.gz\", \"giga-fren.release2.fixed.fr.gz\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-un.tgz\",\n (\"un/undoc.2000.fr-en.en\", \"un/undoc.2000.fr-en.fr\")\n ],\n]\n_ENFR_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.fr\")\n ],\n]\n\n_ZHEN_TRAIN_DATASETS = [[(\"http://data.statmt.org/wmt17/translation-task/\"\n \"training-parallel-nc-v12.tgz\"),\n (\"training/news-commentary-v12.zh-en.zh\",\n \"training/news-commentary-v12.zh-en.en\")]]\n\n_ZHEN_TEST_DATASETS = [[\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newsdev2017-zhen-src.zh\", \"dev/newsdev2017-zhen-ref.en\")\n]]\n\n# For Macedonian-English the SETimes corpus\n# from http://nlp.ffzg.hr/resources/corpora/setimes/ is used.\n# The original dataset has 207,777 parallel sentences.\n# For training the first 205,777 sentences are used.\n_MKEN_TRAIN_DATASETS = [[\n \"https://github.com/stefan-it/nmt-mk-en/raw/master/data/setimes.mk-en.train.tgz\", # pylint: disable=line-too-long\n (\"train.mk\", \"train.en\")\n]]\n\n# For development 1000 parallel sentences are used.\n_MKEN_TEST_DATASETS = [[\n 
\"https://github.com/stefan-it/nmt-mk-en/raw/master/data/setimes.mk-en.dev.tgz\", # pylint: disable=line-too-long\n (\"dev.mk\", \"dev.en\")\n]]\n\n# English-Czech datasets\n_ENCS_TRAIN_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v11.tgz\", # pylint: disable=line-too-long\n (\"training-parallel-nc-v11/news-commentary-v11.cs-en.en\",\n \"training-parallel-nc-v11/news-commentary-v11.cs-en.cs\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.cs-en.en\", \"commoncrawl.cs-en.cs\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.cs-en.en\", \"training/europarl-v7.cs-en.cs\")\n ],\n]\n_ENCS_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.cs\")\n ],\n]\n\n\n# Generators.\n\n\ndef _get_wmt_ende_dataset(directory, filename):\n \"\"\"Extract the WMT en-de corpus `filename` to directory unless it's there.\"\"\"\n train_path = os.path.join(directory, filename)\n if not (tf.gfile.Exists(train_path + \".de\") and\n tf.gfile.Exists(train_path + \".en\")):\n # We expect that this file has been downloaded from:\n # https://drive.google.com/open?id=0B_bZck-ksdkpM25jRUN2X2UxMm8 and placed\n # in `directory`.\n corpus_file = os.path.join(directory, FLAGS.ende_bpe_path)\n with tarfile.open(corpus_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(directory)\n return train_path\n\n\ndef ende_bpe_token_generator(data_dir, tmp_dir, train):\n \"\"\"Instance of token generator for the WMT en->de task, training set.\"\"\"\n dataset_path = (\"train.tok.clean.bpe.32000\"\n if train else \"newstest2013.tok.bpe.32000\")\n train_path = _get_wmt_ende_dataset(tmp_dir, dataset_path)\n token_tmp_path = os.path.join(tmp_dir, \"vocab.bpe.32000\")\n token_path = os.path.join(data_dir, \"vocab.bpe.32000\")\n tf.gfile.Copy(token_tmp_path, token_path, overwrite=True)\n token_vocab = text_encoder.TokenTextEncoder(vocab_filename=token_path)\n return token_generator(train_path + \".en\", train_path + \".de\", token_vocab,\n EOS)\n\n\ndef _compile_data(tmp_dir, datasets, filename):\n \"\"\"Concatenate all `datasets` and save to `filename`.\"\"\"\n filename = os.path.join(tmp_dir, filename)\n with tf.gfile.GFile(filename + \".lang1\", mode=\"w\") as lang1_resfile:\n with tf.gfile.GFile(filename + \".lang2\", mode=\"w\") as lang2_resfile:\n for dataset in datasets:\n url = dataset[0]\n compressed_filename = os.path.basename(url)\n compressed_filepath = os.path.join(tmp_dir, compressed_filename)\n\n lang1_filename, lang2_filename = dataset[1]\n lang1_filepath = os.path.join(tmp_dir, lang1_filename)\n lang2_filepath = os.path.join(tmp_dir, lang2_filename)\n\n if not os.path.exists(compressed_filepath):\n generator_utils.maybe_download(tmp_dir, compressed_filename, url)\n if not (os.path.exists(lang1_filepath) and\n os.path.exists(lang2_filepath)):\n # For .tar.gz and .tgz files, we read compressed.\n mode = \"r:gz\" if compressed_filepath.endswith(\"gz\") else \"r\"\n with tarfile.open(compressed_filepath, mode) as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n if lang1_filepath.endswith(\".gz\"):\n new_filepath = lang1_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang1_filepath, new_filepath)\n lang1_filepath = new_filepath\n if lang2_filepath.endswith(\".gz\"):\n new_filepath = lang2_filepath.strip(\".gz\")\n generator_utils.gunzip_file(lang2_filepath, new_filepath)\n lang2_filepath = new_filepath\n 
with tf.gfile.GFile(lang1_filepath, mode=\"r\") as lang1_file:\n with tf.gfile.GFile(lang2_filepath, mode=\"r\") as lang2_file:\n line1, line2 = lang1_file.readline(), lang2_file.readline()\n while line1 or line2:\n lang1_resfile.write(line1.strip() + \"\\n\")\n lang2_resfile.write(line2.strip() + \"\\n\")\n line1, line2 = lang1_file.readline(), lang2_file.readline()\n\n return filename\n\n\[email protected]_problem(\"wmt_ende_tokens_8k\")\nclass WMTEnDeTokens8k(WMTProblem):\n \"\"\"Problem spec for WMT En-De translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n def train_generator(self, data_dir, tmp_dir, train):\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size)\n datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.DE_TOK\n\n\[email protected]_problem(\"wmt_ende_tokens_32k\")\nclass WMTEnDeTokens32k(WMTEnDeTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\[email protected]_problem(\"wmt_ende_characters\")\nclass WMTEnDeCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-De translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n def train_generator(self, tmp_dir, train):\n character_vocab = text_encoder.ByteTextEncoder()\n datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_ende_chr_%s\" % tag)\n return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n character_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.DE_CHR\n\n\[email protected]_problem(\"wmt_zhen_tokens_8k\")\nclass WMTZhEnTokens8k(WMTProblem):\n \"\"\"Problem spec for WMT Zh-En translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n def train_generator(self, data_dir, tmp_dir, train):\n source_vocab_size = self.targeted_vocab_size\n target_vocab_size = self.targeted_vocab_size\n datasets = _ZHEN_TRAIN_DATASETS if train else _ZHEN_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in datasets]\n target_datasets = [[item[0], [item[1][1]]] for item in datasets]\n source_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.zh.%d\" % source_vocab_size, source_vocab_size,\n source_datasets)\n target_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.en.%d\" % target_vocab_size, target_vocab_size,\n target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_zhen_tok_%s\" % tag)\n return bi_vocabs_token_generator(data_path + \".lang1\", data_path + \".lang2\",\n source_vocab, target_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.ZH_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n def feature_encoders(self, data_dir):\n vocab_size = self.targeted_vocab_size\n source_vocab_filename = os.path.join(data_dir,\n \"vocab.zh.%d\" % vocab_size)\n target_vocab_filename 
= os.path.join(data_dir,\n \"vocab.en.%d\" % vocab_size)\n source_token = text_encoder.SubwordTextEncoder(source_vocab_filename)\n target_token = text_encoder.SubwordTextEncoder(target_vocab_filename)\n return {\n \"inputs\": source_token,\n \"targets\": target_token,\n }\n\n\[email protected]_problem(\"wmt_zhen_tokens_32k\")\nclass WMTZhEnTokens32k(WMTZhEnTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\[email protected]_problem(\"wmt_enfr_tokens_8k\")\nclass WMTEnFrTokens8k(WMTProblem):\n \"\"\"Problem spec for WMT En-Fr translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**13 # 8192\n\n def train_generator(self, data_dir, tmp_dir, train):\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size)\n datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_enfr_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.FR_TOK\n\n\[email protected]_problem(\"wmt_enfr_tokens_32k\")\nclass WMTEnFrTokens32k(WMTEnFrTokens8k):\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n\[email protected]_problem(\"wmt_enfr_characters\")\nclass WMTEnFrCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-Fr translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n def train_generator(self, data_dir, tmp_dir, train):\n character_vocab = text_encoder.ByteTextEncoder()\n datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_enfr_chr_%s\" % tag)\n return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n character_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.FR_CHR\n\n\[email protected]_problem(\"setimes_mken_tokens_32k\")\nclass SETimesMkEnTokens32k(WMTProblem):\n \"\"\"Problem spec for SETimes Mk-En translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def vocab_name(self):\n return \"vocab.mken\"\n\n def train_generator(self, data_dir, tmp_dir, train):\n datasets = _MKEN_TRAIN_DATASETS if train else _MKEN_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in datasets]\n target_datasets = [[item[0], [item[1][1]]] for item in datasets]\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size,\n source_datasets + target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"setimes_mken_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.MK_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n\[email protected]_problem(\"wmt_encs_tokens_32k\")\nclass WMTEnCsTokens32k(WMTProblem):\n \"\"\"Problem spec for WMT English-Czech translation.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def vocab_name(self):\n return \"vocab.encs\"\n\n def 
train_generator(self, data_dir, tmp_dir, train):\n datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS\n source_datasets = [[item[0], [item[1][0]]] for item in datasets]\n target_datasets = [[item[0], [item[1][1]]] for item in datasets]\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size,\n source_datasets + target_datasets)\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_encs_tok_%s\" % tag)\n return token_generator(data_path + \".lang1\", data_path + \".lang2\",\n symbolizer_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.CS_TOK\n\n\[email protected]_problem(\"wmt_encs_characters\")\nclass WMTEnCsCharacters(WMTProblem):\n \"\"\"Problem spec for WMT En-Cs character-based translation.\"\"\"\n\n @property\n def is_character_level(self):\n return True\n\n def train_generator(self, data_dir, tmp_dir, train):\n character_vocab = text_encoder.ByteTextEncoder()\n datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS\n tag = \"train\" if train else \"dev\"\n data_path = _compile_data(tmp_dir, datasets, \"wmt_encs_chr_%s\" % tag)\n return character_generator(data_path + \".lang1\", data_path + \".lang2\",\n character_vocab, EOS)\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_CHR\n\n @property\n def target_space_id(self):\n return problem.SpaceID.CS_CHR\n\n\ndef tabbed_parsing_token_generator(data_dir, tmp_dir, train, prefix,\n source_vocab_size, target_vocab_size):\n \"\"\"Generate source and target data from a single file.\"\"\"\n source_vocab = generator_utils.get_or_generate_tabbed_vocab(\n data_dir, tmp_dir, \"parsing_train.pairs\", 0,\n prefix + \"_source.vocab.%d\" % source_vocab_size, source_vocab_size)\n target_vocab = generator_utils.get_or_generate_tabbed_vocab(\n data_dir, tmp_dir, \"parsing_train.pairs\", 1,\n prefix + \"_target.vocab.%d\" % target_vocab_size, target_vocab_size)\n filename = \"parsing_%s\" % (\"train\" if train else \"dev\")\n pair_filepath = os.path.join(tmp_dir, filename + \".pairs\")\n return tabbed_generator(pair_filepath, source_vocab, target_vocab, EOS)\n\n\ndef tabbed_parsing_character_generator(tmp_dir, train):\n \"\"\"Generate source and target data from a single file.\"\"\"\n character_vocab = text_encoder.ByteTextEncoder()\n filename = \"parsing_%s\" % (\"train\" if train else \"dev\")\n pair_filepath = os.path.join(tmp_dir, filename + \".pairs\")\n return tabbed_generator(pair_filepath, character_vocab, character_vocab, EOS)\n\n\ndef parsing_token_generator(data_dir, tmp_dir, train, vocab_size):\n symbolizer_vocab = generator_utils.get_or_generate_vocab(\n data_dir, tmp_dir, \"vocab.endefr.%d\" % vocab_size, vocab_size)\n filename = \"%s_%s.trees\" % (FLAGS.parsing_path, \"train\" if train else \"dev\")\n tree_filepath = os.path.join(tmp_dir, filename)\n return wsj_parsing.token_generator(tree_filepath, symbolizer_vocab,\n symbolizer_vocab, EOS)\n", "path": "tensor2tensor/data_generators/wmt.py" } ]
diff --git a/README.md b/README.md index edd6460d0..bb0f6f534 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ t2t-datagen \ --problem=$PROBLEM # Train -# * If you run out of memory, add --hparams='batch_size=2048' or even 1024. +# * If you run out of memory, add --hparams='batch_size=1024'. t2t-trainer \ --data_dir=$DATA_DIR \ --problems=$PROBLEM \ @@ -166,7 +166,7 @@ python -c "from tensor2tensor.models.transformer import Transformer" with `Modality` objects, which are specified per-feature in the dataset/task specification. * Support for multi-GPU machines and synchronous (1 master, many workers) and - asynchrounous (independent workers synchronizing through a parameter server) + asynchronous (independent workers synchronizing through a parameter server) [distributed training](https://github.com/tensorflow/tensor2tensor/tree/master/docs/distributed_training.md). * Easily swap amongst datasets and models by command-line flag with the data generation script `t2t-datagen` and the training script `t2t-trainer`. diff --git a/tensor2tensor/data_generators/wmt.py b/tensor2tensor/data_generators/wmt.py index 456f36321..bcd29e1d4 100644 --- a/tensor2tensor/data_generators/wmt.py +++ b/tensor2tensor/data_generators/wmt.py @@ -574,7 +574,7 @@ class WMTEnCsTokens32k(WMTProblem): """Problem spec for WMT English-Czech translation.""" @property - def target_vocab_size(self): + def targeted_vocab_size(self): return 2**15 # 32768 @property
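For orientation, a hedged usage sketch of the token-based generator defined in the file above — this is not part of the original repository; the data and vocabulary paths are placeholders that assume `_compile_data` and the vocabulary-generation step have already been run, and the EOS id is an assumption standing in for the module's own constant:

```python
# Illustrative only: consuming the token_generator shown above.
# Paths are placeholders; they must point at files produced by _compile_data
# and by the vocabulary-generation step.
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators.wmt import token_generator

vocab = text_encoder.TokenTextEncoder(
    vocab_filename="/tmp/t2t_datagen/vocab.ende.32768")

examples = token_generator(
    "/tmp/t2t_datagen/wmt_ende_tok_train.lang1",
    "/tmp/t2t_datagen/wmt_ende_tok_train.lang2",
    vocab,
    eos=1,  # assumed EOS id; the module passes its own EOS constant here
)

# Peek at the first few source/target id lists yielded by the generator.
for i, example in enumerate(examples):
    print(example["inputs"][:10], example["targets"][:10])
    if i == 2:
        break
```

Each yielded dictionary pairs one encoded source line with its encoded target line, which is the form the `Problem` subclasses hand to `t2t-datagen` for serialization.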
facebookresearch__xformers-819
TypeError: Trainer.__init__() got an unexpected keyword argument 'gpus' # 🐛 Bug When running [xformers_mingpt.ipynb](https://colab.research.google.com/github/facebookresearch/xformers/blob/main/docs/source/xformers_mingpt.ipynb) in colab there is an arror raised during creation an instance of `Trainer`. ## Command ```python trainer = Trainer( gpus=1, max_epochs=EPOCHS, precision=16, gradient_clip_val=1, log_every_n_steps=1, detect_anomaly=True, accumulate_grad_batches=REF_BATCH // BATCH, ) ``` ## To Reproduce Open [xformers_mingpt.ipynb](https://colab.research.google.com/github/facebookresearch/xformers/blob/main/docs/source/xformers_mingpt.ipynb) in colab and: "Kernel" -> "Run all cells" `TypeError: Trainer.__init__() got an unexpected keyword argument 'gpus'` ![image](https://github.com/facebookresearch/xformers/assets/36787333/9e6e64f6-54d9-4809-80ae-80eb91e2f414) ## Expected behavior `Trainer` object created successfully. ## Environment Default colab env: ``` Collecting environment information... PyTorch version: 2.0.1+cu118 Is debug build: False CUDA used to build PyTorch: 11.8 ROCM used to build PyTorch: N/A OS: Ubuntu 22.04.2 LTS (x86_64) GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 Clang version: 14.0.0-1ubuntu1.1 CMake version: version 3.25.2 Libc version: glibc-2.35 Python version: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] (64-bit runtime) Python platform: Linux-5.15.109+-x86_64-with-glibc2.35 Is CUDA available: True CUDA runtime version: 11.8.89 CUDA_MODULE_LOADING set to: LAZY GPU models and configuration: GPU 0: Tesla T4 Nvidia driver version: 525.105.17 cuDNN version: Probably one of the following: /usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.0 /usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.0 HIP runtime version: N/A MIOpen runtime version: N/A Is XNNPACK available: True CPU: Architecture: x86_64 CPU op-mode(s): 32-bit, 64-bit Address sizes: 46 bits physical, 48 bits virtual Byte Order: Little Endian CPU(s): 2 On-line CPU(s) list: 0,1 Vendor ID: GenuineIntel Model name: Intel(R) Xeon(R) CPU @ 2.30GHz CPU family: 6 Model: 63 Thread(s) per core: 2 Core(s) per socket: 1 Socket(s): 1 Stepping: 0 BogoMIPS: 4599.99 Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities Hypervisor vendor: KVM Virtualization type: full L1d cache: 32 KiB (1 instance) L1i cache: 32 KiB (1 instance) L2 cache: 256 KiB (1 instance) L3 cache: 45 MiB (1 instance) NUMA node(s): 1 NUMA node0 CPU(s): 0,1 Vulnerability Itlb multihit: Not affected Vulnerability L1tf: Mitigation; PTE Inversion Vulnerability Mds: Vulnerable; SMT Host state unknown Vulnerability Meltdown: Vulnerable Vulnerability Mmio stale data: Vulnerable Vulnerability Retbleed: Vulnerable Vulnerability Spec store bypass: Vulnerable Vulnerability Spectre v1: Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers Vulnerability Spectre v2: 
Vulnerable, IBPB: disabled, STIBP: disabled, PBRSB-eIBRS: Not affected Vulnerability Srbds: Not affected Vulnerability Tsx async abort: Not affected Versions of relevant libraries: [pip3] mypy-extensions==1.0.0 [pip3] numpy==1.22.4 [pip3] pytorch-lightning==2.0.6 [pip3] torch==2.0.1+cu118 [pip3] torchaudio==2.0.2+cu118 [pip3] torchdata==0.6.1 [pip3] torchmetrics==1.0.2 [pip3] torchsummary==1.5.1 [pip3] torchtext==0.15.2 [pip3] torchvision==0.15.2+cu118 ``` ## Additional context The same error will arise in [microGPT.p](https://github.com/facebookresearch/xformers/blob/main/examples/microGPT.py#L313) in newer versions of pytorch_lightning
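For reference, a minimal sketch of an updated `Trainer` construction that avoids this error on pytorch-lightning >= 2.0, where the `gpus` argument was removed in favour of `devices`/`accelerator`. The constants mirror the example script; treating the runtime as a single-GPU machine (such as Colab's T4) is an assumption, and this is a sketch rather than the merged fix itself:

```python
# Hedged sketch: Trainer arguments for pytorch-lightning >= 2.0.
# `gpus=1` no longer exists; `devices` plus `accelerator` take its place.
from pytorch_lightning import Trainer

REF_BATCH, BATCH, EPOCHS = 512, 128, 1  # values from the example script

trainer = Trainer(
    devices=1,
    accelerator="gpu",      # assumes a single-GPU runtime
    max_epochs=EPOCHS,
    precision=16,
    gradient_clip_val=1,
    log_every_n_steps=1,
    detect_anomaly=True,
    accumulate_grad_batches=REF_BATCH // BATCH,
)
```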
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# A MinGPT + Lightning + xFormers example Code from Sean Naren (@seannaren)\n# This is an hommage to https://github.com/karpathy/minGPT\n\nimport math\nimport os\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nfrom pytorch_lightning import Trainer, seed_everything\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler\n\nfrom xformers.factory.model_factory import xFormer, xFormerConfig\n\n\nclass GPT(pl.LightningModule):\n \"\"\"the full GPT language model, with a context size of block_size\"\"\"\n\n def __init__(\n self,\n vocab_size,\n weight_decay=0.1,\n betas=(0.9, 0.95),\n learning_rate=6e-4,\n n_embd=512,\n block_size=128,\n n_layer=8,\n n_head=8,\n resid_pdrop=0.1,\n attn_pdrop=0.1,\n mlp_pdrop=0.1,\n attention=\"scaled_dot_product\",\n hidden_layer_multiplier=4,\n warmup_tokens=20,\n final_tokens=1000,\n ):\n super().__init__()\n\n # auto creates self.hparams from the method signature\n self.save_hyperparameters()\n\n # A list of the encoder or decoder blocks which constitute the Transformer.\n xformer_config = [\n {\n \"reversible\": False, # Turn on to test the effect of using reversible layers\n \"block_type\": \"encoder\",\n \"num_layers\": self.hparams.n_layer,\n \"dim_model\": self.hparams.n_embd,\n \"residual_norm_style\": \"post\",\n \"position_encoding_config\": {\n \"name\": \"vocab\",\n \"seq_len\": self.hparams.block_size,\n \"vocab_size\": self.hparams.vocab_size,\n },\n \"multi_head_config\": {\n \"num_heads\": self.hparams.n_head,\n \"residual_dropout\": self.hparams.resid_pdrop,\n \"use_rotary_embeddings\": True,\n \"attention\": {\n \"name\": self.hparams.attention,\n \"dropout\": self.hparams.attn_pdrop,\n \"causal\": True,\n \"seq_len\": self.hparams.block_size,\n \"num_rules\": self.hparams.n_head,\n },\n },\n \"feedforward_config\": {\n \"name\": \"FusedMLP\", # Use MLP if Triton is not available\n \"dropout\": self.hparams.mlp_pdrop,\n \"activation\": \"gelu\",\n \"hidden_layer_multiplier\": self.hparams.hidden_layer_multiplier,\n },\n }\n ]\n\n config = xFormerConfig(xformer_config)\n config.weight_init = \"small\"\n self.model = xFormer.from_config(config)\n\n # decoder head\n self.ln_f = nn.LayerNorm(self.hparams.n_embd)\n self.head = nn.Linear(self.hparams.n_embd, self.hparams.vocab_size, bias=False)\n\n self.block_size = self.hparams.block_size\n self.apply(self._init_weights)\n\n self._tokens_seen = 0\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n # Reset the token counter\n self._tokens_seen = 0\n\n def get_block_size(self):\n return self.block_size\n\n def configure_optimizers(self):\n # Create the optimizer and the training schedule:\n # - Handle the per-param weight decay\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n params_decay = [\n p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)\n ]\n params_nodecay = [\n p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)\n ]\n 
optim_groups = [\n {\"params\": params_decay, \"weight_decay\": self.hparams.weight_decay},\n {\"params\": params_nodecay, \"weight_decay\": 0.0},\n ]\n\n # - Start with a warm up, ramp up then cosine\n optimizer = torch.optim.AdamW(\n optim_groups, lr=self.hparams.learning_rate, betas=self.hparams.betas\n )\n\n def update_lr(*_):\n config = self.hparams\n\n if self._tokens_seen < config.warmup_tokens:\n # linear warmup\n lr_mult = float(self._tokens_seen) / float(max(1, config.warmup_tokens))\n lr_mult = max(lr_mult, 1e-2) # could be that we've not seen any yet\n else:\n # cosine learning rate decay\n progress = float(self._tokens_seen - config.warmup_tokens) / float(\n max(1, config.final_tokens - config.warmup_tokens)\n )\n lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))\n\n return lr_mult\n\n lr_scheduler = {\n \"scheduler\": torch.optim.lr_scheduler.LambdaLR(\n optimizer,\n lr_lambda=[update_lr, update_lr],\n ),\n \"name\": \"learning_rate\",\n \"interval\": \"step\", # The unit of the scheduler's step size\n \"frequency\": 1, # The frequency of the scheduler\n }\n return [optimizer], [lr_scheduler]\n\n def forward(self, src):\n # predict the next tokens (in latent space)\n prediction = self.model(src)\n\n # translate the predictions into tokens\n prediction = self.ln_f(prediction)\n logits = self.head(prediction)\n\n return logits\n\n def training_step(self, batch, _):\n src, targets = batch\n\n # Update the tokens we've seen (tracked for LR scheduling)\n self._tokens_seen += (src >= 0).numel()\n\n # same action as inference\n logits = self(src)\n\n # if we are given some desired targets also calculate the loss\n loss = None\n if targets is not None:\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n\n self.logger.log_metrics(\n {\n \"train_loss\": loss.mean(),\n \"learning_rate\": self.lr_schedulers().get_last_lr()[0],\n },\n step=trainer.global_step,\n )\n\n return loss\n\n\nclass CharDataset(Dataset):\n def __init__(self, data, block_size):\n chars = list(set(data))\n data_size, vocab_size = len(data), len(chars)\n rank_zero_info(\"data has %d characters, %d unique.\" % (data_size, vocab_size))\n\n self.stoi = {ch: i for i, ch in enumerate(chars)}\n self.itos = {i: ch for i, ch in enumerate(chars)}\n self.block_size = block_size\n self.vocab_size = vocab_size\n self.data = data\n\n def __len__(self):\n return len(self.data) - self.block_size\n\n def __getitem__(self, i):\n chunk = self.data[i : i + self.block_size + 1]\n dix = [self.stoi[s] for s in chunk]\n\n # src and target are off by one, we want the model to predict the next word\n x = torch.tensor(dix[:-1], dtype=torch.long)\n y = torch.tensor(dix[1:], dtype=torch.long)\n return x, y\n\n def to_tokens(self, message, device):\n return torch.tensor([self.stoi[s] for s in message], dtype=torch.long)[\n None, ...\n ].to(device)\n\n def from_tokens(self, tokens):\n return \"\".join([self.itos[int(i)] for i in tokens])\n\n\[email protected]_grad()\ndef sample(model, x, steps, temperature=1.0, sample=False, top_k=None):\n \"\"\"\n take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in\n the sequence, feeding the predictions back into the model each time. 
Clearly the sampling\n has quadratic complexity unlike an RNN that is only linear, and has a finite context window\n of block_size, unlike an RNN that has an infinite context window.\n \"\"\"\n block_size = model.get_block_size()\n model.eval()\n\n # CREDITS: https://github.com/karpathy/minGPT/blob/master/mingpt/utils.py\n def top_k_logits(logits, k):\n v, _ = torch.topk(logits, k)\n out = logits.clone()\n out[out < v[:, [-1]]] = -float(\"Inf\")\n return out\n\n for _ in range(steps):\n x_cond = (\n x if x.size(1) <= block_size else x[:, -block_size:]\n ) # crop context if needed\n logits = model(x_cond)\n\n # pluck the logits at the final step and scale by temperature\n logits = logits[:, -1, :] / temperature\n\n # optionally crop probabilities to only the top k options\n if top_k is not None:\n logits = top_k_logits(logits, top_k)\n\n # apply softmax to convert to probabilities\n probs = F.softmax(logits, dim=-1)\n\n # sample from the distribution or take the most likely\n if sample:\n ix = torch.multinomial(probs, num_samples=1)\n else:\n _, ix = torch.topk(probs, k=1, dim=-1)\n\n # append to the sequence and continue\n x = torch.cat((x, ix), dim=1)\n\n return x[0] # escape the batch dimension\n\n\nif __name__ == \"__main__\":\n seed_everything(42)\n\n # Adjust batch depending on the available memory on your machine.\n # You can also use reversible layers to save memory\n REF_BATCH = 512\n BATCH = 128\n\n WORKERS = 4\n EPOCHS = 1\n BLOCK = 128\n WARMUP = 20\n\n if not os.path.exists(\"input.txt\"):\n os.system(\n \"wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt\"\n )\n\n text = open(\"input.txt\", \"r\").read()\n train_dataset = CharDataset(\n text, BLOCK\n ) # one line of poem is roughly 50 characters\n random_sampler = RandomSampler(train_dataset)\n train_loader = DataLoader(\n train_dataset,\n sampler=random_sampler,\n batch_size=BATCH,\n num_workers=WORKERS,\n pin_memory=True,\n )\n\n model = GPT(\n vocab_size=train_dataset.vocab_size,\n block_size=train_dataset.block_size,\n attention=\"scaled_dot_product\",\n warmup_tokens=REF_BATCH * WARMUP,\n final_tokens=EPOCHS * len(train_dataset) * BLOCK,\n )\n print(model)\n\n trainer = Trainer(\n gpus=1,\n max_epochs=EPOCHS,\n precision=16,\n log_every_n_steps=1,\n accumulate_grad_batches=REF_BATCH // BATCH,\n )\n\n trainer.fit(model, train_loader)\n\n # Sample from the model, let it predict a paragraph\n context = \"Friends of my soul\" # prime with something\n x = train_dataset.to_tokens(context, model.device)\n y = sample(model, x, steps=1000, temperature=1.0, sample=True, top_k=10)\n\n print(train_dataset.from_tokens(y))\n", "path": "examples/microGPT.py" } ]
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n# A MinGPT + Lightning + xFormers example Code from Sean Naren (@seannaren)\n# This is an hommage to https://github.com/karpathy/minGPT\n\nimport math\nimport os\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nfrom pytorch_lightning import Trainer, seed_everything\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler\n\nfrom xformers.factory.model_factory import xFormer, xFormerConfig\n\n\nclass GPT(pl.LightningModule):\n \"\"\"the full GPT language model, with a context size of block_size\"\"\"\n\n def __init__(\n self,\n vocab_size,\n weight_decay=0.1,\n betas=(0.9, 0.95),\n learning_rate=6e-4,\n n_embd=512,\n block_size=128,\n n_layer=8,\n n_head=8,\n resid_pdrop=0.1,\n attn_pdrop=0.1,\n mlp_pdrop=0.1,\n attention=\"scaled_dot_product\",\n hidden_layer_multiplier=4,\n warmup_tokens=20,\n final_tokens=1000,\n ):\n super().__init__()\n\n # auto creates self.hparams from the method signature\n self.save_hyperparameters()\n\n # A list of the encoder or decoder blocks which constitute the Transformer.\n xformer_config = [\n {\n \"reversible\": False, # Turn on to test the effect of using reversible layers\n \"block_type\": \"encoder\",\n \"num_layers\": self.hparams.n_layer,\n \"dim_model\": self.hparams.n_embd,\n \"residual_norm_style\": \"post\",\n \"position_encoding_config\": {\n \"name\": \"vocab\",\n \"seq_len\": self.hparams.block_size,\n \"vocab_size\": self.hparams.vocab_size,\n },\n \"multi_head_config\": {\n \"num_heads\": self.hparams.n_head,\n \"residual_dropout\": self.hparams.resid_pdrop,\n \"use_rotary_embeddings\": True,\n \"attention\": {\n \"name\": self.hparams.attention,\n \"dropout\": self.hparams.attn_pdrop,\n \"causal\": True,\n \"seq_len\": self.hparams.block_size,\n \"num_rules\": self.hparams.n_head,\n },\n },\n \"feedforward_config\": {\n \"name\": \"FusedMLP\", # Use MLP if Triton is not available\n \"dropout\": self.hparams.mlp_pdrop,\n \"activation\": \"gelu\",\n \"hidden_layer_multiplier\": self.hparams.hidden_layer_multiplier,\n },\n }\n ]\n\n config = xFormerConfig(xformer_config)\n config.weight_init = \"small\"\n self.model = xFormer.from_config(config)\n\n # decoder head\n self.ln_f = nn.LayerNorm(self.hparams.n_embd)\n self.head = nn.Linear(self.hparams.n_embd, self.hparams.vocab_size, bias=False)\n\n self.block_size = self.hparams.block_size\n self.apply(self._init_weights)\n\n self._tokens_seen = 0\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n # Reset the token counter\n self._tokens_seen = 0\n\n def get_block_size(self):\n return self.block_size\n\n def configure_optimizers(self):\n # Create the optimizer and the training schedule:\n # - Handle the per-param weight decay\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n params_decay = [\n p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)\n ]\n params_nodecay = [\n p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)\n ]\n 
optim_groups = [\n {\"params\": params_decay, \"weight_decay\": self.hparams.weight_decay},\n {\"params\": params_nodecay, \"weight_decay\": 0.0},\n ]\n\n # - Start with a warm up, ramp up then cosine\n optimizer = torch.optim.AdamW(\n optim_groups, lr=self.hparams.learning_rate, betas=self.hparams.betas\n )\n\n def update_lr(*_):\n config = self.hparams\n\n if self._tokens_seen < config.warmup_tokens:\n # linear warmup\n lr_mult = float(self._tokens_seen) / float(max(1, config.warmup_tokens))\n lr_mult = max(lr_mult, 1e-2) # could be that we've not seen any yet\n else:\n # cosine learning rate decay\n progress = float(self._tokens_seen - config.warmup_tokens) / float(\n max(1, config.final_tokens - config.warmup_tokens)\n )\n lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))\n\n return lr_mult\n\n lr_scheduler = {\n \"scheduler\": torch.optim.lr_scheduler.LambdaLR(\n optimizer,\n lr_lambda=[update_lr, update_lr],\n ),\n \"name\": \"learning_rate\",\n \"interval\": \"step\", # The unit of the scheduler's step size\n \"frequency\": 1, # The frequency of the scheduler\n }\n return [optimizer], [lr_scheduler]\n\n def forward(self, src):\n # predict the next tokens (in latent space)\n prediction = self.model(src)\n\n # translate the predictions into tokens\n prediction = self.ln_f(prediction)\n logits = self.head(prediction)\n\n return logits\n\n def training_step(self, batch, _):\n src, targets = batch\n\n # Update the tokens we've seen (tracked for LR scheduling)\n self._tokens_seen += (src >= 0).numel()\n\n # same action as inference\n logits = self(src)\n\n # if we are given some desired targets also calculate the loss\n loss = None\n if targets is not None:\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n\n self.logger.log_metrics(\n {\n \"train_loss\": loss.mean(),\n \"learning_rate\": self.lr_schedulers().get_last_lr()[0],\n },\n step=trainer.global_step,\n )\n\n return loss\n\n\nclass CharDataset(Dataset):\n def __init__(self, data, block_size):\n chars = list(set(data))\n data_size, vocab_size = len(data), len(chars)\n rank_zero_info(\"data has %d characters, %d unique.\" % (data_size, vocab_size))\n\n self.stoi = {ch: i for i, ch in enumerate(chars)}\n self.itos = {i: ch for i, ch in enumerate(chars)}\n self.block_size = block_size\n self.vocab_size = vocab_size\n self.data = data\n\n def __len__(self):\n return len(self.data) - self.block_size\n\n def __getitem__(self, i):\n chunk = self.data[i : i + self.block_size + 1]\n dix = [self.stoi[s] for s in chunk]\n\n # src and target are off by one, we want the model to predict the next word\n x = torch.tensor(dix[:-1], dtype=torch.long)\n y = torch.tensor(dix[1:], dtype=torch.long)\n return x, y\n\n def to_tokens(self, message, device):\n return torch.tensor([self.stoi[s] for s in message], dtype=torch.long)[\n None, ...\n ].to(device)\n\n def from_tokens(self, tokens):\n return \"\".join([self.itos[int(i)] for i in tokens])\n\n\[email protected]_grad()\ndef sample(model, x, steps, temperature=1.0, sample=False, top_k=None):\n \"\"\"\n take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in\n the sequence, feeding the predictions back into the model each time. 
Clearly the sampling\n has quadratic complexity unlike an RNN that is only linear, and has a finite context window\n of block_size, unlike an RNN that has an infinite context window.\n \"\"\"\n block_size = model.get_block_size()\n model.eval()\n\n # CREDITS: https://github.com/karpathy/minGPT/blob/master/mingpt/utils.py\n def top_k_logits(logits, k):\n v, _ = torch.topk(logits, k)\n out = logits.clone()\n out[out < v[:, [-1]]] = -float(\"Inf\")\n return out\n\n for _ in range(steps):\n x_cond = (\n x if x.size(1) <= block_size else x[:, -block_size:]\n ) # crop context if needed\n logits = model(x_cond)\n\n # pluck the logits at the final step and scale by temperature\n logits = logits[:, -1, :] / temperature\n\n # optionally crop probabilities to only the top k options\n if top_k is not None:\n logits = top_k_logits(logits, top_k)\n\n # apply softmax to convert to probabilities\n probs = F.softmax(logits, dim=-1)\n\n # sample from the distribution or take the most likely\n if sample:\n ix = torch.multinomial(probs, num_samples=1)\n else:\n _, ix = torch.topk(probs, k=1, dim=-1)\n\n # append to the sequence and continue\n x = torch.cat((x, ix), dim=1)\n\n return x[0] # escape the batch dimension\n\n\nif __name__ == \"__main__\":\n seed_everything(42)\n\n # Adjust batch depending on the available memory on your machine.\n # You can also use reversible layers to save memory\n REF_BATCH = 512\n BATCH = 128\n\n WORKERS = 4\n EPOCHS = 1\n BLOCK = 128\n WARMUP = 20\n\n if not os.path.exists(\"input.txt\"):\n os.system(\n \"wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt\"\n )\n\n text = open(\"input.txt\", \"r\").read()\n train_dataset = CharDataset(\n text, BLOCK\n ) # one line of poem is roughly 50 characters\n random_sampler = RandomSampler(train_dataset)\n train_loader = DataLoader(\n train_dataset,\n sampler=random_sampler,\n batch_size=BATCH,\n num_workers=WORKERS,\n pin_memory=True,\n )\n\n model = GPT(\n vocab_size=train_dataset.vocab_size,\n block_size=train_dataset.block_size,\n attention=\"scaled_dot_product\",\n warmup_tokens=REF_BATCH * WARMUP,\n final_tokens=EPOCHS * len(train_dataset) * BLOCK,\n )\n print(model)\n\n trainer = Trainer(\n gpusdevices=1, accelerator=\"gpu\",\n max_epochs=EPOCHS,\n precision=16,\n log_every_n_steps=1,\n accumulate_grad_batches=REF_BATCH // BATCH,\n )\n\n trainer.fit(model, train_loader)\n\n # Sample from the model, let it predict a paragraph\n context = \"Friends of my soul\" # prime with something\n x = train_dataset.to_tokens(context, model.device)\n y = sample(model, x, steps=1000, temperature=1.0, sample=True, top_k=10)\n\n print(train_dataset.from_tokens(y))\n", "path": "examples/microGPT.py" } ]
diff --git a/docs/source/xformers_mingpt.ipynb b/docs/source/xformers_mingpt.ipynb index 6f24372d4c..875fe10410 100644 --- a/docs/source/xformers_mingpt.ipynb +++ b/docs/source/xformers_mingpt.ipynb @@ -433,7 +433,7 @@ ")\n", "\n", "trainer = Trainer(\n", - " gpus=1,\n", + " devices=1, accelerator=\"gpu\",\n", " max_epochs=EPOCHS,\n", " precision=16,\n", " gradient_clip_val=1,\n", diff --git a/examples/microGPT.py b/examples/microGPT.py index 6f78643155..72831c0948 100644 --- a/examples/microGPT.py +++ b/examples/microGPT.py @@ -310,7 +310,7 @@ def top_k_logits(logits, k): print(model) trainer = Trainer( - gpus=1, + gpusdevices=1, accelerator="gpu", max_epochs=EPOCHS, precision=16, log_every_n_steps=1,
ocadotechnology__codeforlife-portal-412
Update models search field values in admin
[ { "content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS – Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any “Ocado” logos,\n# trade names or the trademark “Ocado” or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of “Ocado” as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# “Ocado” or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom __future__ import absolute_import\n\nimport re\nimport datetime\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django_countries.fields import CountryField\nfrom django.core.cache import cache\nfrom django.utils import timezone\n\nfrom online_status.status import CACHE_USERS\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n can_view_aggregated_data = models.BooleanField(default=False)\n developer = models.BooleanField(default=False)\n\n awaiting_email_verification = models.BooleanField(default=False)\n\n def __unicode__(self):\n return self.user.username\n\n def joined_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(days=7) <= self.user.date_joined\n\n\nclass School(models.Model):\n name = models.CharField(max_length=200)\n postcode = models.CharField(max_length=10)\n town = models.CharField(max_length=200)\n latitude = models.CharField(max_length=20)\n longitude = models.CharField(max_length=20)\n country = CountryField(blank_label='(select country)')\n\n class Meta:\n permissions = (\n ('view_aggregated_data', \"Can see available aggregated data\"),\n ('view_map_data', \"Can see schools' location displayed on map\")\n )\n\n def __unicode__(self):\n return self.name\n\n\nclass TeacherModelManager(models.Manager):\n def factory(self, title, first_name, last_name, email, password):\n from portal.helpers.generators import get_random_username\n\n user = User.objects.create_user(\n username=email,\n email=email,\n password=password,\n first_name=first_name,\n last_name=last_name)\n\n user_profile = UserProfile.objects.create(user=user)\n\n return 
Teacher.objects.create(user=user_profile, new_user=user, title=title)\n\n\nclass Teacher(models.Model):\n title = models.CharField(max_length=35)\n user = models.OneToOneField(UserProfile)\n new_user = models.OneToOneField(User, related_name='new_teacher', null=True, blank=True)\n school = models.ForeignKey(School, related_name='teacher_school', null=True)\n is_admin = models.BooleanField(default=False)\n pending_join_request = models.ForeignKey(School, related_name='join_request', null=True)\n\n objects = TeacherModelManager()\n\n def teaches(self, userprofile):\n if hasattr(userprofile, 'student'):\n student = userprofile.student\n return not student.is_independent() and student.class_field.teacher == self\n\n def has_school(self):\n return self.school is not (None or \"\")\n\n def has_class(self):\n classes = self.class_teacher.all()\n return classes.count() != 0\n\n def class_(self):\n if self.has_class():\n classes = self.class_teacher.all()\n return classes[0]\n return None\n\n def __unicode__(self):\n return '%s %s' % (self.user.first_name, self.user.last_name)\n\n\nclass Class(models.Model):\n name = models.CharField(max_length=200)\n teacher = models.ForeignKey(Teacher, related_name='class_teacher')\n access_code = models.CharField(max_length=5)\n classmates_data_viewable = models.BooleanField(default=False)\n always_accept_requests = models.BooleanField(default=False)\n accept_requests_until = models.DateTimeField(null=True)\n\n def __unicode__(self):\n return self.name\n\n def has_students(self):\n students = self.students.all()\n return students.count() != 0\n\n def get_logged_in_students(self):\n ONLINE = 1\n\n \"\"\"This gets all the students who are logged in.\"\"\"\n users_status = cache.get(CACHE_USERS)\n online_users_status = filter(lambda status: status.status == ONLINE, users_status)\n online_user_ids = map(lambda status: status.user.id, online_users_status)\n\n # Query all logged in users based on id list\n return Student.objects.filter(class_field=self).filter(new_user__id__in=online_user_ids)\n\n class Meta:\n verbose_name_plural = \"classes\"\n\n\nclass StudentModelManager(models.Manager):\n def schoolFactory(self, klass, name, password):\n from portal.helpers.generators import get_random_username\n\n user = User.objects.create_user(\n username=get_random_username(),\n password=password,\n first_name=name)\n\n user_profile = UserProfile.objects.create(user=user)\n return Student.objects.create(class_field=klass, user=user_profile, new_user=user)\n\n def independentStudentFactory(self, username, name, email, password):\n user = User.objects.create_user(\n username=username,\n email=email,\n password=password,\n first_name=name)\n\n user_profile = UserProfile.objects.create(user=user)\n\n return Student.objects.create(user=user_profile, new_user=user)\n\n\nclass Student(models.Model):\n class_field = models.ForeignKey(Class, related_name='students', null=True)\n user = models.OneToOneField(UserProfile)\n new_user = models.OneToOneField(User, related_name='new_student', null=True, blank=True)\n pending_class_request = models.ForeignKey(Class, related_name='class_request', null=True)\n\n objects = StudentModelManager()\n\n def is_independent(self):\n return not self.class_field\n\n def __unicode__(self):\n return '%s %s' % (self.new_user.first_name, self.new_user.last_name)\n\n\ndef stripStudentName(name):\n return re.sub('[ \\t]+', ' ', name.strip())\n\n\nclass Guardian(models.Model):\n name = models.CharField(max_length=200)\n children = 
models.ManyToManyField(Student)\n user = models.OneToOneField(UserProfile)\n new_user = models.OneToOneField(User, related_name='new_guardian', null=True, blank=True)\n\n def __unicode__(self):\n return '%s %s' % (self.new_user.first_name, self.new_user.last_name)\n\n\nclass EmailVerification(models.Model):\n user = models.ForeignKey(User, related_name='email_verifications', null=True, blank=True)\n token = models.CharField(max_length=30)\n email = models.CharField(max_length=200, null=True, default=None, blank=True)\n expiry = models.DateTimeField()\n verified = models.BooleanField(default=False)\n\n\nclass FrontPageNews(models.Model):\n title = models.CharField(max_length=200)\n text = models.CharField(max_length=1000)\n link = models.CharField(max_length=500)\n link_text = models.CharField(max_length=200)\n added_dstamp = models.DateTimeField()\n\n class Meta:\n verbose_name_plural = \"front page news\"\n\n def __unicode__(self):\n return self.title\n\n\nfrom . import handlers # noqa\n", "path": "portal/models.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS – Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any “Ocado” logos,\n# trade names or the trademark “Ocado” or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of “Ocado” as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# “Ocado” or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom __future__ import absolute_import\n\nimport re\nimport datetime\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django_countries.fields import CountryField\nfrom django.core.cache import cache\nfrom django.utils import timezone\n\nfrom online_status.status import CACHE_USERS\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n can_view_aggregated_data = models.BooleanField(default=False)\n developer = models.BooleanField(default=False)\n\n awaiting_email_verification = models.BooleanField(default=False)\n\n def __unicode__(self):\n return self.user.username\n\n def joined_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(days=7) <= self.user.date_joined\n\n\nclass School(models.Model):\n name = models.CharField(max_length=200)\n postcode = models.CharField(max_length=10)\n town = models.CharField(max_length=200)\n latitude = models.CharField(max_length=20)\n longitude = models.CharField(max_length=20)\n country = CountryField(blank_label='(select country)')\n\n class Meta:\n permissions = (\n ('view_aggregated_data', \"Can see available aggregated data\"),\n ('view_map_data', \"Can see schools' location displayed on map\")\n )\n\n def __unicode__(self):\n return self.name\n\n\nclass TeacherModelManager(models.Manager):\n def factory(self, title, first_name, last_name, email, password):\n from portal.helpers.generators import get_random_username\n\n user = User.objects.create_user(\n username=email,\n email=email,\n password=password,\n first_name=first_name,\n last_name=last_name)\n\n user_profile = UserProfile.objects.create(user=user)\n\n return 
Teacher.objects.create(user=user_profile, new_user=user, title=title)\n\n\nclass Teacher(models.Model):\n title = models.CharField(max_length=35)\n user = models.OneToOneField(UserProfile)\n new_user = models.OneToOneField(User, related_name='new_teacher', null=True, blank=True)\n school = models.ForeignKey(School, related_name='teacher_school', null=True)\n is_admin = models.BooleanField(default=False)\n pending_join_request = models.ForeignKey(School, related_name='join_request', null=True)\n\n objects = TeacherModelManager()\n\n def teaches(self, userprofile):\n if hasattr(userprofile, 'student'):\n student = userprofile.student\n return not student.is_independent() and student.class_field.teacher == self\n\n def has_school(self):\n return self.school is not (None or \"\")\n\n def has_class(self):\n classes = self.class_teacher.all()\n return classes.count() != 0\n\n def class_(self):\n if self.has_class():\n classes = self.class_teacher.all()\n return classes[0]\n return None\n\n def __unicode__(self):\n return '%s %s' % (self.new_user.first_name, self.new_user.last_name)\n\n\nclass Class(models.Model):\n name = models.CharField(max_length=200)\n teacher = models.ForeignKey(Teacher, related_name='class_teacher')\n access_code = models.CharField(max_length=5)\n classmates_data_viewable = models.BooleanField(default=False)\n always_accept_requests = models.BooleanField(default=False)\n accept_requests_until = models.DateTimeField(null=True)\n\n def __unicode__(self):\n return self.name\n\n def has_students(self):\n students = self.students.all()\n return students.count() != 0\n\n def get_logged_in_students(self):\n ONLINE = 1\n\n \"\"\"This gets all the students who are logged in.\"\"\"\n users_status = cache.get(CACHE_USERS)\n online_users_status = filter(lambda status: status.status == ONLINE, users_status)\n online_user_ids = map(lambda status: status.user.id, online_users_status)\n\n # Query all logged in users based on id list\n return Student.objects.filter(class_field=self).filter(new_user__id__in=online_user_ids)\n\n class Meta:\n verbose_name_plural = \"classes\"\n\n\nclass StudentModelManager(models.Manager):\n def schoolFactory(self, klass, name, password):\n from portal.helpers.generators import get_random_username\n\n user = User.objects.create_user(\n username=get_random_username(),\n password=password,\n first_name=name)\n\n user_profile = UserProfile.objects.create(user=user)\n return Student.objects.create(class_field=klass, user=user_profile, new_user=user)\n\n def independentStudentFactory(self, username, name, email, password):\n user = User.objects.create_user(\n username=username,\n email=email,\n password=password,\n first_name=name)\n\n user_profile = UserProfile.objects.create(user=user)\n\n return Student.objects.create(user=user_profile, new_user=user)\n\n\nclass Student(models.Model):\n class_field = models.ForeignKey(Class, related_name='students', null=True)\n user = models.OneToOneField(UserProfile)\n new_user = models.OneToOneField(User, related_name='new_student', null=True, blank=True)\n pending_class_request = models.ForeignKey(Class, related_name='class_request', null=True)\n\n objects = StudentModelManager()\n\n def is_independent(self):\n return not self.class_field\n\n def __unicode__(self):\n return '%s %s' % (self.new_user.first_name, self.new_user.last_name)\n\n\ndef stripStudentName(name):\n return re.sub('[ \\t]+', ' ', name.strip())\n\n\nclass Guardian(models.Model):\n name = models.CharField(max_length=200)\n children = 
models.ManyToManyField(Student)\n user = models.OneToOneField(UserProfile)\n new_user = models.OneToOneField(User, related_name='new_guardian', null=True, blank=True)\n\n def __unicode__(self):\n return '%s %s' % (self.new_user.first_name, self.new_user.last_name)\n\n\nclass EmailVerification(models.Model):\n user = models.ForeignKey(User, related_name='email_verifications', null=True, blank=True)\n token = models.CharField(max_length=30)\n email = models.CharField(max_length=200, null=True, default=None, blank=True)\n expiry = models.DateTimeField()\n verified = models.BooleanField(default=False)\n\n\nclass FrontPageNews(models.Model):\n title = models.CharField(max_length=200)\n text = models.CharField(max_length=1000)\n link = models.CharField(max_length=500)\n link_text = models.CharField(max_length=200)\n added_dstamp = models.DateTimeField()\n\n class Meta:\n verbose_name_plural = \"front page news\"\n\n def __unicode__(self):\n return self.title\n\n\nfrom . import handlers # noqa\n", "path": "portal/models.py" } ]
diff --git a/portal/models.py b/portal/models.py index daa12713de..7dcc17e237 100644 --- a/portal/models.py +++ b/portal/models.py @@ -126,7 +126,7 @@ def class_(self): return None def __unicode__(self): - return '%s %s' % (self.user.first_name, self.user.last_name) + return '%s %s' % (self.new_user.first_name, self.new_user.last_name) class Class(models.Model):
dj-stripe__dj-stripe-1964
Creating WebhooksEndpoint locally raises tolerance constraint failed error **Describe the bug** Attempting to create new WebhookEndpoint via django admin fails with `djstripe.models.webhooks.WebhookEndpoint.DoesNotExist: WebhookEndpoint matching query does not exist.` above error is caused by failure to create local object: sqlite: `sqlite3.IntegrityError: NOT NULL constraint failed: djstripe_webhookendpoint.tolerance` mysql: `MySQLdb.IntegrityError: (1048, "Column 'tolerance' cannot be null")` The WebhookEndpoint is successfully created in stripe, but not in the local DB. **To Reproduce** Steps to reproduce the behavior: 1. Fresh django installation with dj-stripe 2. `python3 manage.py migrate && python3 manage.py runserver` 3. Add an API key via Django admin 4. [optionally] run `python3 manage.py djstripe_sync_models` 5. Add a new WebhookEndpoint via django admin **Expected behavior** A new WebhookEndpoint is created in local django app **Environment** - dj-stripe version: 2.8.1 - stripe API version: "2022-11-15" - Database: [any] - Python version: 3.11.4 - Django version: 4.2.3 **Can you reproduce the issue with the latest version of master?** Yes It appears that the API does not return the `tolerance` key, so somewhere along the line this field is explicitly set to `None`, which eventually causes the error `IntegrityError: NOT NULL constraint failed: djstripe_webhookendpoint.tolerance` (as opposed to the default value being used as defined in the model). This issue can be easily reproduced by modifying the webhook fixture (`tests/fixtures/webhook_endpoint_fake0001.json`) to remove the `tolerance` key, and running pytest. I'm very new to dj-stripe, but issue can be resolved by adding a check to `models.webhooks.WebhookEndpoint._attach_objects_hook`: ```python def _attach_objects_hook( self, cls, data, current_ids=None, api_key=djstripe_settings.STRIPE_SECRET_KEY ): ... self.djstripe_uuid = data.get("metadata", {}).get("djstripe_uuid") if not isinstance(data.get("tolerance", None), (int, float)): self.tolerance = djstripe_settings.WEBHOOK_TOLERANCE ``` If this is actually a bug, and the proposed fix looks good, I'd be happy to contribute a PR.
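As an illustration of the guard proposed above, here is a small, self-contained sketch of the intended fallback when Stripe's response omits `tolerance`. The helper name is hypothetical and only mirrors the check suggested for `_attach_objects_hook`; it is not dj-stripe API:

```python
# Illustrative helper only (not dj-stripe API): a missing or non-numeric
# "tolerance" key in the Stripe payload falls back to stripe-python's default
# instead of being written as NULL.
import stripe


def resolve_tolerance(data: dict) -> int:
    """Hypothetical helper mirroring the proposed guard in _attach_objects_hook."""
    value = data.get("tolerance")
    if isinstance(value, (int, float)):
        return int(value)
    # Same default the model field uses: stripe.Webhook.DEFAULT_TOLERANCE.
    return stripe.Webhook.DEFAULT_TOLERANCE


print(resolve_tolerance({"url": "https://example.com/webhooks/"}))  # falls back to the default
print(resolve_tolerance({"tolerance": 600}))                        # -> 600
```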
[ { "content": "\"\"\"\nModule for dj-stripe Webhook models\n\"\"\"\n\nimport json\nimport warnings\nfrom traceback import format_exc\nfrom uuid import uuid4\n\nimport stripe\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.datastructures import CaseInsensitiveMapping\nfrom django.utils.functional import cached_property\n\nfrom .. import signals\nfrom ..enums import WebhookEndpointStatus\nfrom ..fields import JSONField, StripeEnumField, StripeForeignKey\nfrom ..settings import djstripe_settings\nfrom .base import StripeModel, logger\nfrom .core import Event\n\n\n# TODO: Add Tests\nclass WebhookEndpoint(StripeModel):\n stripe_class = stripe.WebhookEndpoint\n stripe_dashboard_item_name = \"webhooks\"\n\n api_version = models.CharField(\n max_length=64,\n blank=True,\n help_text=(\n \"The API version events are rendered as for this webhook endpoint. Defaults\"\n \" to the configured Stripe API Version.\"\n ),\n )\n enabled_events = JSONField(\n help_text=(\n \"The list of events to enable for this endpoint. ['*'] indicates that all\"\n \" events are enabled, except those that require explicit selection.\"\n )\n )\n secret = models.CharField(\n max_length=256,\n blank=True,\n editable=False,\n help_text=\"The endpoint's secret, used to generate webhook signatures.\",\n )\n status = StripeEnumField(\n enum=WebhookEndpointStatus,\n help_text=\"The status of the webhook. It can be enabled or disabled.\",\n )\n url = models.URLField(help_text=\"The URL of the webhook endpoint.\", max_length=2048)\n application = models.CharField(\n max_length=255,\n blank=True,\n help_text=\"The ID of the associated Connect application.\",\n )\n\n djstripe_uuid = models.UUIDField(\n null=True,\n unique=True,\n default=uuid4,\n help_text=\"A UUID specific to dj-stripe generated for the endpoint\",\n )\n tolerance = models.PositiveSmallIntegerField(\n help_text=\"Controls the milliseconds tolerance which wards against replay attacks. Leave this to its default value unless you know what you're doing.\",\n default=stripe.Webhook.DEFAULT_TOLERANCE,\n )\n\n def __str__(self):\n return self.url or str(self.djstripe_uuid)\n\n def _attach_objects_hook(\n self, cls, data, current_ids=None, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Gets called by this object's create and sync methods just before save.\n Use this to populate fields before the model is saved.\n \"\"\"\n super()._attach_objects_hook(\n cls, data, current_ids=current_ids, api_key=api_key\n )\n\n self.djstripe_uuid = data.get(\"metadata\", {}).get(\"djstripe_uuid\")\n\n\ndef _get_version():\n from ..apps import __version__\n\n return __version__\n\n\ndef get_remote_ip(request):\n \"\"\"Given the HTTPRequest object return the IP Address of the client\n\n :param request: client request\n :type request: HTTPRequest\n\n :Returns: the client ip address\n \"\"\"\n\n # x-forwarded-for is relevant for django running behind a proxy\n x_forwarded_for = request.headers.get(\"x-forwarded-for\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\")\n\n if not ip:\n warnings.warn(\n \"Could not determine remote IP (missing REMOTE_ADDR). 
\"\n \"This is likely an issue with your wsgi/server setup.\"\n )\n ip = \"0.0.0.0\"\n\n return ip\n\n\nclass WebhookEventTrigger(models.Model):\n \"\"\"\n An instance of a request that reached the server endpoint for Stripe webhooks.\n\n Webhook Events are initially **UNTRUSTED**, as it is possible for any web entity to\n post any data to our webhook url. Data posted may be valid Stripe information,\n garbage, or even malicious.\n The 'valid' flag in this model monitors this.\n \"\"\"\n\n id = models.BigAutoField(primary_key=True)\n remote_ip = models.GenericIPAddressField(\n help_text=\"IP address of the request client.\"\n )\n headers = JSONField()\n body = models.TextField(blank=True)\n valid = models.BooleanField(\n default=False,\n help_text=\"Whether or not the webhook event has passed validation\",\n )\n processed = models.BooleanField(\n default=False,\n help_text=\"Whether or not the webhook event has been successfully processed\",\n )\n exception = models.CharField(max_length=128, blank=True)\n traceback = models.TextField(\n blank=True, help_text=\"Traceback if an exception was thrown during processing\"\n )\n event = StripeForeignKey(\n \"Event\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n help_text=\"Event object contained in the (valid) Webhook\",\n )\n djstripe_version = models.CharField(\n max_length=32,\n default=_get_version, # Needs to be a callable, otherwise it's a db default.\n help_text=\"The version of dj-stripe when the webhook was received\",\n )\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n stripe_trigger_account = StripeForeignKey(\n \"djstripe.Account\",\n on_delete=models.CASCADE,\n to_field=\"id\",\n null=True,\n blank=True,\n help_text=\"The Stripe Account this object belongs to.\",\n )\n webhook_endpoint = StripeForeignKey(\n \"WebhookEndpoint\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n help_text=\"The endpoint this webhook was received on\",\n )\n\n def __str__(self):\n return f\"id={self.id}, valid={self.valid}, processed={self.processed}\"\n\n @classmethod\n def from_request(cls, request, *, webhook_endpoint: WebhookEndpoint = None):\n \"\"\"\n Create, validate and process a WebhookEventTrigger given a Django\n request object.\n\n The process is three-fold:\n 1. Create a WebhookEventTrigger object from a Django request.\n 2. Validate the WebhookEventTrigger as a Stripe event using the API.\n 3. 
If valid, process it into an Event object (and child resource).\n \"\"\"\n\n try:\n body = request.body.decode(request.encoding or \"utf-8\")\n except Exception:\n body = \"(error decoding body)\"\n\n ip = get_remote_ip(request)\n\n try:\n data = json.loads(body)\n except ValueError:\n data = {}\n\n if webhook_endpoint is None:\n stripe_account = StripeModel._find_owner_account(data=data)\n secret = djstripe_settings.WEBHOOK_SECRET\n else:\n stripe_account = webhook_endpoint.djstripe_owner_account\n secret = webhook_endpoint.secret\n\n obj = cls.objects.create(\n headers=dict(request.headers),\n body=body,\n remote_ip=ip,\n stripe_trigger_account=stripe_account,\n webhook_endpoint=webhook_endpoint,\n )\n api_key = (\n stripe_account.default_api_key\n or djstripe_settings.get_default_api_key(obj.livemode)\n )\n\n try:\n # Validate the webhook first\n signals.webhook_pre_validate.send(sender=cls, instance=obj)\n\n if webhook_endpoint:\n # Default to per Webhook Endpoint Tolerance\n obj.valid = obj.validate(\n secret=secret,\n api_key=api_key,\n tolerance=webhook_endpoint.tolerance,\n )\n else:\n obj.valid = obj.validate(secret=secret, api_key=api_key)\n signals.webhook_post_validate.send(\n sender=cls, instance=obj, valid=obj.valid\n )\n\n if obj.valid:\n signals.webhook_pre_process.send(sender=cls, instance=obj)\n if djstripe_settings.WEBHOOK_EVENT_CALLBACK:\n # If WEBHOOK_EVENT_CALLBACK, pass it for processing\n djstripe_settings.WEBHOOK_EVENT_CALLBACK(obj, api_key=api_key)\n else:\n # Process the item (do not save it, it'll get saved below)\n obj.process(save=False, api_key=api_key)\n signals.webhook_post_process.send(\n sender=cls, instance=obj, api_key=api_key\n )\n except Exception as e:\n max_length = cls._meta.get_field(\"exception\").max_length\n obj.exception = str(e)[:max_length]\n obj.traceback = format_exc()\n\n # Send the exception as the webhook_processing_error signal\n signals.webhook_processing_error.send(\n sender=cls,\n instance=obj,\n api_key=api_key,\n exception=e,\n data=getattr(e, \"http_body\", \"\"),\n )\n\n # re-raise the exception so Django sees it\n raise e\n finally:\n obj.save()\n\n return obj\n\n @cached_property\n def json_body(self):\n try:\n return json.loads(self.body)\n except ValueError:\n return {}\n\n @property\n def is_test_event(self):\n event_id = self.json_body.get(\"id\")\n return event_id and event_id.endswith(\"_00000000000000\")\n\n def verify_signature(\n self, secret: str, tolerance: int = stripe.Webhook.DEFAULT_TOLERANCE\n ) -> bool:\n if not secret:\n raise ValueError(\"Cannot verify event signature without a secret\")\n\n # HTTP headers are case-insensitive, but we store them as a dict.\n headers = CaseInsensitiveMapping(self.headers)\n signature = headers.get(\"stripe-signature\")\n\n try:\n stripe.WebhookSignature.verify_header(\n self.body, signature, secret, tolerance\n )\n except stripe.error.SignatureVerificationError:\n logger.exception(\"Failed to verify header\")\n return False\n else:\n return True\n\n def validate(\n self,\n api_key: str = None,\n secret: str = djstripe_settings.WEBHOOK_SECRET,\n tolerance: int = stripe.Webhook.DEFAULT_TOLERANCE,\n validation_method=djstripe_settings.WEBHOOK_VALIDATION,\n ):\n \"\"\"\n The original contents of the Event message must be confirmed by\n refetching it and comparing the fetched data with the original data.\n\n This function makes an API call to Stripe to redownload the Event data\n and returns whether or not it matches the WebhookEventTrigger data.\n \"\"\"\n\n local_data = 
self.json_body\n if \"id\" not in local_data or \"livemode\" not in local_data:\n logger.error(\n '\"id\" not in json body or \"livemode\" not in json body(%s)', local_data\n )\n return False\n\n if self.is_test_event:\n logger.info(\"Test webhook received and discarded: %s\", local_data)\n return False\n\n if validation_method is None:\n # validation disabled\n warnings.warn(\"WEBHOOK VALIDATION is disabled.\")\n return True\n elif validation_method == \"verify_signature\":\n if settings.DEBUG:\n # In debug mode, allow overriding the webhook secret with\n # the x-djstripe-webhook-secret header.\n # (used for stripe cli webhook forwarding)\n headers = CaseInsensitiveMapping(self.headers)\n local_secret = headers.get(\"x-djstripe-webhook-secret\")\n secret = local_secret if local_secret else secret\n return self.verify_signature(secret=secret, tolerance=tolerance)\n\n livemode = local_data[\"livemode\"]\n api_key = api_key or djstripe_settings.get_default_api_key(livemode)\n\n # Retrieve the event using the api_version specified in itself\n remote_data = Event.stripe_class.retrieve(\n id=local_data[\"id\"],\n api_key=api_key,\n stripe_version=local_data[\"api_version\"],\n )\n\n return local_data[\"data\"] == remote_data[\"data\"]\n\n def process(self, save=True, api_key: str = None):\n # Reset traceback and exception in case of reprocessing\n self.exception = \"\"\n self.traceback = \"\"\n\n self.event = Event.process(self.json_body, api_key=api_key)\n self.processed = True\n if save:\n self.save()\n\n return self.event\n", "path": "djstripe/models/webhooks.py" } ]
[ { "content": "\"\"\"\nModule for dj-stripe Webhook models\n\"\"\"\n\nimport json\nimport warnings\nfrom traceback import format_exc\nfrom uuid import uuid4\n\nimport stripe\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.datastructures import CaseInsensitiveMapping\nfrom django.utils.functional import cached_property\n\nfrom .. import signals\nfrom ..enums import WebhookEndpointStatus\nfrom ..fields import JSONField, StripeEnumField, StripeForeignKey\nfrom ..settings import djstripe_settings\nfrom .base import StripeModel, logger\nfrom .core import Event\n\n\n# TODO: Add Tests\nclass WebhookEndpoint(StripeModel):\n stripe_class = stripe.WebhookEndpoint\n stripe_dashboard_item_name = \"webhooks\"\n\n api_version = models.CharField(\n max_length=64,\n blank=True,\n help_text=(\n \"The API version events are rendered as for this webhook endpoint. Defaults\"\n \" to the configured Stripe API Version.\"\n ),\n )\n enabled_events = JSONField(\n help_text=(\n \"The list of events to enable for this endpoint. ['*'] indicates that all\"\n \" events are enabled, except those that require explicit selection.\"\n )\n )\n secret = models.CharField(\n max_length=256,\n blank=True,\n editable=False,\n help_text=\"The endpoint's secret, used to generate webhook signatures.\",\n )\n status = StripeEnumField(\n enum=WebhookEndpointStatus,\n help_text=\"The status of the webhook. It can be enabled or disabled.\",\n )\n url = models.URLField(help_text=\"The URL of the webhook endpoint.\", max_length=2048)\n application = models.CharField(\n max_length=255,\n blank=True,\n help_text=\"The ID of the associated Connect application.\",\n )\n\n djstripe_uuid = models.UUIDField(\n null=True,\n unique=True,\n default=uuid4,\n help_text=\"A UUID specific to dj-stripe generated for the endpoint\",\n )\n tolerance = models.PositiveSmallIntegerField(\n help_text=\"Controls the milliseconds tolerance which wards against replay attacks. Leave this to its default value unless you know what you're doing.\",\n default=djstripe_settings.WEBHOOK_TOLERANCE,\n )\n\n def __str__(self):\n return self.url or str(self.djstripe_uuid)\n\n def _attach_objects_hook(\n self, cls, data, current_ids=None, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Gets called by this object's create and sync methods just before save.\n Use this to populate fields before the model is saved.\n \"\"\"\n super()._attach_objects_hook(\n cls, data, current_ids=current_ids, api_key=api_key\n )\n\n self.djstripe_uuid = data.get(\"metadata\", {}).get(\"djstripe_uuid\")\n self.tolerance = data.get(\"tolerance\", djstripe_settings.WEBHOOK_TOLERANCE)\n\n\ndef _get_version():\n from ..apps import __version__\n\n return __version__\n\n\ndef get_remote_ip(request):\n \"\"\"Given the HTTPRequest object return the IP Address of the client\n\n :param request: client request\n :type request: HTTPRequest\n\n :Returns: the client ip address\n \"\"\"\n\n # x-forwarded-for is relevant for django running behind a proxy\n x_forwarded_for = request.headers.get(\"x-forwarded-for\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\")\n\n if not ip:\n warnings.warn(\n \"Could not determine remote IP (missing REMOTE_ADDR). 
\"\n \"This is likely an issue with your wsgi/server setup.\"\n )\n ip = \"0.0.0.0\"\n\n return ip\n\n\nclass WebhookEventTrigger(models.Model):\n \"\"\"\n An instance of a request that reached the server endpoint for Stripe webhooks.\n\n Webhook Events are initially **UNTRUSTED**, as it is possible for any web entity to\n post any data to our webhook url. Data posted may be valid Stripe information,\n garbage, or even malicious.\n The 'valid' flag in this model monitors this.\n \"\"\"\n\n id = models.BigAutoField(primary_key=True)\n remote_ip = models.GenericIPAddressField(\n help_text=\"IP address of the request client.\"\n )\n headers = JSONField()\n body = models.TextField(blank=True)\n valid = models.BooleanField(\n default=False,\n help_text=\"Whether or not the webhook event has passed validation\",\n )\n processed = models.BooleanField(\n default=False,\n help_text=\"Whether or not the webhook event has been successfully processed\",\n )\n exception = models.CharField(max_length=128, blank=True)\n traceback = models.TextField(\n blank=True, help_text=\"Traceback if an exception was thrown during processing\"\n )\n event = StripeForeignKey(\n \"Event\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n help_text=\"Event object contained in the (valid) Webhook\",\n )\n djstripe_version = models.CharField(\n max_length=32,\n default=_get_version, # Needs to be a callable, otherwise it's a db default.\n help_text=\"The version of dj-stripe when the webhook was received\",\n )\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n stripe_trigger_account = StripeForeignKey(\n \"djstripe.Account\",\n on_delete=models.CASCADE,\n to_field=\"id\",\n null=True,\n blank=True,\n help_text=\"The Stripe Account this object belongs to.\",\n )\n webhook_endpoint = StripeForeignKey(\n \"WebhookEndpoint\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n help_text=\"The endpoint this webhook was received on\",\n )\n\n def __str__(self):\n return f\"id={self.id}, valid={self.valid}, processed={self.processed}\"\n\n @classmethod\n def from_request(cls, request, *, webhook_endpoint: WebhookEndpoint = None):\n \"\"\"\n Create, validate and process a WebhookEventTrigger given a Django\n request object.\n\n The process is three-fold:\n 1. Create a WebhookEventTrigger object from a Django request.\n 2. Validate the WebhookEventTrigger as a Stripe event using the API.\n 3. 
If valid, process it into an Event object (and child resource).\n \"\"\"\n\n try:\n body = request.body.decode(request.encoding or \"utf-8\")\n except Exception:\n body = \"(error decoding body)\"\n\n ip = get_remote_ip(request)\n\n try:\n data = json.loads(body)\n except ValueError:\n data = {}\n\n if webhook_endpoint is None:\n stripe_account = StripeModel._find_owner_account(data=data)\n secret = djstripe_settings.WEBHOOK_SECRET\n else:\n stripe_account = webhook_endpoint.djstripe_owner_account\n secret = webhook_endpoint.secret\n\n obj = cls.objects.create(\n headers=dict(request.headers),\n body=body,\n remote_ip=ip,\n stripe_trigger_account=stripe_account,\n webhook_endpoint=webhook_endpoint,\n )\n api_key = (\n stripe_account.default_api_key\n or djstripe_settings.get_default_api_key(obj.livemode)\n )\n\n try:\n # Validate the webhook first\n signals.webhook_pre_validate.send(sender=cls, instance=obj)\n\n if webhook_endpoint:\n # Default to per Webhook Endpoint Tolerance\n obj.valid = obj.validate(\n secret=secret,\n api_key=api_key,\n tolerance=webhook_endpoint.tolerance,\n )\n else:\n obj.valid = obj.validate(secret=secret, api_key=api_key)\n signals.webhook_post_validate.send(\n sender=cls, instance=obj, valid=obj.valid\n )\n\n if obj.valid:\n signals.webhook_pre_process.send(sender=cls, instance=obj)\n if djstripe_settings.WEBHOOK_EVENT_CALLBACK:\n # If WEBHOOK_EVENT_CALLBACK, pass it for processing\n djstripe_settings.WEBHOOK_EVENT_CALLBACK(obj, api_key=api_key)\n else:\n # Process the item (do not save it, it'll get saved below)\n obj.process(save=False, api_key=api_key)\n signals.webhook_post_process.send(\n sender=cls, instance=obj, api_key=api_key\n )\n except Exception as e:\n max_length = cls._meta.get_field(\"exception\").max_length\n obj.exception = str(e)[:max_length]\n obj.traceback = format_exc()\n\n # Send the exception as the webhook_processing_error signal\n signals.webhook_processing_error.send(\n sender=cls,\n instance=obj,\n api_key=api_key,\n exception=e,\n data=getattr(e, \"http_body\", \"\"),\n )\n\n # re-raise the exception so Django sees it\n raise e\n finally:\n obj.save()\n\n return obj\n\n @cached_property\n def json_body(self):\n try:\n return json.loads(self.body)\n except ValueError:\n return {}\n\n @property\n def is_test_event(self):\n event_id = self.json_body.get(\"id\")\n return event_id and event_id.endswith(\"_00000000000000\")\n\n def verify_signature(\n self, secret: str, tolerance: int = djstripe_settings.WEBHOOK_TOLERANCE\n ) -> bool:\n if not secret:\n raise ValueError(\"Cannot verify event signature without a secret\")\n\n # HTTP headers are case-insensitive, but we store them as a dict.\n headers = CaseInsensitiveMapping(self.headers)\n signature = headers.get(\"stripe-signature\")\n\n try:\n stripe.WebhookSignature.verify_header(\n self.body, signature, secret, tolerance\n )\n except stripe.error.SignatureVerificationError:\n logger.exception(\"Failed to verify header\")\n return False\n else:\n return True\n\n def validate(\n self,\n api_key: str = None,\n secret: str = djstripe_settings.WEBHOOK_SECRET,\n tolerance: int = djstripe_settings.WEBHOOK_TOLERANCE,\n validation_method=djstripe_settings.WEBHOOK_VALIDATION,\n ):\n \"\"\"\n The original contents of the Event message must be confirmed by\n refetching it and comparing the fetched data with the original data.\n\n This function makes an API call to Stripe to redownload the Event data\n and returns whether or not it matches the WebhookEventTrigger data.\n \"\"\"\n\n local_data = 
self.json_body\n if \"id\" not in local_data or \"livemode\" not in local_data:\n logger.error(\n '\"id\" not in json body or \"livemode\" not in json body(%s)', local_data\n )\n return False\n\n if self.is_test_event:\n logger.info(\"Test webhook received and discarded: %s\", local_data)\n return False\n\n if validation_method is None:\n # validation disabled\n warnings.warn(\"WEBHOOK VALIDATION is disabled.\")\n return True\n elif validation_method == \"verify_signature\":\n if settings.DEBUG:\n # In debug mode, allow overriding the webhook secret with\n # the x-djstripe-webhook-secret header.\n # (used for stripe cli webhook forwarding)\n headers = CaseInsensitiveMapping(self.headers)\n local_secret = headers.get(\"x-djstripe-webhook-secret\")\n secret = local_secret if local_secret else secret\n return self.verify_signature(secret=secret, tolerance=tolerance)\n\n livemode = local_data[\"livemode\"]\n api_key = api_key or djstripe_settings.get_default_api_key(livemode)\n\n # Retrieve the event using the api_version specified in itself\n remote_data = Event.stripe_class.retrieve(\n id=local_data[\"id\"],\n api_key=api_key,\n stripe_version=local_data[\"api_version\"],\n )\n\n return local_data[\"data\"] == remote_data[\"data\"]\n\n def process(self, save=True, api_key: str = None):\n # Reset traceback and exception in case of reprocessing\n self.exception = \"\"\n self.traceback = \"\"\n\n self.event = Event.process(self.json_body, api_key=api_key)\n self.processed = True\n if save:\n self.save()\n\n return self.event\n", "path": "djstripe/models/webhooks.py" } ]
diff --git a/djstripe/models/webhooks.py b/djstripe/models/webhooks.py index 045ae48fa5..da2053407a 100644 --- a/djstripe/models/webhooks.py +++ b/djstripe/models/webhooks.py @@ -83,6 +83,7 @@ def _attach_objects_hook( ) self.djstripe_uuid = data.get("metadata", {}).get("djstripe_uuid") + self.tolerance = data.get("tolerance", djstripe_settings.WEBHOOK_TOLERANCE) def _get_version():
django-import-export__django-import-export-613
NumberWidget.is_empty() should strip the value if it is a string type

At the moment the `NumberWidget.is_empty()` check doesn't strip the value before making the `value == ""` comparison. As a consequence, if the value happens to be a string comprised entirely of spaces, e.g. `u' '`, the `is_empty()` check evaluates to False. This in effect can cause value errors (e.g. `ValueError: could not convert string to float: `) in child widgets (`IntegerWidget`, etc.) which do a type conversion.

To resolve this, I think we should strip the value (if it's a string type) before the comparison.

```Python
class NumberWidget(Widget):
    """
    """

    def is_empty(self, value):
        if isinstance(value, six.string_types):
            value = value.strip()
        # 0 is not empty
        return value is None or value == ""
```
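As a quick illustration of the failure mode, here is a minimal, Django-free sketch of both versions of the check; plain `str` stands in for `six.string_types`, and the widget class is reduced to just the two predicates.

```python
# Minimal sketch comparing the current check with the stripped variant.
# Plain `str` stands in for `six.string_types`; no Django or tablib needed.

def is_empty_current(value):
    # 0 is not empty
    return value is None or value == ""


def is_empty_stripped(value):
    if isinstance(value, str):
        value = value.strip()
    # 0 is not empty
    return value is None or value == ""


assert is_empty_current("   ") is False   # whitespace slips through ...
# ... and float("   ") in FloatWidget.clean() then raises
# ValueError: could not convert string to float
assert is_empty_stripped("   ") is True   # the stripped check treats it as empty
assert is_empty_stripped(0) is False      # 0 is still a meaningful value
assert is_empty_stripped(None) is True
```

The patch in this record applies exactly this stripping inside `NumberWidget.is_empty()` and adds tests covering whitespace-only input for the float, decimal and integer widgets.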
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nfrom datetime import datetime, date\nfrom django.utils import datetime_safe, timezone, six\nfrom django.utils.encoding import smart_text\nfrom django.conf import settings\n\ntry:\n from django.utils.encoding import force_text\nexcept ImportError:\n from django.utils.encoding import force_unicode as force_text\n\ntry:\n from django.utils.dateparse import parse_duration\nexcept ImportError:\n # Duration fields were added in Django 1.8\n pass\n\n\nclass Widget(object):\n \"\"\"\n A Widget takes care of converting between import and export representations.\n\n This is achieved by the two methods,\n :meth:`~import_export.widgets.Widget.clean` and\n :meth:`~import_export.widgets.Widget.render`.\n \"\"\"\n def clean(self, value, row=None, *args, **kwargs):\n \"\"\"\n Returns an appropriate Python object for an imported value.\n\n For example, if you import a value from a spreadsheet,\n :meth:`~import_export.widgets.Widget.clean` handles conversion\n of this value into the corresponding Python object.\n\n Numbers or dates can be *cleaned* to their respective data types and\n don't have to be imported as Strings.\n \"\"\"\n return value\n\n def render(self, value, obj=None):\n \"\"\"\n Returns an export representation of a Python value.\n\n For example, if you have an object you want to export,\n :meth:`~import_export.widgets.Widget.render` takes care of converting\n the object's field to a value that can be written to a spreadsheet.\n \"\"\"\n return force_text(value)\n\n\nclass NumberWidget(Widget):\n \"\"\"\n \"\"\"\n\n def is_empty(self, value):\n # 0 is not empty\n return value is None or value == \"\"\n\n def render(self, value, obj=None):\n return value\n\n\nclass FloatWidget(NumberWidget):\n \"\"\"\n Widget for converting floats fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return float(value)\n\n\nclass IntegerWidget(NumberWidget):\n \"\"\"\n Widget for converting integer fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return int(float(value))\n\n\nclass DecimalWidget(NumberWidget):\n \"\"\"\n Widget for converting decimal fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return Decimal(value)\n\n\nclass CharWidget(Widget):\n \"\"\"\n Widget for converting text fields.\n \"\"\"\n\n def render(self, value, obj=None):\n return force_text(value)\n\n\nclass BooleanWidget(Widget):\n \"\"\"\n Widget for converting boolean fields.\n \"\"\"\n TRUE_VALUES = [\"1\", 1]\n FALSE_VALUE = \"0\"\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n return self.TRUE_VALUES[0] if value else self.FALSE_VALUE\n\n def clean(self, value, row=None, *args, **kwargs):\n if value == \"\":\n return None\n return True if value in self.TRUE_VALUES else False\n\n\nclass DateWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATE_INPUT_FORMATS:\n formats = (\"%Y-%m-%d\",)\n else:\n formats = settings.DATE_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, date):\n return value\n for format in self.formats:\n try:\n return 
datetime.strptime(value, format).date()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n try:\n return value.strftime(self.formats[0])\n except:\n return datetime_safe.new_date(value).strftime(self.formats[0])\n\n\nclass DateTimeWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter. If none is set, either\n ``settings.DATETIME_INPUT_FORMATS`` or ``\"%Y-%m-%d %H:%M:%S\"`` is used.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATETIME_INPUT_FORMATS:\n formats = (\"%Y-%m-%d %H:%M:%S\",)\n else:\n formats = settings.DATETIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, datetime):\n return value\n for format in self.formats:\n try:\n dt = datetime.strptime(value, format)\n if settings.USE_TZ:\n # make datetime timezone aware so we don't compare\n # naive datetime to an aware one\n dt = timezone.make_aware(dt,\n timezone.get_default_timezone())\n return dt\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date/time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return value.strftime(self.formats[0])\n\n\nclass TimeWidget(Widget):\n \"\"\"\n Widget for converting time fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.TIME_INPUT_FORMATS:\n formats = (\"%H:%M:%S\",)\n else:\n formats = settings.TIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n for format in self.formats:\n try:\n return datetime.strptime(value, format).time()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return value.strftime(self.formats[0])\n\n\nclass DurationWidget(Widget):\n \"\"\"\n Widget for converting time duration fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n\n try:\n return parse_duration(value)\n except NameError:\n # Duration fields were added in Django 1.8\n raise RuntimeError(\"Duration parsing not supported.\")\n except (ValueError, TypeError):\n raise ValueError(\"Enter a valid duration.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return str(value)\n\n\nclass SimpleArrayWidget(Widget):\n def __init__(self, separator=None):\n if separator is None:\n separator = ','\n self.separator = separator\n super(SimpleArrayWidget, self).__init__()\n\n def clean(self, value, row=None, *args, **kwargs):\n return value.split(self.separator) if value else []\n\n def render(self, value, obj=None):\n return self.separator.join(six.text_type(v) for v in value)\n\n\nclass ForeignKeyWidget(Widget):\n \"\"\"\n Widget for a ``ForeignKey`` field which looks up a related model using\n \"natural keys\" in both export an import.\n\n The lookup field defaults to using the primary key (``pk``) as lookup\n criterion but can be customised to use any field on the related model.\n\n Unlike specifying a related field in your resource like so…\n\n ::\n\n class Meta:\n fields = ('author__name',)\n\n …using a :class:`~import_export.widgets.ForeignKeyWidget` has the\n advantage that 
it can not only be used for exporting, but also importing\n data with foreign key relationships.\n\n Here's an example on how to use\n :class:`~import_export.widgets.ForeignKeyWidget` to lookup related objects\n using ``Author.name`` instead of ``Author.pk``::\n\n class BookResource(resources.ModelResource):\n author = fields.Field(\n column_name='author',\n attribute='author',\n widget=ForeignKeyWidget(Author, 'name'))\n\n class Meta:\n fields = ('author',)\n\n :param model: The Model the ForeignKey refers to (required).\n :param field: A field on the related model used for looking up a particular object.\n \"\"\"\n def __init__(self, model, field='pk', *args, **kwargs):\n self.model = model\n self.field = field\n super(ForeignKeyWidget, self).__init__(*args, **kwargs)\n\n def get_queryset(self, value, row, *args, **kwargs):\n \"\"\"\n Returns a queryset of all objects for this Model.\n\n Overwrite this method if you want to limit the pool of objects from\n which the related object is retrieved.\n\n :param value: The field's value in the datasource.\n :param row: The datasource's current row.\n\n As an example; if you'd like to have ForeignKeyWidget look up a Person\n by their pre- **and** lastname column, you could subclass the widget\n like so::\n\n class FullNameForeignKeyWidget(ForeignKeyWidget):\n def get_queryset(self, value, row):\n return self.model.objects.filter(\n first_name__iexact=row[\"first_name\"],\n last_name__iexact=row[\"last_name\"]\n )\n \"\"\"\n return self.model.objects.all()\n\n def clean(self, value, row=None, *args, **kwargs):\n val = super(ForeignKeyWidget, self).clean(value)\n if val:\n return self.get_queryset(value, row, *args, **kwargs).get(**{self.field: val})\n else:\n return None\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n return getattr(value, self.field)\n\n\nclass ManyToManyWidget(Widget):\n \"\"\"\n Widget that converts between representations of a ManyToMany relationships\n as a list and an actual ManyToMany field.\n\n :param model: The model the ManyToMany field refers to (required).\n :param separator: Defaults to ``','``.\n :param field: A field on the related model. Default is ``pk``.\n \"\"\"\n\n def __init__(self, model, separator=None, field=None, *args, **kwargs):\n if separator is None:\n separator = ','\n if field is None:\n field = 'pk'\n self.model = model\n self.separator = separator\n self.field = field\n super(ManyToManyWidget, self).__init__(*args, **kwargs)\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return self.model.objects.none()\n if isinstance(value, (float, int)):\n ids = [int(value)]\n else:\n ids = value.split(self.separator)\n ids = filter(None, ids)\n return self.model.objects.filter(**{\n '%s__in' % self.field: ids\n })\n\n def render(self, value, obj=None):\n ids = [smart_text(getattr(obj, self.field)) for obj in value.all()]\n return self.separator.join(ids)\n", "path": "import_export/widgets.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nfrom datetime import datetime, date\nfrom django.utils import datetime_safe, timezone, six\nfrom django.utils.encoding import smart_text\nfrom django.conf import settings\n\ntry:\n from django.utils.encoding import force_text\nexcept ImportError:\n from django.utils.encoding import force_unicode as force_text\n\ntry:\n from django.utils.dateparse import parse_duration\nexcept ImportError:\n # Duration fields were added in Django 1.8\n pass\n\n\nclass Widget(object):\n \"\"\"\n A Widget takes care of converting between import and export representations.\n\n This is achieved by the two methods,\n :meth:`~import_export.widgets.Widget.clean` and\n :meth:`~import_export.widgets.Widget.render`.\n \"\"\"\n def clean(self, value, row=None, *args, **kwargs):\n \"\"\"\n Returns an appropriate Python object for an imported value.\n\n For example, if you import a value from a spreadsheet,\n :meth:`~import_export.widgets.Widget.clean` handles conversion\n of this value into the corresponding Python object.\n\n Numbers or dates can be *cleaned* to their respective data types and\n don't have to be imported as Strings.\n \"\"\"\n return value\n\n def render(self, value, obj=None):\n \"\"\"\n Returns an export representation of a Python value.\n\n For example, if you have an object you want to export,\n :meth:`~import_export.widgets.Widget.render` takes care of converting\n the object's field to a value that can be written to a spreadsheet.\n \"\"\"\n return force_text(value)\n\n\nclass NumberWidget(Widget):\n \"\"\"\n \"\"\"\n\n def is_empty(self, value):\n if isinstance(value, six.string_types):\n value = value.strip()\n # 0 is not empty\n return value is None or value == \"\"\n\n def render(self, value, obj=None):\n return value\n\n\nclass FloatWidget(NumberWidget):\n \"\"\"\n Widget for converting floats fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return float(value)\n\n\nclass IntegerWidget(NumberWidget):\n \"\"\"\n Widget for converting integer fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return int(float(value))\n\n\nclass DecimalWidget(NumberWidget):\n \"\"\"\n Widget for converting decimal fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return Decimal(value)\n\n\nclass CharWidget(Widget):\n \"\"\"\n Widget for converting text fields.\n \"\"\"\n\n def render(self, value, obj=None):\n return force_text(value)\n\n\nclass BooleanWidget(Widget):\n \"\"\"\n Widget for converting boolean fields.\n \"\"\"\n TRUE_VALUES = [\"1\", 1]\n FALSE_VALUE = \"0\"\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n return self.TRUE_VALUES[0] if value else self.FALSE_VALUE\n\n def clean(self, value, row=None, *args, **kwargs):\n if value == \"\":\n return None\n return True if value in self.TRUE_VALUES else False\n\n\nclass DateWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATE_INPUT_FORMATS:\n formats = (\"%Y-%m-%d\",)\n else:\n formats = settings.DATE_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, date):\n 
return value\n for format in self.formats:\n try:\n return datetime.strptime(value, format).date()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n try:\n return value.strftime(self.formats[0])\n except:\n return datetime_safe.new_date(value).strftime(self.formats[0])\n\n\nclass DateTimeWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter. If none is set, either\n ``settings.DATETIME_INPUT_FORMATS`` or ``\"%Y-%m-%d %H:%M:%S\"`` is used.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATETIME_INPUT_FORMATS:\n formats = (\"%Y-%m-%d %H:%M:%S\",)\n else:\n formats = settings.DATETIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, datetime):\n return value\n for format in self.formats:\n try:\n dt = datetime.strptime(value, format)\n if settings.USE_TZ:\n # make datetime timezone aware so we don't compare\n # naive datetime to an aware one\n dt = timezone.make_aware(dt,\n timezone.get_default_timezone())\n return dt\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date/time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return value.strftime(self.formats[0])\n\n\nclass TimeWidget(Widget):\n \"\"\"\n Widget for converting time fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.TIME_INPUT_FORMATS:\n formats = (\"%H:%M:%S\",)\n else:\n formats = settings.TIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n for format in self.formats:\n try:\n return datetime.strptime(value, format).time()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return value.strftime(self.formats[0])\n\n\nclass DurationWidget(Widget):\n \"\"\"\n Widget for converting time duration fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n\n try:\n return parse_duration(value)\n except NameError:\n # Duration fields were added in Django 1.8\n raise RuntimeError(\"Duration parsing not supported.\")\n except (ValueError, TypeError):\n raise ValueError(\"Enter a valid duration.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return str(value)\n\n\nclass SimpleArrayWidget(Widget):\n def __init__(self, separator=None):\n if separator is None:\n separator = ','\n self.separator = separator\n super(SimpleArrayWidget, self).__init__()\n\n def clean(self, value, row=None, *args, **kwargs):\n return value.split(self.separator) if value else []\n\n def render(self, value, obj=None):\n return self.separator.join(six.text_type(v) for v in value)\n\n\nclass ForeignKeyWidget(Widget):\n \"\"\"\n Widget for a ``ForeignKey`` field which looks up a related model using\n \"natural keys\" in both export an import.\n\n The lookup field defaults to using the primary key (``pk``) as lookup\n criterion but can be customised to use any field on the related model.\n\n Unlike specifying a related field in your resource like so…\n\n ::\n\n class Meta:\n fields = ('author__name',)\n\n …using a 
:class:`~import_export.widgets.ForeignKeyWidget` has the\n advantage that it can not only be used for exporting, but also importing\n data with foreign key relationships.\n\n Here's an example on how to use\n :class:`~import_export.widgets.ForeignKeyWidget` to lookup related objects\n using ``Author.name`` instead of ``Author.pk``::\n\n class BookResource(resources.ModelResource):\n author = fields.Field(\n column_name='author',\n attribute='author',\n widget=ForeignKeyWidget(Author, 'name'))\n\n class Meta:\n fields = ('author',)\n\n :param model: The Model the ForeignKey refers to (required).\n :param field: A field on the related model used for looking up a particular object.\n \"\"\"\n def __init__(self, model, field='pk', *args, **kwargs):\n self.model = model\n self.field = field\n super(ForeignKeyWidget, self).__init__(*args, **kwargs)\n\n def get_queryset(self, value, row, *args, **kwargs):\n \"\"\"\n Returns a queryset of all objects for this Model.\n\n Overwrite this method if you want to limit the pool of objects from\n which the related object is retrieved.\n\n :param value: The field's value in the datasource.\n :param row: The datasource's current row.\n\n As an example; if you'd like to have ForeignKeyWidget look up a Person\n by their pre- **and** lastname column, you could subclass the widget\n like so::\n\n class FullNameForeignKeyWidget(ForeignKeyWidget):\n def get_queryset(self, value, row):\n return self.model.objects.filter(\n first_name__iexact=row[\"first_name\"],\n last_name__iexact=row[\"last_name\"]\n )\n \"\"\"\n return self.model.objects.all()\n\n def clean(self, value, row=None, *args, **kwargs):\n val = super(ForeignKeyWidget, self).clean(value)\n if val:\n return self.get_queryset(value, row, *args, **kwargs).get(**{self.field: val})\n else:\n return None\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n return getattr(value, self.field)\n\n\nclass ManyToManyWidget(Widget):\n \"\"\"\n Widget that converts between representations of a ManyToMany relationships\n as a list and an actual ManyToMany field.\n\n :param model: The model the ManyToMany field refers to (required).\n :param separator: Defaults to ``','``.\n :param field: A field on the related model. Default is ``pk``.\n \"\"\"\n\n def __init__(self, model, separator=None, field=None, *args, **kwargs):\n if separator is None:\n separator = ','\n if field is None:\n field = 'pk'\n self.model = model\n self.separator = separator\n self.field = field\n super(ManyToManyWidget, self).__init__(*args, **kwargs)\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return self.model.objects.none()\n if isinstance(value, (float, int)):\n ids = [int(value)]\n else:\n ids = value.split(self.separator)\n ids = filter(None, ids)\n return self.model.objects.filter(**{\n '%s__in' % self.field: ids\n })\n\n def render(self, value, obj=None):\n ids = [smart_text(getattr(obj, self.field)) for obj in value.all()]\n return self.separator.join(ids)\n", "path": "import_export/widgets.py" } ]
diff --git a/import_export/widgets.py b/import_export/widgets.py index 72cf7b755..04cb98a61 100644 --- a/import_export/widgets.py +++ b/import_export/widgets.py @@ -56,6 +56,8 @@ class NumberWidget(Widget): """ def is_empty(self, value): + if isinstance(value, six.string_types): + value = value.strip() # 0 is not empty return value is None or value == "" diff --git a/tests/core/tests/widgets_tests.py b/tests/core/tests/widgets_tests.py index ee7c3fbfb..29aec7f70 100644 --- a/tests/core/tests/widgets_tests.py +++ b/tests/core/tests/widgets_tests.py @@ -131,6 +131,28 @@ def test_clean(self): self.assertEqual(self.widget.clean("1:57:00"), self.duration) +class FloatWidgetTest(TestCase): + + def setUp(self): + self.value = 11.111 + self.widget = widgets.FloatWidget() + + def test_clean(self): + self.assertEqual(self.widget.clean(11.111), self.value) + + def test_render(self): + self.assertEqual(self.widget.render(self.value), self.value) + + def test_clean_string_zero(self): + self.assertEqual(self.widget.clean("0"), 0.0) + self.assertEqual(self.widget.clean("0.0"), 0.0) + + def test_clean_empty_string(self): + self.assertEqual(self.widget.clean(""), None) + self.assertEqual(self.widget.clean(" "), None) + self.assertEqual(self.widget.clean("\r\n\t"), None) + + class DecimalWidgetTest(TestCase): def setUp(self): @@ -147,6 +169,11 @@ def test_clean_string_zero(self): self.assertEqual(self.widget.clean("0"), Decimal("0")) self.assertEqual(self.widget.clean("0.0"), Decimal("0")) + def test_clean_empty_string(self): + self.assertEqual(self.widget.clean(""), None) + self.assertEqual(self.widget.clean(" "), None) + self.assertEqual(self.widget.clean("\r\n\t"), None) + class IntegerWidgetTest(TestCase): @@ -161,6 +188,11 @@ def test_clean_string_zero(self): self.assertEqual(self.widget.clean("0"), self.value) self.assertEqual(self.widget.clean("0.0"), self.value) + def test_clean_empty_string(self): + self.assertEqual(self.widget.clean(""), None) + self.assertEqual(self.widget.clean(" "), None) + self.assertEqual(self.widget.clean("\n\t\r"), None) + class ForeignKeyWidgetTest(TestCase):
scrapy__scrapy-4311
Consider making METAREFRESH_IGNORE_TAGS an empty list by default

As a way to allow users to fix #1422, #3768 introduced the `METAREFRESH_IGNORE_TAGS` setting. To keep backward compatibility, the setting was introduced with `['script', 'noscript']` as the default value. However, to reproduce the behavior of web browsers, it seems the right value would be `[]`.

Should we switch the default value of the `METAREFRESH_IGNORE_TAGS` setting to `[]`, even though the change breaks backward compatibility?
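Whatever default ends up shipping, a project that wants the browser-like behaviour today can already opt in by overriding the setting. A minimal sketch follows; the spider name and URL are placeholders, not part of the proposal.

```python
# Minimal sketch: opting into the browser-like behaviour per spider by
# overriding the setting. Spider name and URL are illustrative only.
import scrapy


class MetaRefreshSpider(scrapy.Spider):
    name = "meta_refresh_example"
    start_urls = ["https://example.com/"]

    custom_settings = {
        # With an empty list, MetaRefreshMiddleware no longer skips
        # <meta http-equiv="refresh"> tags found inside <script>/<noscript>.
        "METAREFRESH_IGNORE_TAGS": [],
    }

    def parse(self, response):
        # Any meta-refresh redirect has already been followed by the middleware.
        yield {"final_url": response.url}
```

The same override can be placed in the project's `settings.py` to apply it to every spider, which is also how a project would pin the old `['script', 'noscript']` behaviour if the default were changed.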
[ { "content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group similar settings without leaving blank lines\n* add its documentation to the available settings documentation\n (docs/topics/settings.rst)\n\n\"\"\"\n\nimport sys\nfrom importlib import import_module\nfrom os.path import join, abspath, dirname\n\nAJAXCRAWL_ENABLED = False\n\nASYNCIO_REACTOR = False\n\nAUTOTHROTTLE_ENABLED = False\nAUTOTHROTTLE_DEBUG = False\nAUTOTHROTTLE_MAX_DELAY = 60.0\nAUTOTHROTTLE_START_DELAY = 5.0\nAUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n\nBOT_NAME = 'scrapybot'\n\nCLOSESPIDER_TIMEOUT = 0\nCLOSESPIDER_PAGECOUNT = 0\nCLOSESPIDER_ITEMCOUNT = 0\nCLOSESPIDER_ERRORCOUNT = 0\n\nCOMMANDS_MODULE = ''\n\nCOMPRESSION_ENABLED = True\n\nCONCURRENT_ITEMS = 100\n\nCONCURRENT_REQUESTS = 16\nCONCURRENT_REQUESTS_PER_DOMAIN = 8\nCONCURRENT_REQUESTS_PER_IP = 0\n\nCOOKIES_ENABLED = True\nCOOKIES_DEBUG = False\n\nDEFAULT_ITEM_CLASS = 'scrapy.item.Item'\n\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n}\n\nDEPTH_LIMIT = 0\nDEPTH_STATS_VERBOSE = False\nDEPTH_PRIORITY = 0\n\nDNSCACHE_ENABLED = True\nDNSCACHE_SIZE = 10000\nDNS_RESOLVER = 'scrapy.resolver.CachingThreadedResolver'\nDNS_TIMEOUT = 60\n\nDOWNLOAD_DELAY = 0\n\nDOWNLOAD_HANDLERS = {}\nDOWNLOAD_HANDLERS_BASE = {\n 'data': 'scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler',\n 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',\n 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',\n 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',\n}\n\nDOWNLOAD_TIMEOUT = 180 # 3mins\n\nDOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m\nDOWNLOAD_WARNSIZE = 32*1024*1024 # 32m\n\nDOWNLOAD_FAIL_ON_DATALOSS = True\n\nDOWNLOADER = 'scrapy.core.downloader.Downloader'\n\nDOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'\nDOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'\nDOWNLOADER_CLIENT_TLS_CIPHERS = 'DEFAULT'\n# Use highest TLS/SSL protocol version supported by the platform, also allowing negotiation:\nDOWNLOADER_CLIENT_TLS_METHOD = 'TLS'\nDOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING = False\n\nDOWNLOADER_MIDDLEWARES = {}\n\nDOWNLOADER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,\n 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,\n 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,\n 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,\n 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,\n 
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,\n 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,\n 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,\n # Downloader side\n}\n\nDOWNLOADER_STATS = True\n\nDUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'\n\nEDITOR = 'vi'\nif sys.platform == 'win32':\n EDITOR = '%s -m idlelib.idle'\n\nEXTENSIONS = {}\n\nEXTENSIONS_BASE = {\n 'scrapy.extensions.corestats.CoreStats': 0,\n 'scrapy.extensions.telnet.TelnetConsole': 0,\n 'scrapy.extensions.memusage.MemoryUsage': 0,\n 'scrapy.extensions.memdebug.MemoryDebugger': 0,\n 'scrapy.extensions.closespider.CloseSpider': 0,\n 'scrapy.extensions.feedexport.FeedExporter': 0,\n 'scrapy.extensions.logstats.LogStats': 0,\n 'scrapy.extensions.spiderstate.SpiderState': 0,\n 'scrapy.extensions.throttle.AutoThrottle': 0,\n}\n\nFEED_TEMPDIR = None\nFEED_URI = None\nFEED_URI_PARAMS = None # a function to extend uri arguments\nFEED_FORMAT = 'jsonlines'\nFEED_STORE_EMPTY = False\nFEED_EXPORT_ENCODING = None\nFEED_EXPORT_FIELDS = None\nFEED_STORAGES = {}\nFEED_STORAGES_BASE = {\n '': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'file': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',\n 's3': 'scrapy.extensions.feedexport.S3FeedStorage',\n 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',\n}\nFEED_EXPORTERS = {}\nFEED_EXPORTERS_BASE = {\n 'json': 'scrapy.exporters.JsonItemExporter',\n 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',\n 'jl': 'scrapy.exporters.JsonLinesItemExporter',\n 'csv': 'scrapy.exporters.CsvItemExporter',\n 'xml': 'scrapy.exporters.XmlItemExporter',\n 'marshal': 'scrapy.exporters.MarshalItemExporter',\n 'pickle': 'scrapy.exporters.PickleItemExporter',\n}\nFEED_EXPORT_INDENT = 0\n\nFEED_STORAGE_FTP_ACTIVE = False\nFEED_STORAGE_S3_ACL = ''\n\nFILES_STORE_S3_ACL = 'private'\nFILES_STORE_GCS_ACL = ''\n\nFTP_USER = 'anonymous'\nFTP_PASSWORD = 'guest'\nFTP_PASSIVE_MODE = True\n\nHTTPCACHE_ENABLED = False\nHTTPCACHE_DIR = 'httpcache'\nHTTPCACHE_IGNORE_MISSING = False\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_ALWAYS_STORE = False\nHTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_IGNORE_SCHEMES = ['file']\nHTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []\nHTTPCACHE_DBM_MODULE = 'dbm'\nHTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'\nHTTPCACHE_GZIP = False\n\nHTTPPROXY_ENABLED = True\nHTTPPROXY_AUTH_ENCODING = 'latin-1'\n\nIMAGES_STORE_S3_ACL = 'private'\nIMAGES_STORE_GCS_ACL = ''\n\nITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'\n\nITEM_PIPELINES = {}\nITEM_PIPELINES_BASE = {}\n\nLOG_ENABLED = True\nLOG_ENCODING = 'utf-8'\nLOG_FORMATTER = 'scrapy.logformatter.LogFormatter'\nLOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'\nLOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'\nLOG_STDOUT = False\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = None\nLOG_SHORT_NAMES = False\n\nSCHEDULER_DEBUG = False\n\nLOGSTATS_INTERVAL = 60.0\n\nMAIL_HOST = 'localhost'\nMAIL_PORT = 25\nMAIL_FROM = 'scrapy@localhost'\nMAIL_PASS = None\nMAIL_USER = None\n\nMEMDEBUG_ENABLED = False # enable memory debugging\nMEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown\n\nMEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0\nMEMUSAGE_ENABLED = True\nMEMUSAGE_LIMIT_MB = 0\nMEMUSAGE_NOTIFY_MAIL = []\nMEMUSAGE_WARNING_MB = 0\n\nMETAREFRESH_ENABLED = True\nMETAREFRESH_IGNORE_TAGS = ['script', 'noscript']\nMETAREFRESH_MAXDELAY = 
100\n\nNEWSPIDER_MODULE = ''\n\nRANDOMIZE_DOWNLOAD_DELAY = True\n\nREACTOR_THREADPOOL_MAXSIZE = 10\n\nREDIRECT_ENABLED = True\nREDIRECT_MAX_TIMES = 20 # uses Firefox default setting\nREDIRECT_PRIORITY_ADJUST = +2\n\nREFERER_ENABLED = True\nREFERRER_POLICY = 'scrapy.spidermiddlewares.referer.DefaultReferrerPolicy'\n\nRETRY_ENABLED = True\nRETRY_TIMES = 2 # initial response + 2 retries = 3 requests\nRETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]\nRETRY_PRIORITY_ADJUST = -1\n\nROBOTSTXT_OBEY = False\nROBOTSTXT_PARSER = 'scrapy.robotstxt.ProtegoRobotParser'\nROBOTSTXT_USER_AGENT = None\n\nSCHEDULER = 'scrapy.core.scheduler.Scheduler'\nSCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'\nSCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'\nSCHEDULER_PRIORITY_QUEUE = 'scrapy.pqueues.ScrapyPriorityQueue'\n\nSCRAPER_SLOT_MAX_ACTIVE_SIZE = 5000000\n\nSPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'\nSPIDER_LOADER_WARN_ONLY = False\n\nSPIDER_MIDDLEWARES = {}\n\nSPIDER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,\n 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,\n 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,\n 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,\n 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,\n # Spider side\n}\n\nSPIDER_MODULES = []\n\nSTATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'\nSTATS_DUMP = True\n\nSTATSMAILER_RCPTS = []\n\nTEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))\n\nURLLENGTH_LIMIT = 2083\n\nUSER_AGENT = 'Scrapy/%s (+https://scrapy.org)' % import_module('scrapy').__version__\n\nTELNETCONSOLE_ENABLED = 1\nTELNETCONSOLE_PORT = [6023, 6073]\nTELNETCONSOLE_HOST = '127.0.0.1'\nTELNETCONSOLE_USERNAME = 'scrapy'\nTELNETCONSOLE_PASSWORD = None\n\nSPIDER_CONTRACTS = {}\nSPIDER_CONTRACTS_BASE = {\n 'scrapy.contracts.default.UrlContract': 1,\n 'scrapy.contracts.default.CallbackKeywordArgumentsContract': 1,\n 'scrapy.contracts.default.ReturnsContract': 2,\n 'scrapy.contracts.default.ScrapesContract': 3,\n}\n", "path": "scrapy/settings/default_settings.py" } ]
[ { "content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group similar settings without leaving blank lines\n* add its documentation to the available settings documentation\n (docs/topics/settings.rst)\n\n\"\"\"\n\nimport sys\nfrom importlib import import_module\nfrom os.path import join, abspath, dirname\n\nAJAXCRAWL_ENABLED = False\n\nASYNCIO_REACTOR = False\n\nAUTOTHROTTLE_ENABLED = False\nAUTOTHROTTLE_DEBUG = False\nAUTOTHROTTLE_MAX_DELAY = 60.0\nAUTOTHROTTLE_START_DELAY = 5.0\nAUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n\nBOT_NAME = 'scrapybot'\n\nCLOSESPIDER_TIMEOUT = 0\nCLOSESPIDER_PAGECOUNT = 0\nCLOSESPIDER_ITEMCOUNT = 0\nCLOSESPIDER_ERRORCOUNT = 0\n\nCOMMANDS_MODULE = ''\n\nCOMPRESSION_ENABLED = True\n\nCONCURRENT_ITEMS = 100\n\nCONCURRENT_REQUESTS = 16\nCONCURRENT_REQUESTS_PER_DOMAIN = 8\nCONCURRENT_REQUESTS_PER_IP = 0\n\nCOOKIES_ENABLED = True\nCOOKIES_DEBUG = False\n\nDEFAULT_ITEM_CLASS = 'scrapy.item.Item'\n\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n}\n\nDEPTH_LIMIT = 0\nDEPTH_STATS_VERBOSE = False\nDEPTH_PRIORITY = 0\n\nDNSCACHE_ENABLED = True\nDNSCACHE_SIZE = 10000\nDNS_RESOLVER = 'scrapy.resolver.CachingThreadedResolver'\nDNS_TIMEOUT = 60\n\nDOWNLOAD_DELAY = 0\n\nDOWNLOAD_HANDLERS = {}\nDOWNLOAD_HANDLERS_BASE = {\n 'data': 'scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler',\n 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',\n 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',\n 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',\n}\n\nDOWNLOAD_TIMEOUT = 180 # 3mins\n\nDOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m\nDOWNLOAD_WARNSIZE = 32*1024*1024 # 32m\n\nDOWNLOAD_FAIL_ON_DATALOSS = True\n\nDOWNLOADER = 'scrapy.core.downloader.Downloader'\n\nDOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'\nDOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'\nDOWNLOADER_CLIENT_TLS_CIPHERS = 'DEFAULT'\n# Use highest TLS/SSL protocol version supported by the platform, also allowing negotiation:\nDOWNLOADER_CLIENT_TLS_METHOD = 'TLS'\nDOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING = False\n\nDOWNLOADER_MIDDLEWARES = {}\n\nDOWNLOADER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,\n 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,\n 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,\n 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,\n 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,\n 
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,\n 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,\n 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,\n # Downloader side\n}\n\nDOWNLOADER_STATS = True\n\nDUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'\n\nEDITOR = 'vi'\nif sys.platform == 'win32':\n EDITOR = '%s -m idlelib.idle'\n\nEXTENSIONS = {}\n\nEXTENSIONS_BASE = {\n 'scrapy.extensions.corestats.CoreStats': 0,\n 'scrapy.extensions.telnet.TelnetConsole': 0,\n 'scrapy.extensions.memusage.MemoryUsage': 0,\n 'scrapy.extensions.memdebug.MemoryDebugger': 0,\n 'scrapy.extensions.closespider.CloseSpider': 0,\n 'scrapy.extensions.feedexport.FeedExporter': 0,\n 'scrapy.extensions.logstats.LogStats': 0,\n 'scrapy.extensions.spiderstate.SpiderState': 0,\n 'scrapy.extensions.throttle.AutoThrottle': 0,\n}\n\nFEED_TEMPDIR = None\nFEED_URI = None\nFEED_URI_PARAMS = None # a function to extend uri arguments\nFEED_FORMAT = 'jsonlines'\nFEED_STORE_EMPTY = False\nFEED_EXPORT_ENCODING = None\nFEED_EXPORT_FIELDS = None\nFEED_STORAGES = {}\nFEED_STORAGES_BASE = {\n '': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'file': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',\n 's3': 'scrapy.extensions.feedexport.S3FeedStorage',\n 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',\n}\nFEED_EXPORTERS = {}\nFEED_EXPORTERS_BASE = {\n 'json': 'scrapy.exporters.JsonItemExporter',\n 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',\n 'jl': 'scrapy.exporters.JsonLinesItemExporter',\n 'csv': 'scrapy.exporters.CsvItemExporter',\n 'xml': 'scrapy.exporters.XmlItemExporter',\n 'marshal': 'scrapy.exporters.MarshalItemExporter',\n 'pickle': 'scrapy.exporters.PickleItemExporter',\n}\nFEED_EXPORT_INDENT = 0\n\nFEED_STORAGE_FTP_ACTIVE = False\nFEED_STORAGE_S3_ACL = ''\n\nFILES_STORE_S3_ACL = 'private'\nFILES_STORE_GCS_ACL = ''\n\nFTP_USER = 'anonymous'\nFTP_PASSWORD = 'guest'\nFTP_PASSIVE_MODE = True\n\nHTTPCACHE_ENABLED = False\nHTTPCACHE_DIR = 'httpcache'\nHTTPCACHE_IGNORE_MISSING = False\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_ALWAYS_STORE = False\nHTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_IGNORE_SCHEMES = ['file']\nHTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []\nHTTPCACHE_DBM_MODULE = 'dbm'\nHTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'\nHTTPCACHE_GZIP = False\n\nHTTPPROXY_ENABLED = True\nHTTPPROXY_AUTH_ENCODING = 'latin-1'\n\nIMAGES_STORE_S3_ACL = 'private'\nIMAGES_STORE_GCS_ACL = ''\n\nITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'\n\nITEM_PIPELINES = {}\nITEM_PIPELINES_BASE = {}\n\nLOG_ENABLED = True\nLOG_ENCODING = 'utf-8'\nLOG_FORMATTER = 'scrapy.logformatter.LogFormatter'\nLOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'\nLOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'\nLOG_STDOUT = False\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = None\nLOG_SHORT_NAMES = False\n\nSCHEDULER_DEBUG = False\n\nLOGSTATS_INTERVAL = 60.0\n\nMAIL_HOST = 'localhost'\nMAIL_PORT = 25\nMAIL_FROM = 'scrapy@localhost'\nMAIL_PASS = None\nMAIL_USER = None\n\nMEMDEBUG_ENABLED = False # enable memory debugging\nMEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown\n\nMEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0\nMEMUSAGE_ENABLED = True\nMEMUSAGE_LIMIT_MB = 0\nMEMUSAGE_NOTIFY_MAIL = []\nMEMUSAGE_WARNING_MB = 0\n\nMETAREFRESH_ENABLED = True\nMETAREFRESH_IGNORE_TAGS = []\nMETAREFRESH_MAXDELAY = 100\n\nNEWSPIDER_MODULE = 
''\n\nRANDOMIZE_DOWNLOAD_DELAY = True\n\nREACTOR_THREADPOOL_MAXSIZE = 10\n\nREDIRECT_ENABLED = True\nREDIRECT_MAX_TIMES = 20 # uses Firefox default setting\nREDIRECT_PRIORITY_ADJUST = +2\n\nREFERER_ENABLED = True\nREFERRER_POLICY = 'scrapy.spidermiddlewares.referer.DefaultReferrerPolicy'\n\nRETRY_ENABLED = True\nRETRY_TIMES = 2 # initial response + 2 retries = 3 requests\nRETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]\nRETRY_PRIORITY_ADJUST = -1\n\nROBOTSTXT_OBEY = False\nROBOTSTXT_PARSER = 'scrapy.robotstxt.ProtegoRobotParser'\nROBOTSTXT_USER_AGENT = None\n\nSCHEDULER = 'scrapy.core.scheduler.Scheduler'\nSCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'\nSCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'\nSCHEDULER_PRIORITY_QUEUE = 'scrapy.pqueues.ScrapyPriorityQueue'\n\nSCRAPER_SLOT_MAX_ACTIVE_SIZE = 5000000\n\nSPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'\nSPIDER_LOADER_WARN_ONLY = False\n\nSPIDER_MIDDLEWARES = {}\n\nSPIDER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,\n 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,\n 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,\n 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,\n 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,\n # Spider side\n}\n\nSPIDER_MODULES = []\n\nSTATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'\nSTATS_DUMP = True\n\nSTATSMAILER_RCPTS = []\n\nTEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))\n\nURLLENGTH_LIMIT = 2083\n\nUSER_AGENT = 'Scrapy/%s (+https://scrapy.org)' % import_module('scrapy').__version__\n\nTELNETCONSOLE_ENABLED = 1\nTELNETCONSOLE_PORT = [6023, 6073]\nTELNETCONSOLE_HOST = '127.0.0.1'\nTELNETCONSOLE_USERNAME = 'scrapy'\nTELNETCONSOLE_PASSWORD = None\n\nSPIDER_CONTRACTS = {}\nSPIDER_CONTRACTS_BASE = {\n 'scrapy.contracts.default.UrlContract': 1,\n 'scrapy.contracts.default.CallbackKeywordArgumentsContract': 1,\n 'scrapy.contracts.default.ReturnsContract': 2,\n 'scrapy.contracts.default.ScrapesContract': 3,\n}\n", "path": "scrapy/settings/default_settings.py" } ]
diff --git a/docs/topics/downloader-middleware.rst b/docs/topics/downloader-middleware.rst
index 8a760e53be0..3ec6e0c17cc 100644
--- a/docs/topics/downloader-middleware.rst
+++ b/docs/topics/downloader-middleware.rst
@@ -868,7 +868,7 @@ Whether the Meta Refresh middleware will be enabled.
 
 METAREFRESH_IGNORE_TAGS
 ^^^^^^^^^^^^^^^^^^^^^^^
 
-Default: ``['script', 'noscript']``
+Default: ``[]``
 
 Meta tags within these tags are ignored.
diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py
index c10dc1a1cb3..1a7d35b130e 100644
--- a/scrapy/settings/default_settings.py
+++ b/scrapy/settings/default_settings.py
@@ -225,7 +225,7 @@
 MEMUSAGE_WARNING_MB = 0
 
 METAREFRESH_ENABLED = True
-METAREFRESH_IGNORE_TAGS = ['script', 'noscript']
+METAREFRESH_IGNORE_TAGS = []
 METAREFRESH_MAXDELAY = 100
 
 NEWSPIDER_MODULE = ''
diff --git a/tests/test_downloadermiddleware_redirect.py b/tests/test_downloadermiddleware_redirect.py
index e7faf14a7f0..e0f145d0efe 100644
--- a/tests/test_downloadermiddleware_redirect.py
+++ b/tests/test_downloadermiddleware_redirect.py
@@ -300,19 +300,21 @@ def test_ignore_tags_default(self):
         body = ('''<noscript><meta http-equiv="refresh" '''
                 '''content="0;URL='http://example.org/newpage'"></noscript>''')
         rsp = HtmlResponse(req.url, body=body.encode())
-        response = self.mw.process_response(req, rsp, self.spider)
-        assert isinstance(response, Response)
+        req2 = self.mw.process_response(req, rsp, self.spider)
+        assert isinstance(req2, Request)
+        self.assertEqual(req2.url, 'http://example.org/newpage')
 
-    def test_ignore_tags_empty_list(self):
-        crawler = get_crawler(Spider, {'METAREFRESH_IGNORE_TAGS': []})
+    def test_ignore_tags_1_x_list(self):
+        """Test that Scrapy 1.x behavior remains possible"""
+        settings = {'METAREFRESH_IGNORE_TAGS': ['script', 'noscript']}
+        crawler = get_crawler(Spider, settings)
         mw = MetaRefreshMiddleware.from_crawler(crawler)
         req = Request(url='http://example.org')
         body = ('''<noscript><meta http-equiv="refresh" '''
                 '''content="0;URL='http://example.org/newpage'"></noscript>''')
         rsp = HtmlResponse(req.url, body=body.encode())
-        req2 = mw.process_response(req, rsp, self.spider)
-        assert isinstance(req2, Request)
-        self.assertEqual(req2.url, 'http://example.org/newpage')
+        response = mw.process_response(req, rsp, self.spider)
+        assert isinstance(response, Response)
 
 if __name__ == "__main__":
     unittest.main()
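Since the new default for `METAREFRESH_IGNORE_TAGS` is an empty list, a project that relied on the earlier Scrapy 1.x behaviour can opt back in through its own settings, as the added test above demonstrates. A minimal sketch, assuming a hypothetical project `settings.py` (the setting names themselves come from `default_settings.py`):

```python
# settings.py of a hypothetical Scrapy project: restore the pre-change default
# so that <meta http-equiv="refresh"> tags inside <script>/<noscript> blocks
# are ignored by MetaRefreshMiddleware, as they were before this patch.
METAREFRESH_ENABLED = True
METAREFRESH_IGNORE_TAGS = ['script', 'noscript']
```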
streamlit__streamlit-6507
pandas 2.0 support

### Checklist

- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.

### Summary

```
The conflict is caused by:
    The user requested pandas==2.0.0
    streamlit 1.20.0 depends on pandas<2 and >=0.25
```

### Reproducible Code Example

```Python
pip install pandas==2.0.0
pip install streamlit==1.20.0
```

### Steps To Reproduce

_No response_

### Expected Behavior

_No response_

### Current Behavior

_No response_

### Is this a regression?

- [ ] Yes, this used to work in a previous version.

### Debug info

- Streamlit version: 1.20.0
- Python version: 3.11.1
- Operating System:
- Browser:
- Virtual environment:

### Additional Information

The following line should be updated:
https://github.com/streamlit/streamlit/blob/11950acfa537475109b421fea6da43c9d410542c/lib/setup.py#L40

### Are you willing to submit a PR?

- [ ] Yes, I am willing to submit a PR!
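The resolver conflict reported here comes from the `pandas<2,>=0.25` pin recorded in streamlit's package metadata. One quick way to inspect that pin in an affected environment (a sketch, assuming streamlit 1.20.0 is already installed) is via `importlib.metadata`:

```python
# Print the pandas requirement recorded in streamlit's package metadata.
# With streamlit 1.20.0 installed this is expected to show a pin along the
# lines of 'pandas<2,>=0.25', which is what blocks pandas 2.0.0.
from importlib.metadata import requires

pandas_pins = [r for r in (requires("streamlit") or []) if r.startswith("pandas")]
print(pandas_pins)
```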
[ { "content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport setuptools\nfrom setuptools.command.install import install\n\nTHIS_DIRECTORY = Path(__file__).parent\n\nVERSION = \"1.21.0\" # PEP-440\n\nNAME = \"streamlit\"\n\n# IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.\n# And if you do add one, make the required version as general as possible.\n# But include relevant lower bounds for any features we use from our dependencies.\nINSTALL_REQUIRES = [\n \"altair<5,>=3.2.0\",\n \"blinker>=1.0.0\",\n \"cachetools>=4.0\",\n \"click>=7.0\",\n # 1.4 introduced the functionality found in python 3.8's importlib.metadata module\n \"importlib-metadata>=1.4\",\n \"numpy\",\n \"packaging>=14.1\",\n \"pandas<2,>=0.25\",\n \"pillow>=6.2.0\",\n \"protobuf<4,>=3.12\",\n \"pyarrow>=4.0\",\n \"pympler>=0.9\",\n \"python-dateutil\",\n \"requests>=2.4\",\n \"rich>=10.11.0\",\n \"toml\",\n \"typing-extensions>=3.10.0.0\",\n \"tzlocal>=1.1\",\n \"validators>=0.2\",\n # Don't require watchdog on MacOS, since it'll fail without xcode tools.\n # Without watchdog, we fallback to a polling file watcher to check for app changes.\n \"watchdog; platform_system != 'Darwin'\",\n]\n\n# We want to exclude some dependencies in our internal Snowpark conda distribution of\n# Streamlit. These dependencies will be installed normally for both regular conda builds\n# and PyPI builds (that is, for people installing streamlit using either\n# `pip install streamlit` or `conda install -c conda-forge streamlit`)\nSNOWPARK_CONDA_EXCLUDED_DEPENDENCIES = [\n \"gitpython!=3.1.19\",\n \"pydeck>=0.1.dev5\",\n # Tornado 6.0.3 was the current Tornado version when Python 3.8, our earliest supported Python version,\n # was released (Oct 14, 2019).\n \"tornado>=6.0.3\",\n]\n\nif not os.getenv(\"SNOWPARK_CONDA_BUILD\"):\n INSTALL_REQUIRES.extend(SNOWPARK_CONDA_EXCLUDED_DEPENDENCIES)\n\nEXTRA_REQUIRES = {\"snowflake\": [\"snowflake-snowpark-python; python_version=='3.8'\"]}\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nreadme_path = THIS_DIRECTORY / \"..\" / \"README.md\"\nif readme_path.exists():\n long_description = readme_path.read_text()\nelse:\n # In some build environments (specifically in conda), we may not have the README file\n # readily available. 
In these cases, just let long_description be the empty string.\n # Note that long_description isn't used at all in these build environments, so it\n # being missing isn't problematic.\n long_description = \"\"\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=\"A faster way to build and share data apps\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://streamlit.io\",\n project_urls={\n \"Source Code\": \"https://github.com/streamlit/streamlit\",\n \"Bug Tracker\": \"https://github.com/streamlit/streamlit/issues\",\n \"Release notes\": \"https://docs.streamlit.io/library/changelog\",\n \"Documentation\": \"https://docs.streamlit.io/\",\n \"Community\": \"https://discuss.streamlit.io/\",\n \"Twitter\": \"https://twitter.com/streamlit\",\n },\n author=\"Snowflake Inc\",\n author_email=\"[email protected]\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n # We exclude Python 3.9.7 from our compatible versions due to a bug in that version\n # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and\n # https://bugs.python.org/issue45121\n python_requires=\">=3.7, !=3.9.7\",\n # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html\n package_data={\"streamlit\": [\"py.typed\", \"hello/**/*.py\"]},\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRA_REQUIRES,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.web.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py" } ]
[ { "content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport setuptools\nfrom setuptools.command.install import install\n\nTHIS_DIRECTORY = Path(__file__).parent\n\nVERSION = \"1.21.0\" # PEP-440\n\nNAME = \"streamlit\"\n\n# IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.\n# And if you do add one, make the required version as general as possible.\n# But include relevant lower bounds for any features we use from our dependencies.\nINSTALL_REQUIRES = [\n \"altair<5,>=3.2.0\",\n \"blinker>=1.0.0\",\n \"cachetools>=4.0\",\n \"click>=7.0\",\n # 1.4 introduced the functionality found in python 3.8's importlib.metadata module\n \"importlib-metadata>=1.4\",\n \"numpy\",\n \"packaging>=14.1\",\n \"pandas<3,>=0.25\",\n \"pillow>=6.2.0\",\n \"protobuf<4,>=3.12\",\n \"pyarrow>=4.0\",\n \"pympler>=0.9\",\n \"python-dateutil\",\n \"requests>=2.4\",\n \"rich>=10.11.0\",\n \"toml\",\n \"typing-extensions>=3.10.0.0\",\n \"tzlocal>=1.1\",\n \"validators>=0.2\",\n # Don't require watchdog on MacOS, since it'll fail without xcode tools.\n # Without watchdog, we fallback to a polling file watcher to check for app changes.\n \"watchdog; platform_system != 'Darwin'\",\n]\n\n# We want to exclude some dependencies in our internal Snowpark conda distribution of\n# Streamlit. These dependencies will be installed normally for both regular conda builds\n# and PyPI builds (that is, for people installing streamlit using either\n# `pip install streamlit` or `conda install -c conda-forge streamlit`)\nSNOWPARK_CONDA_EXCLUDED_DEPENDENCIES = [\n \"gitpython!=3.1.19\",\n \"pydeck>=0.1.dev5\",\n # Tornado 6.0.3 was the current Tornado version when Python 3.8, our earliest supported Python version,\n # was released (Oct 14, 2019).\n \"tornado>=6.0.3\",\n]\n\nif not os.getenv(\"SNOWPARK_CONDA_BUILD\"):\n INSTALL_REQUIRES.extend(SNOWPARK_CONDA_EXCLUDED_DEPENDENCIES)\n\nEXTRA_REQUIRES = {\"snowflake\": [\"snowflake-snowpark-python; python_version=='3.8'\"]}\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nreadme_path = THIS_DIRECTORY / \"..\" / \"README.md\"\nif readme_path.exists():\n long_description = readme_path.read_text()\nelse:\n # In some build environments (specifically in conda), we may not have the README file\n # readily available. 
In these cases, just let long_description be the empty string.\n # Note that long_description isn't used at all in these build environments, so it\n # being missing isn't problematic.\n long_description = \"\"\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=\"A faster way to build and share data apps\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://streamlit.io\",\n project_urls={\n \"Source Code\": \"https://github.com/streamlit/streamlit\",\n \"Bug Tracker\": \"https://github.com/streamlit/streamlit/issues\",\n \"Release notes\": \"https://docs.streamlit.io/library/changelog\",\n \"Documentation\": \"https://docs.streamlit.io/\",\n \"Community\": \"https://discuss.streamlit.io/\",\n \"Twitter\": \"https://twitter.com/streamlit\",\n },\n author=\"Snowflake Inc\",\n author_email=\"[email protected]\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n # We exclude Python 3.9.7 from our compatible versions due to a bug in that version\n # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and\n # https://bugs.python.org/issue45121\n python_requires=\">=3.7, !=3.9.7\",\n # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html\n package_data={\"streamlit\": [\"py.typed\", \"hello/**/*.py\"]},\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRA_REQUIRES,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.web.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py" } ]
diff --git a/lib/setup.py b/lib/setup.py
index 7e9a41d5acb9..74af9808bc31 100644
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -37,7 +37,7 @@
     "importlib-metadata>=1.4",
     "numpy",
     "packaging>=14.1",
-    "pandas<2,>=0.25",
+    "pandas<3,>=0.25",
     "pillow>=6.2.0",
     "protobuf<4,>=3.12",
     "pyarrow>=4.0",
diff --git a/lib/tests/streamlit/elements/data_editor_test.py b/lib/tests/streamlit/elements/data_editor_test.py
index e90fe44cbbc8..6540d6f49e41 100644
--- a/lib/tests/streamlit/elements/data_editor_test.py
+++ b/lib/tests/streamlit/elements/data_editor_test.py
@@ -420,7 +420,7 @@ def test_with_supported_index(self, index: pd.Index):
         self.assertIsInstance(return_df, pd.DataFrame)
 
     @unittest.skipIf(
-        is_pandas_version_less_than("2.0.0rc1") is False,
+        is_pandas_version_less_than("2.0.0") is False,
         "This test only runs if pandas is < 2.0.0",
    )
    def test_with_old_supported_index(self):
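The effect of the one-character change in `setup.py` is easiest to see with the `packaging` library: the old specifier rejects pandas 2.0.0 while the relaxed one accepts it. A small illustrative check (not part of the patch itself):

```python
# Compare the old and new pandas version pins from the diff above.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=0.25,<2")   # before the patch: "pandas<2,>=0.25"
new_pin = SpecifierSet(">=0.25,<3")   # after the patch:  "pandas<3,>=0.25"

pandas_200 = Version("2.0.0")
print(pandas_200 in old_pin)  # False -> the resolver conflict in the issue
print(pandas_200 in new_pin)  # True  -> pandas 2.0.0 is now allowed
```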
dask__distributed-3672
Adding zoom tools to performance_report

In some cases ["the devil is in the detail"]( https://en.wikipedia.org/wiki/The_devil_is_in_the_detail ), so it would be useful to have zoom tools included in the other [`performance_report`]( https://distributed.dask.org/en/latest/diagnosing-performance.html#performance-reports ) panels to allow closer investigation of where time is being spent.
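For context, the report whose panels this request refers to is produced with the `performance_report` context manager. A minimal sketch of generating one; the cluster setup and the computation are illustrative only:

```python
# Generate a standalone HTML performance report; its profile panels are the
# ones the issue would like to be zoomable.
from dask.distributed import Client, performance_report
import dask.array as da

client = Client()  # local cluster, just for illustration
x = da.random.random((10000, 10000), chunks=(1000, 1000))

with performance_report(filename="dask-report.html"):
    x.std(axis=0).compute()
```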
[ { "content": "\"\"\" This module contains utility functions to construct and manipulate counting\ndata structures for frames.\n\nWhen performing statistical profiling we obtain many call stacks. We aggregate\nthese call stacks into data structures that maintain counts of how many times\neach function in that call stack has been called. Because these stacks will\noverlap this aggregation counting structure forms a tree, such as is commonly\nvisualized by profiling tools.\n\nWe represent this tree as a nested dictionary with the following form:\n\n {\n 'identifier': 'root',\n 'description': 'A long description of the line of code being run.',\n 'count': 10 # the number of times we have seen this line\n 'children': { # callers of this line. Recursive dicts\n 'ident-b': {'description': ...\n 'identifier': 'ident-a',\n 'count': ...\n 'children': {...}},\n 'ident-b': {'description': ...\n 'identifier': 'ident-b',\n 'count': ...\n 'children': {...}}}\n }\n\"\"\"\nimport bisect\nfrom collections import defaultdict, deque\nimport linecache\nimport sys\nimport threading\nfrom time import sleep\n\nimport tlz as toolz\n\nfrom .metrics import time\nfrom .utils import format_time, color_of, parse_timedelta\n\n\ndef identifier(frame):\n \"\"\" A string identifier from a frame\n\n Strings are cheaper to use as indexes into dicts than tuples or dicts\n \"\"\"\n if frame is None:\n return \"None\"\n else:\n return \";\".join(\n (\n frame.f_code.co_name,\n frame.f_code.co_filename,\n str(frame.f_code.co_firstlineno),\n )\n )\n\n\ndef repr_frame(frame):\n \"\"\" Render a frame as a line for inclusion into a text traceback \"\"\"\n co = frame.f_code\n text = ' File \"%s\", line %s, in %s' % (co.co_filename, frame.f_lineno, co.co_name)\n line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()\n return text + \"\\n\\t\" + line\n\n\ndef info_frame(frame):\n co = frame.f_code\n line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()\n return {\n \"filename\": co.co_filename,\n \"name\": co.co_name,\n \"line_number\": frame.f_lineno,\n \"line\": line,\n }\n\n\ndef process(frame, child, state, stop=None, omit=None):\n \"\"\" Add counts from a frame stack onto existing state\n\n This recursively adds counts to the existing state dictionary and creates\n new entries for new functions.\n\n Example\n -------\n >>> import sys, threading\n >>> ident = threading.get_ident() # replace with your thread of interest\n >>> frame = sys._current_frames()[ident]\n >>> state = {'children': {}, 'count': 0, 'description': 'root',\n ... 
'identifier': 'root'}\n >>> process(frame, None, state)\n >>> state\n {'count': 1,\n 'identifier': 'root',\n 'description': 'root',\n 'children': {'...'}}\n \"\"\"\n if omit is not None and any(frame.f_code.co_filename.endswith(o) for o in omit):\n return False\n\n prev = frame.f_back\n if prev is not None and (\n stop is None or not prev.f_code.co_filename.endswith(stop)\n ):\n state = process(prev, frame, state, stop=stop)\n if state is False:\n return False\n\n ident = identifier(frame)\n\n try:\n d = state[\"children\"][ident]\n except KeyError:\n d = {\n \"count\": 0,\n \"description\": info_frame(frame),\n \"children\": {},\n \"identifier\": ident,\n }\n state[\"children\"][ident] = d\n\n state[\"count\"] += 1\n\n if child is not None:\n return d\n else:\n d[\"count\"] += 1\n\n\ndef merge(*args):\n \"\"\" Merge multiple frame states together \"\"\"\n if not args:\n return create()\n s = {arg[\"identifier\"] for arg in args}\n if len(s) != 1:\n raise ValueError(\"Expected identifiers, got %s\" % str(s))\n children = defaultdict(list)\n for arg in args:\n for child in arg[\"children\"]:\n children[child].append(arg[\"children\"][child])\n\n children = {k: merge(*v) for k, v in children.items()}\n count = sum(arg[\"count\"] for arg in args)\n return {\n \"description\": args[0][\"description\"],\n \"children\": dict(children),\n \"count\": count,\n \"identifier\": args[0][\"identifier\"],\n }\n\n\ndef create():\n return {\n \"count\": 0,\n \"children\": {},\n \"identifier\": \"root\",\n \"description\": {\"filename\": \"\", \"name\": \"\", \"line_number\": 0, \"line\": \"\"},\n }\n\n\ndef call_stack(frame):\n \"\"\" Create a call text stack from a frame\n\n Returns\n -------\n list of strings\n \"\"\"\n L = []\n while frame:\n L.append(repr_frame(frame))\n frame = frame.f_back\n return L[::-1]\n\n\ndef plot_data(state, profile_interval=0.010):\n \"\"\" Convert a profile state into data useful by Bokeh\n\n See Also\n --------\n plot_figure\n distributed.bokeh.components.ProfilePlot\n \"\"\"\n starts = []\n stops = []\n heights = []\n widths = []\n colors = []\n states = []\n times = []\n\n filenames = []\n lines = []\n line_numbers = []\n names = []\n\n def traverse(state, start, stop, height):\n if not state[\"count\"]:\n return\n starts.append(start)\n stops.append(stop)\n heights.append(height)\n width = stop - start\n widths.append(width)\n states.append(state)\n times.append(format_time(state[\"count\"] * profile_interval))\n\n desc = state[\"description\"]\n filenames.append(desc[\"filename\"])\n lines.append(desc[\"line\"])\n line_numbers.append(desc[\"line_number\"])\n names.append(desc[\"name\"])\n\n ident = state[\"identifier\"]\n\n try:\n fn = desc[\"filename\"]\n except IndexError:\n colors.append(\"gray\")\n else:\n if fn == \"<low-level>\":\n colors.append(\"lightgray\")\n else:\n colors.append(color_of(fn))\n\n delta = (stop - start) / state[\"count\"]\n\n x = start\n\n for name, child in state[\"children\"].items():\n width = child[\"count\"] * delta\n traverse(child, x, x + width, height + 1)\n x += width\n\n traverse(state, 0, 1, 0)\n percentages = [\"{:.1f}%\".format(100 * w) for w in widths]\n return {\n \"left\": starts,\n \"right\": stops,\n \"bottom\": heights,\n \"width\": widths,\n \"top\": [x + 1 for x in heights],\n \"color\": colors,\n \"states\": states,\n \"filename\": filenames,\n \"line\": lines,\n \"line_number\": line_numbers,\n \"name\": names,\n \"time\": times,\n \"percentage\": percentages,\n }\n\n\ndef _watch(thread_id, log, interval=\"20ms\", 
cycle=\"2s\", omit=None, stop=lambda: False):\n interval = parse_timedelta(interval)\n cycle = parse_timedelta(cycle)\n\n recent = create()\n last = time()\n\n while not stop():\n if time() > last + cycle:\n log.append((time(), recent))\n recent = create()\n last = time()\n try:\n frame = sys._current_frames()[thread_id]\n except KeyError:\n return\n\n process(frame, None, recent, omit=omit)\n sleep(interval)\n\n\ndef watch(\n thread_id=None,\n interval=\"20ms\",\n cycle=\"2s\",\n maxlen=1000,\n omit=None,\n stop=lambda: False,\n):\n \"\"\" Gather profile information on a particular thread\n\n This starts a new thread to watch a particular thread and returns a deque\n that holds periodic profile information.\n\n Parameters\n ----------\n thread_id: int\n interval: str\n Time per sample\n cycle: str\n Time per refreshing to a new profile state\n maxlen: int\n Passed onto deque, maximum number of periods\n omit: str\n Don't include entries that start with this filename\n stop: callable\n Function to call to see if we should stop\n\n Returns\n -------\n deque\n \"\"\"\n if thread_id is None:\n thread_id = threading.get_ident()\n\n log = deque(maxlen=maxlen)\n\n thread = threading.Thread(\n target=_watch,\n name=\"Profile\",\n kwargs={\n \"thread_id\": thread_id,\n \"interval\": interval,\n \"cycle\": cycle,\n \"log\": log,\n \"omit\": omit,\n \"stop\": stop,\n },\n )\n thread.daemon = True\n thread.start()\n\n return log\n\n\ndef get_profile(history, recent=None, start=None, stop=None, key=None):\n \"\"\" Collect profile information from a sequence of profile states\n\n Parameters\n ----------\n history: Sequence[Tuple[time, Dict]]\n A list or deque of profile states\n recent: dict\n The most recent accumulating state\n start: time\n stop: time\n \"\"\"\n now = time()\n if start is None:\n istart = 0\n else:\n istart = bisect.bisect_left(history, (start,))\n\n if stop is None:\n istop = None\n else:\n istop = bisect.bisect_right(history, (stop,)) + 1\n if istop >= len(history):\n istop = None # include end\n\n if istart == 0 and istop is None:\n history = list(history)\n else:\n iistop = len(history) if istop is None else istop\n history = [history[i] for i in range(istart, iistop)]\n\n prof = merge(*toolz.pluck(1, history))\n\n if not history:\n return create()\n\n if recent:\n prof = merge(prof, recent)\n\n return prof\n\n\ndef plot_figure(data, **kwargs):\n \"\"\" Plot profile data using Bokeh\n\n This takes the output from the function ``plot_data`` and produces a Bokeh\n figure\n\n See Also\n --------\n plot_data\n \"\"\"\n from bokeh.plotting import ColumnDataSource, figure\n from bokeh.models import HoverTool\n\n if \"states\" in data:\n data = toolz.dissoc(data, \"states\")\n\n source = ColumnDataSource(data=data)\n\n fig = figure(tools=\"tap\", **kwargs)\n r = fig.quad(\n \"left\",\n \"right\",\n \"top\",\n \"bottom\",\n color=\"color\",\n line_color=\"black\",\n line_width=2,\n source=source,\n )\n\n r.selection_glyph = None\n r.nonselection_glyph = None\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Name:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@name</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Filename:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@filename</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Line number:</span>&nbsp;\n <span style=\"font-size: 
10px; font-family: Monaco, monospace;\">@line_number</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Line:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@line</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Time:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@time</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Percentage:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@percentage</span>\n </div>\n \"\"\",\n )\n fig.add_tools(hover)\n\n fig.xaxis.visible = False\n fig.yaxis.visible = False\n fig.grid.visible = False\n\n return fig, source\n\n\ndef _remove_py_stack(frames):\n for entry in frames:\n if entry.is_python:\n break\n yield entry\n\n\ndef llprocess(frames, child, state):\n \"\"\" Add counts from low level profile information onto existing state\n\n This uses the ``stacktrace`` module to collect low level stack trace\n information and place it onto the given sttate.\n\n It is configured with the ``distributed.worker.profile.low-level`` config\n entry.\n\n See Also\n --------\n process\n ll_get_stack\n \"\"\"\n if not frames:\n return\n frame = frames.pop()\n if frames:\n state = llprocess(frames, frame, state)\n\n addr = hex(frame.addr - frame.offset)\n ident = \";\".join(map(str, (frame.name, \"<low-level>\", addr)))\n try:\n d = state[\"children\"][ident]\n except KeyError:\n d = {\n \"count\": 0,\n \"description\": {\n \"filename\": \"<low-level>\",\n \"name\": frame.name,\n \"line_number\": 0,\n \"line\": str(frame),\n },\n \"children\": {},\n \"identifier\": ident,\n }\n state[\"children\"][ident] = d\n\n state[\"count\"] += 1\n\n if child is not None:\n return d\n else:\n d[\"count\"] += 1\n\n\ndef ll_get_stack(tid):\n \"\"\" Collect low level stack information from thread id \"\"\"\n from stacktrace import get_thread_stack\n\n frames = get_thread_stack(tid, show_python=False)\n llframes = list(_remove_py_stack(frames))[::-1]\n return llframes\n", "path": "distributed/profile.py" } ]
[ { "content": "\"\"\" This module contains utility functions to construct and manipulate counting\ndata structures for frames.\n\nWhen performing statistical profiling we obtain many call stacks. We aggregate\nthese call stacks into data structures that maintain counts of how many times\neach function in that call stack has been called. Because these stacks will\noverlap this aggregation counting structure forms a tree, such as is commonly\nvisualized by profiling tools.\n\nWe represent this tree as a nested dictionary with the following form:\n\n {\n 'identifier': 'root',\n 'description': 'A long description of the line of code being run.',\n 'count': 10 # the number of times we have seen this line\n 'children': { # callers of this line. Recursive dicts\n 'ident-b': {'description': ...\n 'identifier': 'ident-a',\n 'count': ...\n 'children': {...}},\n 'ident-b': {'description': ...\n 'identifier': 'ident-b',\n 'count': ...\n 'children': {...}}}\n }\n\"\"\"\nimport bisect\nfrom collections import defaultdict, deque\nimport linecache\nimport sys\nimport threading\nfrom time import sleep\n\nimport tlz as toolz\n\nfrom .metrics import time\nfrom .utils import format_time, color_of, parse_timedelta\n\n\ndef identifier(frame):\n \"\"\" A string identifier from a frame\n\n Strings are cheaper to use as indexes into dicts than tuples or dicts\n \"\"\"\n if frame is None:\n return \"None\"\n else:\n return \";\".join(\n (\n frame.f_code.co_name,\n frame.f_code.co_filename,\n str(frame.f_code.co_firstlineno),\n )\n )\n\n\ndef repr_frame(frame):\n \"\"\" Render a frame as a line for inclusion into a text traceback \"\"\"\n co = frame.f_code\n text = ' File \"%s\", line %s, in %s' % (co.co_filename, frame.f_lineno, co.co_name)\n line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()\n return text + \"\\n\\t\" + line\n\n\ndef info_frame(frame):\n co = frame.f_code\n line = linecache.getline(co.co_filename, frame.f_lineno, frame.f_globals).lstrip()\n return {\n \"filename\": co.co_filename,\n \"name\": co.co_name,\n \"line_number\": frame.f_lineno,\n \"line\": line,\n }\n\n\ndef process(frame, child, state, stop=None, omit=None):\n \"\"\" Add counts from a frame stack onto existing state\n\n This recursively adds counts to the existing state dictionary and creates\n new entries for new functions.\n\n Example\n -------\n >>> import sys, threading\n >>> ident = threading.get_ident() # replace with your thread of interest\n >>> frame = sys._current_frames()[ident]\n >>> state = {'children': {}, 'count': 0, 'description': 'root',\n ... 
'identifier': 'root'}\n >>> process(frame, None, state)\n >>> state\n {'count': 1,\n 'identifier': 'root',\n 'description': 'root',\n 'children': {'...'}}\n \"\"\"\n if omit is not None and any(frame.f_code.co_filename.endswith(o) for o in omit):\n return False\n\n prev = frame.f_back\n if prev is not None and (\n stop is None or not prev.f_code.co_filename.endswith(stop)\n ):\n state = process(prev, frame, state, stop=stop)\n if state is False:\n return False\n\n ident = identifier(frame)\n\n try:\n d = state[\"children\"][ident]\n except KeyError:\n d = {\n \"count\": 0,\n \"description\": info_frame(frame),\n \"children\": {},\n \"identifier\": ident,\n }\n state[\"children\"][ident] = d\n\n state[\"count\"] += 1\n\n if child is not None:\n return d\n else:\n d[\"count\"] += 1\n\n\ndef merge(*args):\n \"\"\" Merge multiple frame states together \"\"\"\n if not args:\n return create()\n s = {arg[\"identifier\"] for arg in args}\n if len(s) != 1:\n raise ValueError(\"Expected identifiers, got %s\" % str(s))\n children = defaultdict(list)\n for arg in args:\n for child in arg[\"children\"]:\n children[child].append(arg[\"children\"][child])\n\n children = {k: merge(*v) for k, v in children.items()}\n count = sum(arg[\"count\"] for arg in args)\n return {\n \"description\": args[0][\"description\"],\n \"children\": dict(children),\n \"count\": count,\n \"identifier\": args[0][\"identifier\"],\n }\n\n\ndef create():\n return {\n \"count\": 0,\n \"children\": {},\n \"identifier\": \"root\",\n \"description\": {\"filename\": \"\", \"name\": \"\", \"line_number\": 0, \"line\": \"\"},\n }\n\n\ndef call_stack(frame):\n \"\"\" Create a call text stack from a frame\n\n Returns\n -------\n list of strings\n \"\"\"\n L = []\n while frame:\n L.append(repr_frame(frame))\n frame = frame.f_back\n return L[::-1]\n\n\ndef plot_data(state, profile_interval=0.010):\n \"\"\" Convert a profile state into data useful by Bokeh\n\n See Also\n --------\n plot_figure\n distributed.bokeh.components.ProfilePlot\n \"\"\"\n starts = []\n stops = []\n heights = []\n widths = []\n colors = []\n states = []\n times = []\n\n filenames = []\n lines = []\n line_numbers = []\n names = []\n\n def traverse(state, start, stop, height):\n if not state[\"count\"]:\n return\n starts.append(start)\n stops.append(stop)\n heights.append(height)\n width = stop - start\n widths.append(width)\n states.append(state)\n times.append(format_time(state[\"count\"] * profile_interval))\n\n desc = state[\"description\"]\n filenames.append(desc[\"filename\"])\n lines.append(desc[\"line\"])\n line_numbers.append(desc[\"line_number\"])\n names.append(desc[\"name\"])\n\n ident = state[\"identifier\"]\n\n try:\n fn = desc[\"filename\"]\n except IndexError:\n colors.append(\"gray\")\n else:\n if fn == \"<low-level>\":\n colors.append(\"lightgray\")\n else:\n colors.append(color_of(fn))\n\n delta = (stop - start) / state[\"count\"]\n\n x = start\n\n for name, child in state[\"children\"].items():\n width = child[\"count\"] * delta\n traverse(child, x, x + width, height + 1)\n x += width\n\n traverse(state, 0, 1, 0)\n percentages = [\"{:.1f}%\".format(100 * w) for w in widths]\n return {\n \"left\": starts,\n \"right\": stops,\n \"bottom\": heights,\n \"width\": widths,\n \"top\": [x + 1 for x in heights],\n \"color\": colors,\n \"states\": states,\n \"filename\": filenames,\n \"line\": lines,\n \"line_number\": line_numbers,\n \"name\": names,\n \"time\": times,\n \"percentage\": percentages,\n }\n\n\ndef _watch(thread_id, log, interval=\"20ms\", 
cycle=\"2s\", omit=None, stop=lambda: False):\n interval = parse_timedelta(interval)\n cycle = parse_timedelta(cycle)\n\n recent = create()\n last = time()\n\n while not stop():\n if time() > last + cycle:\n log.append((time(), recent))\n recent = create()\n last = time()\n try:\n frame = sys._current_frames()[thread_id]\n except KeyError:\n return\n\n process(frame, None, recent, omit=omit)\n sleep(interval)\n\n\ndef watch(\n thread_id=None,\n interval=\"20ms\",\n cycle=\"2s\",\n maxlen=1000,\n omit=None,\n stop=lambda: False,\n):\n \"\"\" Gather profile information on a particular thread\n\n This starts a new thread to watch a particular thread and returns a deque\n that holds periodic profile information.\n\n Parameters\n ----------\n thread_id: int\n interval: str\n Time per sample\n cycle: str\n Time per refreshing to a new profile state\n maxlen: int\n Passed onto deque, maximum number of periods\n omit: str\n Don't include entries that start with this filename\n stop: callable\n Function to call to see if we should stop\n\n Returns\n -------\n deque\n \"\"\"\n if thread_id is None:\n thread_id = threading.get_ident()\n\n log = deque(maxlen=maxlen)\n\n thread = threading.Thread(\n target=_watch,\n name=\"Profile\",\n kwargs={\n \"thread_id\": thread_id,\n \"interval\": interval,\n \"cycle\": cycle,\n \"log\": log,\n \"omit\": omit,\n \"stop\": stop,\n },\n )\n thread.daemon = True\n thread.start()\n\n return log\n\n\ndef get_profile(history, recent=None, start=None, stop=None, key=None):\n \"\"\" Collect profile information from a sequence of profile states\n\n Parameters\n ----------\n history: Sequence[Tuple[time, Dict]]\n A list or deque of profile states\n recent: dict\n The most recent accumulating state\n start: time\n stop: time\n \"\"\"\n now = time()\n if start is None:\n istart = 0\n else:\n istart = bisect.bisect_left(history, (start,))\n\n if stop is None:\n istop = None\n else:\n istop = bisect.bisect_right(history, (stop,)) + 1\n if istop >= len(history):\n istop = None # include end\n\n if istart == 0 and istop is None:\n history = list(history)\n else:\n iistop = len(history) if istop is None else istop\n history = [history[i] for i in range(istart, iistop)]\n\n prof = merge(*toolz.pluck(1, history))\n\n if not history:\n return create()\n\n if recent:\n prof = merge(prof, recent)\n\n return prof\n\n\ndef plot_figure(data, **kwargs):\n \"\"\" Plot profile data using Bokeh\n\n This takes the output from the function ``plot_data`` and produces a Bokeh\n figure\n\n See Also\n --------\n plot_data\n \"\"\"\n from bokeh.plotting import ColumnDataSource, figure\n from bokeh.models import HoverTool\n\n if \"states\" in data:\n data = toolz.dissoc(data, \"states\")\n\n source = ColumnDataSource(data=data)\n\n fig = figure(tools=\"tap,box_zoom,xwheel_zoom,reset\", **kwargs)\n r = fig.quad(\n \"left\",\n \"right\",\n \"top\",\n \"bottom\",\n color=\"color\",\n line_color=\"black\",\n line_width=2,\n source=source,\n )\n\n r.selection_glyph = None\n r.nonselection_glyph = None\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Name:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@name</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Filename:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@filename</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Line number:</span>&nbsp;\n 
<span style=\"font-size: 10px; font-family: Monaco, monospace;\">@line_number</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Line:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@line</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Time:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@time</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Percentage:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@percentage</span>\n </div>\n \"\"\",\n )\n fig.add_tools(hover)\n\n fig.xaxis.visible = False\n fig.yaxis.visible = False\n fig.grid.visible = False\n\n return fig, source\n\n\ndef _remove_py_stack(frames):\n for entry in frames:\n if entry.is_python:\n break\n yield entry\n\n\ndef llprocess(frames, child, state):\n \"\"\" Add counts from low level profile information onto existing state\n\n This uses the ``stacktrace`` module to collect low level stack trace\n information and place it onto the given sttate.\n\n It is configured with the ``distributed.worker.profile.low-level`` config\n entry.\n\n See Also\n --------\n process\n ll_get_stack\n \"\"\"\n if not frames:\n return\n frame = frames.pop()\n if frames:\n state = llprocess(frames, frame, state)\n\n addr = hex(frame.addr - frame.offset)\n ident = \";\".join(map(str, (frame.name, \"<low-level>\", addr)))\n try:\n d = state[\"children\"][ident]\n except KeyError:\n d = {\n \"count\": 0,\n \"description\": {\n \"filename\": \"<low-level>\",\n \"name\": frame.name,\n \"line_number\": 0,\n \"line\": str(frame),\n },\n \"children\": {},\n \"identifier\": ident,\n }\n state[\"children\"][ident] = d\n\n state[\"count\"] += 1\n\n if child is not None:\n return d\n else:\n d[\"count\"] += 1\n\n\ndef ll_get_stack(tid):\n \"\"\" Collect low level stack information from thread id \"\"\"\n from stacktrace import get_thread_stack\n\n frames = get_thread_stack(tid, show_python=False)\n llframes = list(_remove_py_stack(frames))[::-1]\n return llframes\n", "path": "distributed/profile.py" } ]
diff --git a/distributed/profile.py b/distributed/profile.py
index 5bf071e20da..1bf81ad6ff0 100644
--- a/distributed/profile.py
+++ b/distributed/profile.py
@@ -383,7 +383,7 @@ def plot_figure(data, **kwargs):
 
     source = ColumnDataSource(data=data)
 
-    fig = figure(tools="tap", **kwargs)
+    fig = figure(tools="tap,box_zoom,xwheel_zoom,reset", **kwargs)
     r = fig.quad(
         "left",
         "right",
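The patch relies on the fact that Bokeh's `figure()` accepts a comma-separated string of tool names, so passing `"tap,box_zoom,xwheel_zoom,reset"` adds the zoom controls to the profile plot's toolbar. A standalone sketch of the same mechanism (the quad data is made up):

```python
# Demonstrate the tools string used in the patched plot_figure call.
from bokeh.plotting import figure, show

fig = figure(tools="tap,box_zoom,xwheel_zoom,reset")
fig.quad(left=[0.0, 0.4], right=[0.4, 1.0], bottom=[0, 0], top=[1, 2],
         color=["#1f77b4", "#ff7f0e"], line_color="black")
show(fig)  # the toolbar now offers box zoom, x-wheel zoom and reset
```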
dask__dask-5627
Support chunksize parameter for read_parquet with a single file

I'd like to be able to read a single parquet file into multiple partitions, determined by the chunksize.

Without chunksize
```python
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({"a": range(100000), "b": range(100000)})
df.to_parquet("out.parquet")

df = dd.read_parquet("out.parquet", gather_statistics=True, split_row_groups=True)
df.npartitions
1
```

With chunksize
```python
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({"a": range(100000), "b": range(100000)})
df.to_parquet("out.parquet")

df = dd.read_parquet("out.parquet",
                     chunksize="10 MiB",
                     gather_statistics=True, split_row_groups=True)
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-20-25c43bb02cd0> in <module>
      7 df = dd.read_parquet("out.parquet",
      8                      chunksize="10 MiB",
----> 9                      gather_statistics=True, split_row_groups=True)

/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in read_parquet(path, columns, filters, categories, index, storage_options, engine, gather_statistics, split_row_groups, chunksize, **kwargs)
    229     # Parse dataset statistics from metadata (if available)
    230     parts, divisions, index, index_in_columns = process_statistics(
--> 231         parts, statistics, filters, index, chunksize
    232     )
    233

/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in process_statistics(parts, statistics, filters, index, chunksize)
    619     # Aggregate parts/statistics if we are splitting by row-group
    620     if chunksize:
--> 621         parts, statistics = aggregate_row_groups(parts, statistics, chunksize)
    622
    623     out = sorted_columns(statistics)

/opt/conda/envs/rapids/lib/python3.7/site-packages/dask/dataframe/io/parquet/core.py in aggregate_row_groups(parts, stats, chunksize)
    722
    723 def aggregate_row_groups(parts, stats, chunksize):
--> 724     if not stats[0]["file_path_0"]:
    725         return parts, stats
    726

KeyError: 'file_path_0'
```
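The traceback shows that `aggregate_row_groups` indexes `stats[0]["file_path_0"]`, a key that is only present when statistics were gathered across multiple files. One possible guard, shown purely as a sketch and not necessarily the change adopted upstream, is to fall back to the unaggregated parts when the key is missing:

```python
# Hypothetical defensive version of the failing check; the statistics dict
# used below is illustrative, not the real structure produced by the engines.
def aggregate_row_groups(parts, stats, chunksize):
    if not stats or not stats[0].get("file_path_0"):
        # Single-file statistics carry no file-path entry: skip aggregation
        # instead of raising KeyError.
        return parts, stats
    # ... existing row-group aggregation logic would continue here ...
    return parts, stats


# With single-file statistics this now degrades gracefully.
parts, stats = aggregate_row_groups(["piece-0"], [{"num-rows": 100000}], 10 * 2**20)
```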
[ { "content": "from distutils.version import LooseVersion\n\nimport toolz\nimport warnings\nfrom ....bytes import core # noqa\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import stringify_path\n\nfrom ...core import DataFrame, new_dd_object\nfrom ....base import tokenize\nfrom ....utils import import_required, natural_sort_key, parse_bytes\nfrom collections.abc import Mapping\nfrom ...methods import concat\n\n\ntry:\n import snappy\n\n snappy.compress\nexcept (ImportError, AttributeError):\n snappy = None\n\n\n__all__ = (\"read_parquet\", \"to_parquet\")\n\n# ----------------------------------------------------------------------\n# User API\n\n\nclass ParquetSubgraph(Mapping):\n \"\"\"\n Subgraph for reading Parquet files.\n\n Enables optimiziations (see optimize_read_parquet_getitem).\n \"\"\"\n\n def __init__(self, name, engine, fs, meta, columns, index, parts, kwargs):\n self.name = name\n self.engine = engine\n self.fs = fs\n self.meta = meta\n self.columns = columns\n self.index = index\n self.parts = parts\n self.kwargs = kwargs\n\n def __repr__(self):\n return \"ParquetSubgraph<name='{}', n_parts={}>\".format(\n self.name, len(self.parts)\n )\n\n def __getitem__(self, key):\n try:\n name, i = key\n except ValueError:\n # too many / few values to unpack\n raise KeyError(key) from None\n\n if name != self.name:\n raise KeyError(key)\n\n if i < 0 or i >= len(self.parts):\n raise KeyError(key)\n\n part = self.parts[i]\n if not isinstance(part, list):\n part = [part]\n\n return (\n read_parquet_part,\n self.engine.read_partition,\n self.fs,\n self.meta,\n [p[\"piece\"] for p in part],\n self.columns,\n self.index,\n toolz.merge(part[0][\"kwargs\"], self.kwargs or {}),\n )\n\n def __len__(self):\n return len(self.parts)\n\n def __iter__(self):\n for i in range(len(self)):\n yield (self.name, i)\n\n\ndef read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n split_row_groups=True,\n chunksize=None,\n **kwargs\n):\n \"\"\"\n Read a Parquet file into a Dask DataFrame\n\n This reads a directory of Parquet data into a Dask.dataframe, one file per\n partition. It selects the index among the sorted columns if any exist.\n\n Parameters\n ----------\n path : string or list\n Source directory for data, or path(s) to individual parquet files.\n Prefix with a protocol like ``s3://`` to read from alternative\n filesystems. To read from multiple files you can pass a globstring or a\n list of paths, with the caveat that they must all have the same\n protocol.\n columns : string, list or None (default)\n Field name(s) to read in as columns in the output. By default all\n non-index fields will be read (as determined by the pandas parquet\n metadata, if present). Provide a single field name instead of a list to\n read in the data as a Series.\n filters : list\n List of filters to apply, like ``[('x', '>', 0), ...]``. This\n implements row-group (partition) -level filtering only, i.e., to\n prevent the loading of some chunks of the data, and only if relevant\n statistics have been included in the metadata.\n index : string, list, False or None (default)\n Field name(s) to use as the output frame index. By default will be\n inferred from the pandas parquet file metadata (if present). Use False\n to read all fields as columns.\n categories : list, dict or None\n For any fields listed here, if the parquet encoding is Dictionary,\n the column will be created with dtype category. 
Use only if it is\n guaranteed that the column is encoded as dictionary in all row-groups.\n If a list, assumes up to 2**16-1 labels; if a dict, specify the number\n of labels expected; if None, will load categories automatically for\n data written by dask/fastparquet, not otherwise.\n storage_options : dict\n Key/value pairs to be passed on to the file-system backend, if any.\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet reader library to use. If only one library is installed, it\n will use that one; if both, it will use 'fastparquet'\n gather_statistics : bool or None (default).\n Gather the statistics for each dataset partition. By default,\n this will only be done if the _metadata file is available. Otherwise,\n statistics will only be gathered if True, because the footer of\n every file will be parsed (which is very slow on some systems).\n split_row_groups : bool\n If True (default) then output dataframe partitions will correspond\n to parquet-file row-groups (when enough row-group metadata is\n available). Otherwise, partitions correspond to distinct files.\n Only the \"pyarrow\" engine currently supports this argument.\n chunksize : int, str\n The target task partition size. If set, consecutive row-groups\n from the same file will be aggregated into the same output\n partition until the aggregate size reaches this value.\n **kwargs: dict (of dicts)\n Passthrough key-word arguments for read backend.\n The top-level keys correspond to the appropriate operation type, and\n the second level corresponds to the kwargs that will be passed on to\n the underlying `pyarrow` or `fastparquet` function.\n Supported top-level keys: 'dataset' (for opening a `pyarrow` dataset),\n 'file' (for opening a `fastparquet` `ParquetFile`), and 'read' (for the\n backend read function)\n\n Examples\n --------\n >>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP\n\n See Also\n --------\n to_parquet\n \"\"\"\n\n if isinstance(columns, str):\n df = read_parquet(\n path,\n [columns],\n filters,\n categories,\n index,\n storage_options,\n engine,\n gather_statistics,\n )\n return df[columns]\n\n if columns is not None:\n columns = list(columns)\n\n name = \"read-parquet-\" + tokenize(\n path,\n columns,\n filters,\n categories,\n index,\n storage_options,\n engine,\n gather_statistics,\n )\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, paths = get_fs_token_paths(path, mode=\"rb\", storage_options=storage_options)\n\n paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering\n\n auto_index_allowed = False\n if index is None:\n # User is allowing auto-detected index\n auto_index_allowed = True\n if index and isinstance(index, str):\n index = [index]\n\n meta, statistics, parts = engine.read_metadata(\n fs,\n paths,\n categories=categories,\n index=index,\n gather_statistics=gather_statistics,\n filters=filters,\n split_row_groups=split_row_groups,\n **kwargs\n )\n if meta.index.name is not None:\n index = meta.index.name\n\n # Parse dataset statistics from metadata (if available)\n parts, divisions, index, index_in_columns = process_statistics(\n parts, statistics, filters, index, chunksize\n )\n\n # Account for index and columns arguments.\n # Modify `meta` dataframe accordingly\n meta, index, columns = set_index_columns(\n meta, index, columns, index_in_columns, auto_index_allowed\n )\n\n subgraph = ParquetSubgraph(name, engine, fs, meta, columns, index, 
parts, kwargs)\n\n # Set the index that was previously treated as a column\n if index_in_columns:\n meta = meta.set_index(index)\n\n if len(divisions) < 2:\n # empty dataframe - just use meta\n subgraph = {(name, 0): meta}\n divisions = (None, None)\n\n return new_dd_object(subgraph, name, meta, divisions)\n\n\ndef read_parquet_part(func, fs, meta, part, columns, index, kwargs):\n \"\"\" Read a part of a parquet dataset\n\n This function is used by `read_parquet`.\"\"\"\n if isinstance(part, list):\n dfs = [func(fs, rg, columns.copy(), index, **kwargs) for rg in part]\n df = concat(dfs, axis=0)\n else:\n df = func(fs, part, columns, index, **kwargs)\n\n if meta.columns.name:\n df.columns.name = meta.columns.name\n columns = columns or []\n index = index or []\n return df[[c for c in columns if c not in index]]\n\n\ndef to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n write_metadata_file=True,\n compute=True,\n **kwargs\n):\n \"\"\"Store Dask.dataframe to Parquet files\n\n Notes\n -----\n Each partition will be written to a separate file.\n\n Parameters\n ----------\n df : dask.dataframe.DataFrame\n path : string or pathlib.Path\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet library to use. If only one library is installed, it will use\n that one; if both, it will use 'fastparquet'.\n compression : string or dict, optional\n Either a string like ``\"snappy\"`` or a dictionary mapping column names\n to compressors like ``{\"name\": \"gzip\", \"values\": \"snappy\"}``. The\n default is ``\"default\"``, which uses the default compression for\n whichever engine is selected.\n write_index : boolean, optional\n Whether or not to write the index. Defaults to True.\n append : bool, optional\n If False (default), construct data-set from scratch. If True, add new\n row-group(s) to an existing data-set. In the latter case, the data-set\n must exist, and the schema must match the input data.\n ignore_divisions : bool, optional\n If False (default) raises error when previous divisions overlap with\n the new appended divisions. Ignored if append=False.\n partition_on : list, optional\n Construct directory-based partitioning by splitting on these fields'\n values. Each dask partition will result in one or more datafiles,\n there will be no global groupby.\n storage_options : dict, optional\n Key/value pairs to be passed on to the file-system backend, if any.\n write_metadata_file : bool, optional\n Whether to write the special \"_metadata\" file.\n compute : bool, optional\n If True (default) then the result is computed immediately. If False\n then a ``dask.delayed`` object is returned for future computation.\n **kwargs :\n Extra options to be passed on to the specific backend.\n\n Examples\n --------\n >>> df = dd.read_csv(...) # doctest: +SKIP\n >>> dd.to_parquet(df, '/path/to/output/',...) # doctest: +SKIP\n\n See Also\n --------\n read_parquet: Read parquet data to dask.dataframe\n \"\"\"\n from dask import delayed\n\n if compression == \"default\":\n if snappy is not None:\n compression = \"snappy\"\n else:\n compression = None\n\n partition_on = partition_on or []\n if isinstance(partition_on, str):\n partition_on = [partition_on]\n\n if set(partition_on) - set(df.columns):\n raise ValueError(\n \"Partitioning on non-existent column. 
\"\n \"partition_on=%s .\"\n \"columns=%s\" % (str(partition_on), str(list(df.columns)))\n )\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, _ = get_fs_token_paths(path, mode=\"wb\", storage_options=storage_options)\n # Trim any protocol information from the path before forwarding\n path = fs._strip_protocol(path)\n\n # Save divisions and corresponding index name. This is necessary,\n # because we may be resetting the index to write the file\n division_info = {\"divisions\": df.divisions, \"name\": df.index.name}\n if division_info[\"name\"] is None:\n # As of 0.24.2, pandas will rename an index with name=None\n # when df.reset_index() is called. The default name is \"index\",\n # (or \"level_0\" if \"index\" is already a column name)\n division_info[\"name\"] = \"index\" if \"index\" not in df.columns else \"level_0\"\n\n # If write_index==True (default), reset the index and record the\n # name of the original index in `index_cols` (will be `index` if None,\n # or `level_0` if `index` is already a column name).\n # `fastparquet` will use `index_cols` to specify the index column(s)\n # in the metadata. `pyarrow` will revert the `reset_index` call\n # below if `index_cols` is populated (because pyarrow will want to handle\n # index preservation itself). For both engines, the column index\n # will be written to \"pandas metadata\" if write_index=True\n index_cols = []\n if write_index:\n real_cols = set(df.columns)\n df = df.reset_index()\n index_cols = [c for c in set(df.columns).difference(real_cols)]\n else:\n # Not writing index - might as well drop it\n df = df.reset_index(drop=True)\n\n _to_parquet_kwargs = {\n \"engine\",\n \"compression\",\n \"write_index\",\n \"append\",\n \"ignore_divisions\",\n \"partition_on\",\n \"storage_options\",\n \"write_metadata_file\",\n \"compute\",\n }\n kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}\n\n # Engine-specific initialization steps to write the dataset.\n # Possibly create parquet metadata, and load existing stuff if appending\n meta, i_offset = engine.initialize_write(\n df,\n fs,\n path,\n append=append,\n ignore_divisions=ignore_divisions,\n partition_on=partition_on,\n division_info=division_info,\n index_cols=index_cols,\n **kwargs_pass\n )\n\n # Use i_offset and df.npartitions to define file-name list\n filenames = [\"part.%i.parquet\" % (i + i_offset) for i in range(df.npartitions)]\n\n # write parts\n dwrite = delayed(engine.write_partition)\n parts = [\n dwrite(\n d,\n path,\n fs,\n filename,\n partition_on,\n write_metadata_file,\n fmd=meta,\n compression=compression,\n index_cols=index_cols,\n **kwargs_pass\n )\n for d, filename in zip(df.to_delayed(), filenames)\n ]\n\n # single task to complete\n out = delayed(lambda x: None)(parts)\n if write_metadata_file:\n out = delayed(engine.write_metadata)(\n parts, meta, fs, path, append=append, compression=compression\n )\n\n if compute:\n out = out.compute()\n return out\n\n\n_ENGINES = {}\n\n\ndef get_engine(engine):\n \"\"\"Get the parquet engine backend implementation.\n\n Parameters\n ----------\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet reader library to use. 
Defaults to fastparquet if both are\n installed\n\n Returns\n -------\n A dict containing a ``'read'`` and ``'write'`` function.\n \"\"\"\n if engine in _ENGINES:\n return _ENGINES[engine]\n\n if engine == \"auto\":\n for eng in [\"fastparquet\", \"pyarrow\"]:\n try:\n return get_engine(eng)\n except RuntimeError:\n pass\n else:\n raise RuntimeError(\"Please install either fastparquet or pyarrow\")\n\n elif engine == \"fastparquet\":\n import_required(\"fastparquet\", \"`fastparquet` not installed\")\n from .fastparquet import FastParquetEngine\n\n _ENGINES[\"fastparquet\"] = eng = FastParquetEngine\n return eng\n\n elif engine == \"pyarrow\" or engine == \"arrow\":\n pa = import_required(\"pyarrow\", \"`pyarrow` not installed\")\n from .arrow import ArrowEngine\n\n if LooseVersion(pa.__version__) < \"0.13.1\":\n raise RuntimeError(\"PyArrow version >= 0.13.1 required\")\n\n _ENGINES[\"pyarrow\"] = eng = ArrowEngine\n return eng\n\n else:\n raise ValueError(\n 'Unsupported engine: \"{0}\".'.format(engine)\n + ' Valid choices include \"pyarrow\" and \"fastparquet\".'\n )\n\n\n#####################\n# Utility Functions #\n#####################\n\n\ndef sorted_columns(statistics):\n \"\"\" Find sorted columns given row-group statistics\n\n This finds all columns that are sorted, along with appropriate divisions\n values for those columns\n\n Returns\n -------\n out: List of {'name': str, 'divisions': List[str]} dictionaries\n \"\"\"\n if not statistics:\n return []\n\n out = []\n for i, c in enumerate(statistics[0][\"columns\"]):\n if not all(\n \"min\" in s[\"columns\"][i] and \"max\" in s[\"columns\"][i] for s in statistics\n ):\n continue\n divisions = [c[\"min\"]]\n max = c[\"max\"]\n success = True\n for stats in statistics[1:]:\n c = stats[\"columns\"][i]\n if c[\"min\"] is None:\n success = False\n break\n if c[\"min\"] >= max:\n divisions.append(c[\"min\"])\n max = c[\"max\"]\n else:\n success = False\n break\n\n if success:\n divisions.append(max)\n assert divisions == sorted(divisions)\n out.append({\"name\": c[\"name\"], \"divisions\": divisions})\n\n return out\n\n\ndef apply_filters(parts, statistics, filters):\n \"\"\" Apply filters onto parts/statistics pairs\n\n Parameters\n ----------\n parts: list\n Tokens corresponding to row groups to read in the future\n statistics: List[dict]\n List of statistics for each part, including min and max values\n filters: List[Tuple[str, str, Any]]\n List like [('x', '>', 5), ('y', '==', 'Alice')]\n\n Returns\n -------\n parts, statistics: the same as the input, but possibly a subset\n \"\"\"\n for column, operator, value in filters:\n out_parts = []\n out_statistics = []\n for part, stats in zip(parts, statistics):\n if \"filter\" in stats and stats[\"filter\"]:\n continue # Filtered by engine\n try:\n c = toolz.groupby(\"name\", stats[\"columns\"])[column][0]\n min = c[\"min\"]\n max = c[\"max\"]\n except KeyError:\n out_parts.append(part)\n out_statistics.append(stats)\n else:\n if (\n operator == \"==\"\n and min <= value <= max\n or operator == \"<\"\n and min < value\n or operator == \"<=\"\n and min <= value\n or operator == \">\"\n and max > value\n or operator == \">=\"\n and max >= value\n ):\n out_parts.append(part)\n out_statistics.append(stats)\n\n parts, statistics = out_parts, out_statistics\n\n return parts, statistics\n\n\ndef process_statistics(parts, statistics, filters, index, chunksize):\n \"\"\"Process row-group column statistics in metadata\n Used in read_parquet.\n \"\"\"\n index_in_columns = False\n if statistics:\n 
result = list(\n zip(\n *[\n (part, stats)\n for part, stats in zip(parts, statistics)\n if stats[\"num-rows\"] > 0\n ]\n )\n )\n parts, statistics = result or [[], []]\n if filters:\n parts, statistics = apply_filters(parts, statistics, filters)\n\n # Aggregate parts/statistics if we are splitting by row-group\n if chunksize:\n parts, statistics = aggregate_row_groups(parts, statistics, chunksize)\n\n out = sorted_columns(statistics)\n\n if index and isinstance(index, str):\n index = [index]\n if index and out:\n # Only one valid column\n out = [o for o in out if o[\"name\"] in index]\n if index is not False and len(out) == 1:\n # Use only sorted column with statistics as the index\n divisions = out[0][\"divisions\"]\n if index is None:\n index_in_columns = True\n index = [out[0][\"name\"]]\n elif index != [out[0][\"name\"]]:\n raise ValueError(\"Specified index is invalid.\\nindex: {}\".format(index))\n elif index is not False and len(out) > 1:\n if any(o[\"name\"] == \"index\" for o in out):\n # Use sorted column named \"index\" as the index\n [o] = [o for o in out if o[\"name\"] == \"index\"]\n divisions = o[\"divisions\"]\n if index is None:\n index = [o[\"name\"]]\n index_in_columns = True\n elif index != [o[\"name\"]]:\n raise ValueError(\n \"Specified index is invalid.\\nindex: {}\".format(index)\n )\n else:\n # Multiple sorted columns found, cannot autodetect the index\n warnings.warn(\n \"Multiple sorted columns found %s, cannot\\n \"\n \"autodetect index. Will continue without an index.\\n\"\n \"To pick an index column, use the index= keyword; to \\n\"\n \"silence this warning use index=False.\"\n \"\" % [o[\"name\"] for o in out],\n RuntimeWarning,\n )\n index = False\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n\n return parts, divisions, index, index_in_columns\n\n\ndef set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed):\n \"\"\"Handle index/column arguments, and modify `meta`\n Used in read_parquet.\n \"\"\"\n ignore_index_column_intersection = False\n if columns is None:\n # User didn't specify columns, so ignore any intersection\n # of auto-detected values with the index (if necessary)\n ignore_index_column_intersection = True\n columns = [c for c in meta.columns]\n\n if not set(columns).issubset(set(meta.columns)):\n raise ValueError(\n \"The following columns were not found in the dataset %s\\n\"\n \"The following columns were found %s\"\n % (set(columns) - set(meta.columns), meta.columns)\n )\n\n if index:\n if isinstance(index, str):\n index = [index]\n if isinstance(columns, str):\n columns = [columns]\n\n if ignore_index_column_intersection:\n columns = [col for col in columns if col not in index]\n if set(index).intersection(columns):\n if auto_index_allowed:\n raise ValueError(\n \"Specified index and column arguments must not intersect\"\n \" (set index=False or remove the detected index from columns).\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n else:\n raise ValueError(\n \"Specified index and column arguments must not intersect.\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n\n # Leaving index as a column in `meta`, because the index\n # will be reset below (in case the index was detected after\n # meta was created)\n if index_in_columns:\n meta = meta[columns + index]\n else:\n meta = meta[columns]\n\n else:\n meta = meta[list(columns)]\n\n return meta, index, columns\n\n\ndef aggregate_row_groups(parts, stats, 
chunksize):\n if not stats[0][\"file_path_0\"]:\n return parts, stats\n\n parts_agg = []\n stats_agg = []\n chunksize = parse_bytes(chunksize)\n next_part, next_stat = [parts[0].copy()], stats[0].copy()\n for i in range(1, len(parts)):\n stat, part = stats[i], parts[i]\n if (stat[\"file_path_0\"] == next_stat[\"file_path_0\"]) and (\n (next_stat[\"total_byte_size\"] + stat[\"total_byte_size\"]) <= chunksize\n ):\n # Update part list\n next_part.append(part)\n\n # Update Statistics\n next_stat[\"total_byte_size\"] += stat[\"total_byte_size\"]\n next_stat[\"num-rows\"] += stat[\"num-rows\"]\n for col, col_add in zip(next_stat[\"columns\"], stat[\"columns\"]):\n if col[\"name\"] != col_add[\"name\"]:\n raise ValueError(\"Columns are different!!\")\n if \"null_count\" in col:\n col[\"null_count\"] += col_add[\"null_count\"]\n if \"min\" in col:\n col[\"min\"] = min(col[\"min\"], col_add[\"min\"])\n if \"max\" in col:\n col[\"max\"] = max(col[\"max\"], col_add[\"max\"])\n else:\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n next_part, next_stat = [part.copy()], stat.copy()\n\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n\n return parts_agg, stats_agg\n\n\nDataFrame.to_parquet.__doc__ = to_parquet.__doc__\n", "path": "dask/dataframe/io/parquet/core.py" } ]
[ { "content": "from distutils.version import LooseVersion\n\nimport toolz\nimport warnings\nfrom ....bytes import core # noqa\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import stringify_path\n\nfrom ...core import DataFrame, new_dd_object\nfrom ....base import tokenize\nfrom ....utils import import_required, natural_sort_key, parse_bytes\nfrom collections.abc import Mapping\nfrom ...methods import concat\n\n\ntry:\n import snappy\n\n snappy.compress\nexcept (ImportError, AttributeError):\n snappy = None\n\n\n__all__ = (\"read_parquet\", \"to_parquet\")\n\n# ----------------------------------------------------------------------\n# User API\n\n\nclass ParquetSubgraph(Mapping):\n \"\"\"\n Subgraph for reading Parquet files.\n\n Enables optimiziations (see optimize_read_parquet_getitem).\n \"\"\"\n\n def __init__(self, name, engine, fs, meta, columns, index, parts, kwargs):\n self.name = name\n self.engine = engine\n self.fs = fs\n self.meta = meta\n self.columns = columns\n self.index = index\n self.parts = parts\n self.kwargs = kwargs\n\n def __repr__(self):\n return \"ParquetSubgraph<name='{}', n_parts={}>\".format(\n self.name, len(self.parts)\n )\n\n def __getitem__(self, key):\n try:\n name, i = key\n except ValueError:\n # too many / few values to unpack\n raise KeyError(key) from None\n\n if name != self.name:\n raise KeyError(key)\n\n if i < 0 or i >= len(self.parts):\n raise KeyError(key)\n\n part = self.parts[i]\n if not isinstance(part, list):\n part = [part]\n\n return (\n read_parquet_part,\n self.engine.read_partition,\n self.fs,\n self.meta,\n [p[\"piece\"] for p in part],\n self.columns,\n self.index,\n toolz.merge(part[0][\"kwargs\"], self.kwargs or {}),\n )\n\n def __len__(self):\n return len(self.parts)\n\n def __iter__(self):\n for i in range(len(self)):\n yield (self.name, i)\n\n\ndef read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n split_row_groups=True,\n chunksize=None,\n **kwargs\n):\n \"\"\"\n Read a Parquet file into a Dask DataFrame\n\n This reads a directory of Parquet data into a Dask.dataframe, one file per\n partition. It selects the index among the sorted columns if any exist.\n\n Parameters\n ----------\n path : string or list\n Source directory for data, or path(s) to individual parquet files.\n Prefix with a protocol like ``s3://`` to read from alternative\n filesystems. To read from multiple files you can pass a globstring or a\n list of paths, with the caveat that they must all have the same\n protocol.\n columns : string, list or None (default)\n Field name(s) to read in as columns in the output. By default all\n non-index fields will be read (as determined by the pandas parquet\n metadata, if present). Provide a single field name instead of a list to\n read in the data as a Series.\n filters : list\n List of filters to apply, like ``[('x', '>', 0), ...]``. This\n implements row-group (partition) -level filtering only, i.e., to\n prevent the loading of some chunks of the data, and only if relevant\n statistics have been included in the metadata.\n index : string, list, False or None (default)\n Field name(s) to use as the output frame index. By default will be\n inferred from the pandas parquet file metadata (if present). Use False\n to read all fields as columns.\n categories : list, dict or None\n For any fields listed here, if the parquet encoding is Dictionary,\n the column will be created with dtype category. 
Use only if it is\n guaranteed that the column is encoded as dictionary in all row-groups.\n If a list, assumes up to 2**16-1 labels; if a dict, specify the number\n of labels expected; if None, will load categories automatically for\n data written by dask/fastparquet, not otherwise.\n storage_options : dict\n Key/value pairs to be passed on to the file-system backend, if any.\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet reader library to use. If only one library is installed, it\n will use that one; if both, it will use 'fastparquet'\n gather_statistics : bool or None (default).\n Gather the statistics for each dataset partition. By default,\n this will only be done if the _metadata file is available. Otherwise,\n statistics will only be gathered if True, because the footer of\n every file will be parsed (which is very slow on some systems).\n split_row_groups : bool\n If True (default) then output dataframe partitions will correspond\n to parquet-file row-groups (when enough row-group metadata is\n available). Otherwise, partitions correspond to distinct files.\n Only the \"pyarrow\" engine currently supports this argument.\n chunksize : int, str\n The target task partition size. If set, consecutive row-groups\n from the same file will be aggregated into the same output\n partition until the aggregate size reaches this value.\n **kwargs: dict (of dicts)\n Passthrough key-word arguments for read backend.\n The top-level keys correspond to the appropriate operation type, and\n the second level corresponds to the kwargs that will be passed on to\n the underlying `pyarrow` or `fastparquet` function.\n Supported top-level keys: 'dataset' (for opening a `pyarrow` dataset),\n 'file' (for opening a `fastparquet` `ParquetFile`), and 'read' (for the\n backend read function)\n\n Examples\n --------\n >>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP\n\n See Also\n --------\n to_parquet\n \"\"\"\n\n if isinstance(columns, str):\n df = read_parquet(\n path,\n [columns],\n filters,\n categories,\n index,\n storage_options,\n engine,\n gather_statistics,\n )\n return df[columns]\n\n if columns is not None:\n columns = list(columns)\n\n name = \"read-parquet-\" + tokenize(\n path,\n columns,\n filters,\n categories,\n index,\n storage_options,\n engine,\n gather_statistics,\n )\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, paths = get_fs_token_paths(path, mode=\"rb\", storage_options=storage_options)\n\n paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering\n\n auto_index_allowed = False\n if index is None:\n # User is allowing auto-detected index\n auto_index_allowed = True\n if index and isinstance(index, str):\n index = [index]\n\n meta, statistics, parts = engine.read_metadata(\n fs,\n paths,\n categories=categories,\n index=index,\n gather_statistics=gather_statistics,\n filters=filters,\n split_row_groups=split_row_groups,\n **kwargs\n )\n if meta.index.name is not None:\n index = meta.index.name\n\n # Parse dataset statistics from metadata (if available)\n parts, divisions, index, index_in_columns = process_statistics(\n parts, statistics, filters, index, chunksize\n )\n\n # Account for index and columns arguments.\n # Modify `meta` dataframe accordingly\n meta, index, columns = set_index_columns(\n meta, index, columns, index_in_columns, auto_index_allowed\n )\n\n subgraph = ParquetSubgraph(name, engine, fs, meta, columns, index, 
parts, kwargs)\n\n # Set the index that was previously treated as a column\n if index_in_columns:\n meta = meta.set_index(index)\n\n if len(divisions) < 2:\n # empty dataframe - just use meta\n subgraph = {(name, 0): meta}\n divisions = (None, None)\n\n return new_dd_object(subgraph, name, meta, divisions)\n\n\ndef read_parquet_part(func, fs, meta, part, columns, index, kwargs):\n \"\"\" Read a part of a parquet dataset\n\n This function is used by `read_parquet`.\"\"\"\n if isinstance(part, list):\n dfs = [func(fs, rg, columns.copy(), index, **kwargs) for rg in part]\n df = concat(dfs, axis=0)\n else:\n df = func(fs, part, columns, index, **kwargs)\n\n if meta.columns.name:\n df.columns.name = meta.columns.name\n columns = columns or []\n index = index or []\n return df[[c for c in columns if c not in index]]\n\n\ndef to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n write_metadata_file=True,\n compute=True,\n **kwargs\n):\n \"\"\"Store Dask.dataframe to Parquet files\n\n Notes\n -----\n Each partition will be written to a separate file.\n\n Parameters\n ----------\n df : dask.dataframe.DataFrame\n path : string or pathlib.Path\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet library to use. If only one library is installed, it will use\n that one; if both, it will use 'fastparquet'.\n compression : string or dict, optional\n Either a string like ``\"snappy\"`` or a dictionary mapping column names\n to compressors like ``{\"name\": \"gzip\", \"values\": \"snappy\"}``. The\n default is ``\"default\"``, which uses the default compression for\n whichever engine is selected.\n write_index : boolean, optional\n Whether or not to write the index. Defaults to True.\n append : bool, optional\n If False (default), construct data-set from scratch. If True, add new\n row-group(s) to an existing data-set. In the latter case, the data-set\n must exist, and the schema must match the input data.\n ignore_divisions : bool, optional\n If False (default) raises error when previous divisions overlap with\n the new appended divisions. Ignored if append=False.\n partition_on : list, optional\n Construct directory-based partitioning by splitting on these fields'\n values. Each dask partition will result in one or more datafiles,\n there will be no global groupby.\n storage_options : dict, optional\n Key/value pairs to be passed on to the file-system backend, if any.\n write_metadata_file : bool, optional\n Whether to write the special \"_metadata\" file.\n compute : bool, optional\n If True (default) then the result is computed immediately. If False\n then a ``dask.delayed`` object is returned for future computation.\n **kwargs :\n Extra options to be passed on to the specific backend.\n\n Examples\n --------\n >>> df = dd.read_csv(...) # doctest: +SKIP\n >>> dd.to_parquet(df, '/path/to/output/',...) # doctest: +SKIP\n\n See Also\n --------\n read_parquet: Read parquet data to dask.dataframe\n \"\"\"\n from dask import delayed\n\n if compression == \"default\":\n if snappy is not None:\n compression = \"snappy\"\n else:\n compression = None\n\n partition_on = partition_on or []\n if isinstance(partition_on, str):\n partition_on = [partition_on]\n\n if set(partition_on) - set(df.columns):\n raise ValueError(\n \"Partitioning on non-existent column. 
\"\n \"partition_on=%s .\"\n \"columns=%s\" % (str(partition_on), str(list(df.columns)))\n )\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, _ = get_fs_token_paths(path, mode=\"wb\", storage_options=storage_options)\n # Trim any protocol information from the path before forwarding\n path = fs._strip_protocol(path)\n\n # Save divisions and corresponding index name. This is necessary,\n # because we may be resetting the index to write the file\n division_info = {\"divisions\": df.divisions, \"name\": df.index.name}\n if division_info[\"name\"] is None:\n # As of 0.24.2, pandas will rename an index with name=None\n # when df.reset_index() is called. The default name is \"index\",\n # (or \"level_0\" if \"index\" is already a column name)\n division_info[\"name\"] = \"index\" if \"index\" not in df.columns else \"level_0\"\n\n # If write_index==True (default), reset the index and record the\n # name of the original index in `index_cols` (will be `index` if None,\n # or `level_0` if `index` is already a column name).\n # `fastparquet` will use `index_cols` to specify the index column(s)\n # in the metadata. `pyarrow` will revert the `reset_index` call\n # below if `index_cols` is populated (because pyarrow will want to handle\n # index preservation itself). For both engines, the column index\n # will be written to \"pandas metadata\" if write_index=True\n index_cols = []\n if write_index:\n real_cols = set(df.columns)\n df = df.reset_index()\n index_cols = [c for c in set(df.columns).difference(real_cols)]\n else:\n # Not writing index - might as well drop it\n df = df.reset_index(drop=True)\n\n _to_parquet_kwargs = {\n \"engine\",\n \"compression\",\n \"write_index\",\n \"append\",\n \"ignore_divisions\",\n \"partition_on\",\n \"storage_options\",\n \"write_metadata_file\",\n \"compute\",\n }\n kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}\n\n # Engine-specific initialization steps to write the dataset.\n # Possibly create parquet metadata, and load existing stuff if appending\n meta, i_offset = engine.initialize_write(\n df,\n fs,\n path,\n append=append,\n ignore_divisions=ignore_divisions,\n partition_on=partition_on,\n division_info=division_info,\n index_cols=index_cols,\n **kwargs_pass\n )\n\n # Use i_offset and df.npartitions to define file-name list\n filenames = [\"part.%i.parquet\" % (i + i_offset) for i in range(df.npartitions)]\n\n # write parts\n dwrite = delayed(engine.write_partition)\n parts = [\n dwrite(\n d,\n path,\n fs,\n filename,\n partition_on,\n write_metadata_file,\n fmd=meta,\n compression=compression,\n index_cols=index_cols,\n **kwargs_pass\n )\n for d, filename in zip(df.to_delayed(), filenames)\n ]\n\n # single task to complete\n out = delayed(lambda x: None)(parts)\n if write_metadata_file:\n out = delayed(engine.write_metadata)(\n parts, meta, fs, path, append=append, compression=compression\n )\n\n if compute:\n out = out.compute()\n return out\n\n\n_ENGINES = {}\n\n\ndef get_engine(engine):\n \"\"\"Get the parquet engine backend implementation.\n\n Parameters\n ----------\n engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n Parquet reader library to use. 
Defaults to fastparquet if both are\n installed\n\n Returns\n -------\n A dict containing a ``'read'`` and ``'write'`` function.\n \"\"\"\n if engine in _ENGINES:\n return _ENGINES[engine]\n\n if engine == \"auto\":\n for eng in [\"fastparquet\", \"pyarrow\"]:\n try:\n return get_engine(eng)\n except RuntimeError:\n pass\n else:\n raise RuntimeError(\"Please install either fastparquet or pyarrow\")\n\n elif engine == \"fastparquet\":\n import_required(\"fastparquet\", \"`fastparquet` not installed\")\n from .fastparquet import FastParquetEngine\n\n _ENGINES[\"fastparquet\"] = eng = FastParquetEngine\n return eng\n\n elif engine == \"pyarrow\" or engine == \"arrow\":\n pa = import_required(\"pyarrow\", \"`pyarrow` not installed\")\n from .arrow import ArrowEngine\n\n if LooseVersion(pa.__version__) < \"0.13.1\":\n raise RuntimeError(\"PyArrow version >= 0.13.1 required\")\n\n _ENGINES[\"pyarrow\"] = eng = ArrowEngine\n return eng\n\n else:\n raise ValueError(\n 'Unsupported engine: \"{0}\".'.format(engine)\n + ' Valid choices include \"pyarrow\" and \"fastparquet\".'\n )\n\n\n#####################\n# Utility Functions #\n#####################\n\n\ndef sorted_columns(statistics):\n \"\"\" Find sorted columns given row-group statistics\n\n This finds all columns that are sorted, along with appropriate divisions\n values for those columns\n\n Returns\n -------\n out: List of {'name': str, 'divisions': List[str]} dictionaries\n \"\"\"\n if not statistics:\n return []\n\n out = []\n for i, c in enumerate(statistics[0][\"columns\"]):\n if not all(\n \"min\" in s[\"columns\"][i] and \"max\" in s[\"columns\"][i] for s in statistics\n ):\n continue\n divisions = [c[\"min\"]]\n max = c[\"max\"]\n success = True\n for stats in statistics[1:]:\n c = stats[\"columns\"][i]\n if c[\"min\"] is None:\n success = False\n break\n if c[\"min\"] >= max:\n divisions.append(c[\"min\"])\n max = c[\"max\"]\n else:\n success = False\n break\n\n if success:\n divisions.append(max)\n assert divisions == sorted(divisions)\n out.append({\"name\": c[\"name\"], \"divisions\": divisions})\n\n return out\n\n\ndef apply_filters(parts, statistics, filters):\n \"\"\" Apply filters onto parts/statistics pairs\n\n Parameters\n ----------\n parts: list\n Tokens corresponding to row groups to read in the future\n statistics: List[dict]\n List of statistics for each part, including min and max values\n filters: List[Tuple[str, str, Any]]\n List like [('x', '>', 5), ('y', '==', 'Alice')]\n\n Returns\n -------\n parts, statistics: the same as the input, but possibly a subset\n \"\"\"\n for column, operator, value in filters:\n out_parts = []\n out_statistics = []\n for part, stats in zip(parts, statistics):\n if \"filter\" in stats and stats[\"filter\"]:\n continue # Filtered by engine\n try:\n c = toolz.groupby(\"name\", stats[\"columns\"])[column][0]\n min = c[\"min\"]\n max = c[\"max\"]\n except KeyError:\n out_parts.append(part)\n out_statistics.append(stats)\n else:\n if (\n operator == \"==\"\n and min <= value <= max\n or operator == \"<\"\n and min < value\n or operator == \"<=\"\n and min <= value\n or operator == \">\"\n and max > value\n or operator == \">=\"\n and max >= value\n ):\n out_parts.append(part)\n out_statistics.append(stats)\n\n parts, statistics = out_parts, out_statistics\n\n return parts, statistics\n\n\ndef process_statistics(parts, statistics, filters, index, chunksize):\n \"\"\"Process row-group column statistics in metadata\n Used in read_parquet.\n \"\"\"\n index_in_columns = False\n if statistics:\n 
result = list(\n zip(\n *[\n (part, stats)\n for part, stats in zip(parts, statistics)\n if stats[\"num-rows\"] > 0\n ]\n )\n )\n parts, statistics = result or [[], []]\n if filters:\n parts, statistics = apply_filters(parts, statistics, filters)\n\n # Aggregate parts/statistics if we are splitting by row-group\n if chunksize:\n parts, statistics = aggregate_row_groups(parts, statistics, chunksize)\n\n out = sorted_columns(statistics)\n\n if index and isinstance(index, str):\n index = [index]\n if index and out:\n # Only one valid column\n out = [o for o in out if o[\"name\"] in index]\n if index is not False and len(out) == 1:\n # Use only sorted column with statistics as the index\n divisions = out[0][\"divisions\"]\n if index is None:\n index_in_columns = True\n index = [out[0][\"name\"]]\n elif index != [out[0][\"name\"]]:\n raise ValueError(\"Specified index is invalid.\\nindex: {}\".format(index))\n elif index is not False and len(out) > 1:\n if any(o[\"name\"] == \"index\" for o in out):\n # Use sorted column named \"index\" as the index\n [o] = [o for o in out if o[\"name\"] == \"index\"]\n divisions = o[\"divisions\"]\n if index is None:\n index = [o[\"name\"]]\n index_in_columns = True\n elif index != [o[\"name\"]]:\n raise ValueError(\n \"Specified index is invalid.\\nindex: {}\".format(index)\n )\n else:\n # Multiple sorted columns found, cannot autodetect the index\n warnings.warn(\n \"Multiple sorted columns found %s, cannot\\n \"\n \"autodetect index. Will continue without an index.\\n\"\n \"To pick an index column, use the index= keyword; to \\n\"\n \"silence this warning use index=False.\"\n \"\" % [o[\"name\"] for o in out],\n RuntimeWarning,\n )\n index = False\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n\n return parts, divisions, index, index_in_columns\n\n\ndef set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed):\n \"\"\"Handle index/column arguments, and modify `meta`\n Used in read_parquet.\n \"\"\"\n ignore_index_column_intersection = False\n if columns is None:\n # User didn't specify columns, so ignore any intersection\n # of auto-detected values with the index (if necessary)\n ignore_index_column_intersection = True\n columns = [c for c in meta.columns]\n\n if not set(columns).issubset(set(meta.columns)):\n raise ValueError(\n \"The following columns were not found in the dataset %s\\n\"\n \"The following columns were found %s\"\n % (set(columns) - set(meta.columns), meta.columns)\n )\n\n if index:\n if isinstance(index, str):\n index = [index]\n if isinstance(columns, str):\n columns = [columns]\n\n if ignore_index_column_intersection:\n columns = [col for col in columns if col not in index]\n if set(index).intersection(columns):\n if auto_index_allowed:\n raise ValueError(\n \"Specified index and column arguments must not intersect\"\n \" (set index=False or remove the detected index from columns).\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n else:\n raise ValueError(\n \"Specified index and column arguments must not intersect.\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n\n # Leaving index as a column in `meta`, because the index\n # will be reset below (in case the index was detected after\n # meta was created)\n if index_in_columns:\n meta = meta[columns + index]\n else:\n meta = meta[columns]\n\n else:\n meta = meta[list(columns)]\n\n return meta, index, columns\n\n\ndef aggregate_row_groups(parts, stats, 
chunksize):\n if not stats[0].get(\"file_path_0\", None):\n return parts, stats\n\n parts_agg = []\n stats_agg = []\n chunksize = parse_bytes(chunksize)\n next_part, next_stat = [parts[0].copy()], stats[0].copy()\n for i in range(1, len(parts)):\n stat, part = stats[i], parts[i]\n if (stat[\"file_path_0\"] == next_stat[\"file_path_0\"]) and (\n (next_stat[\"total_byte_size\"] + stat[\"total_byte_size\"]) <= chunksize\n ):\n # Update part list\n next_part.append(part)\n\n # Update Statistics\n next_stat[\"total_byte_size\"] += stat[\"total_byte_size\"]\n next_stat[\"num-rows\"] += stat[\"num-rows\"]\n for col, col_add in zip(next_stat[\"columns\"], stat[\"columns\"]):\n if col[\"name\"] != col_add[\"name\"]:\n raise ValueError(\"Columns are different!!\")\n if \"null_count\" in col:\n col[\"null_count\"] += col_add[\"null_count\"]\n if \"min\" in col:\n col[\"min\"] = min(col[\"min\"], col_add[\"min\"])\n if \"max\" in col:\n col[\"max\"] = max(col[\"max\"], col_add[\"max\"])\n else:\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n next_part, next_stat = [part.copy()], stat.copy()\n\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n\n return parts_agg, stats_agg\n\n\nDataFrame.to_parquet.__doc__ = to_parquet.__doc__\n", "path": "dask/dataframe/io/parquet/core.py" } ]
diff --git a/dask/dataframe/io/parquet/core.py b/dask/dataframe/io/parquet/core.py index 7cee9105cb3..1032fe30a6a 100644 --- a/dask/dataframe/io/parquet/core.py +++ b/dask/dataframe/io/parquet/core.py @@ -721,7 +721,7 @@ def set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed def aggregate_row_groups(parts, stats, chunksize): - if not stats[0]["file_path_0"]: + if not stats[0].get("file_path_0", None): return parts, stats parts_agg = [] diff --git a/dask/dataframe/io/tests/test_parquet.py b/dask/dataframe/io/tests/test_parquet.py index c43fd66d898..5a7abdbb7a7 100644 --- a/dask/dataframe/io/tests/test_parquet.py +++ b/dask/dataframe/io/tests/test_parquet.py @@ -2170,3 +2170,22 @@ def test_chunksize(tmpdir, chunksize, engine, metadata): remainder = (df_byte_size % parse_bytes(chunksize)) > 0 expected += int(remainder) * nparts assert ddf2.npartitions == max(nparts, expected) + + +@write_read_engines() +def test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine): + path = str(tmpdir.join("test.parquet")) + pdf = df.copy() + pdf.index.name = "index" + pdf.to_parquet(path, engine=write_engine) + + ddf_read = dd.read_parquet( + path, + engine=read_engine, + chunksize="10 kiB", + gather_statistics=True, + split_row_groups=True, + index="index", + ) + + assert_eq(pdf, ddf_read)
django-json-api__django-rest-framework-json-api-833
Add DRF 3.12 support. See https://www.django-rest-framework.org/community/3.12-announcement/
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='BSD',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.10,<3.12',\n 'django>=2.2,<3.1',\n ],\n extras_require={\n 'django-polymorphic': ['django-polymorphic>=2.0'],\n 'django-filter': ['django-filter>=2.0']\n },\n setup_requires=wheel,\n python_requires=\">=3.5\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='BSD',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.12,<3.13',\n 'django>=2.2,<3.1',\n ],\n extras_require={\n 'django-polymorphic': ['django-polymorphic>=2.0'],\n 'django-filter': ['django-filter>=2.0']\n },\n setup_requires=wheel,\n python_requires=\">=3.5\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/.travis.yml b/.travis.yml index ad495df9..65266132 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,40 +20,34 @@ matrix: env: TOXENV=docs - python: 3.5 - env: TOXENV=py35-django22-drf310 - - python: 3.5 - env: TOXENV=py35-django22-drf311 + env: TOXENV=py35-django22-drf312 - python: 3.5 env: TOXENV=py35-django22-drfmaster - python: 3.6 - env: TOXENV=py36-django22-drf310 - - python: 3.6 - env: TOXENV=py36-django22-drf311 + env: TOXENV=py36-django22-drf312 - python: 3.6 env: TOXENV=py36-django22-drfmaster - python: 3.6 - env: TOXENV=py36-django30-drf311 + env: TOXENV=py36-django30-drf312 - python: 3.6 env: TOXENV=py36-django30-drfmaster - python: 3.7 - env: TOXENV=py37-django22-drf310 - - python: 3.7 - env: TOXENV=py37-django22-drf311 + env: TOXENV=py37-django22-drf312 - python: 3.7 env: TOXENV=py37-django22-drfmaster - python: 3.7 - env: TOXENV=py37-django30-drf311 + env: TOXENV=py37-django30-drf312 - python: 3.7 env: TOXENV=py37-django30-drfmaster - python: 3.8 - env: TOXENV=py38-django22-drf311 + env: TOXENV=py38-django22-drf312 - python: 3.8 env: TOXENV=py38-django22-drfmaster - python: 3.8 - env: TOXENV=py38-django30-drf311 + env: TOXENV=py38-django30-drf312 - python: 3.8 env: TOXENV=py38-django30-drfmaster diff --git a/CHANGELOG.md b/CHANGELOG.md index f4b110ee..4b872109 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,10 +14,15 @@ any parts of the framework not mentioned in the documentation should generally b * Removed support for Django 1.11. * Removed support for Django 2.1. +* Removed support for Django REST framework 3.10, 3.11 + +### Added +* Added support for Django REST framework 3.12 + ## [3.2.0] - 2020-08-26 -This is the last release supporting Django 1.11 and Django 2.1. +This is the last release supporting Django 1.11, Django 2.1, DRF 3.10 and DRF 3.11. ### Added diff --git a/README.rst b/README.rst index 07f18a8d..89656f22 100644 --- a/README.rst +++ b/README.rst @@ -89,7 +89,7 @@ Requirements 1. Python (3.5, 3.6, 3.7, 3.8) 2. Django (2.2, 3.0) -3. Django REST Framework (3.10, 3.11) +3. Django REST Framework (3.12) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. diff --git a/docs/getting-started.md b/docs/getting-started.md index 39ef6a88..d6c88a3d 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -53,7 +53,7 @@ like the following: 1. Python (3.5, 3.6, 3.7, 3.8) 2. Django (2.2, 3.0) -3. Django REST Framework (3.10, 3.11) +3. Django REST Framework (3.12) We **highly** recommend and only officially support the latest patch release of each Python, Django and REST Framework series. 
diff --git a/setup.py b/setup.py index 42d7d8c4..19c1fa74 100755 --- a/setup.py +++ b/setup.py @@ -90,7 +90,7 @@ def get_package_data(package): ], install_requires=[ 'inflection>=0.3.0', - 'djangorestframework>=3.10,<3.12', + 'djangorestframework>=3.12,<3.13', 'django>=2.2,<3.1', ], extras_require={ diff --git a/tox.ini b/tox.ini index 58956ee5..e4d1bb15 100644 --- a/tox.ini +++ b/tox.ini @@ -1,16 +1,14 @@ [tox] envlist = - py{35,36,37}-django22-drf{310,311,master}, - py38-django22-drf{311,master}, - py{36,37,38}-django30-drf{311,master}, + py{35,36,37,38}-django22-drf{312,master}, + py{36,37,38}-django30-drf{312,master}, lint,docs [testenv] deps = django22: Django>=2.2,<2.3 django30: Django>=3.0,<3.1 - drf310: djangorestframework>=3.10.2,<3.11 - drf311: djangorestframework>=3.11,<3.12 + drf312: djangorestframework>=3.12,<3.13 drfmaster: https://github.com/encode/django-rest-framework/archive/master.zip -rrequirements/requirements-testing.txt -rrequirements/requirements-optionals.txt
doccano__doccano-363
New user signup page question. Hi, I'm trying to understand the user structure. I see a few posts about only being able to assign users to specific projects through the Django admin screen, but my question is about the 'sign up' page you are offered when you click login: is it totally non-functional? That is, is the *only* way to make new users of any kind through the Django admin page? Thanks, Z
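For context, the signup view attached to this record is gated by an `ALLOW_SIGNUP` setting, requires an email backend for the activation mail, and creates users as inactive until they confirm. Independent of that flow, users can always be created programmatically with Django's standard auth API; the snippet below is a generic Django sketch with placeholder credentials, not doccano-specific documentation.

``` python
# Generic Django sketch (placeholder credentials): create a user from
# `python manage.py shell` instead of the admin screen or the signup page.
from django.contrib.auth import get_user_model

User = get_user_model()
User.objects.create_user(
    username="annotator1",   # hypothetical username
    password="change-me",    # hypothetical password
)
```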
[ { "content": "from django.shortcuts import render\nfrom .forms import SignupForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom .tokens import account_activation_token\nfrom django.core.mail import EmailMessage\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import redirect\n\nfrom app import settings\n\n\nclass SignupView(TemplateView):\n template_name = 'signup.html'\n form_class = SignupForm\n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n # here we make sure that a post request won't trigger a subscription in case allow_signup is False\n if not bool(settings.ALLOW_SIGNUP):\n return redirect('signup')\n\n if not hasattr(settings, \"EMAIL_BACKEND\") and not hasattr(settings, \"EMAIL_HOST\"):\n return render(request, 'email_not_set.html')\n\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n mail_subject = 'Activate your account.'\n message = render_to_string('acc_active_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),\n 'token': account_activation_token.make_token(user),\n })\n to_email = form.cleaned_data.get('email')\n email = EmailMessage(\n mail_subject, message, to=[to_email]\n )\n email.send()\n return render(request, 'validate_mail_address_complete.html')\n else:\n return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)})\n", "path": "app/authentification/views.py" } ]
[ { "content": "from django.shortcuts import render\nfrom .forms import SignupForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom .tokens import account_activation_token\nfrom django.core.mail import EmailMessage\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import redirect\n\nfrom django.conf import settings\n\n\nclass SignupView(TemplateView):\n template_name = 'signup.html'\n form_class = SignupForm\n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n # here we make sure that a post request won't trigger a subscription in case allow_signup is False\n if not bool(settings.ALLOW_SIGNUP):\n return redirect('signup')\n\n if not hasattr(settings, \"EMAIL_BACKEND\") and not hasattr(settings, \"EMAIL_HOST\"):\n return render(request, 'email_not_set.html')\n\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n mail_subject = 'Activate your account.'\n message = render_to_string('acc_active_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),\n 'token': account_activation_token.make_token(user),\n })\n to_email = form.cleaned_data.get('email')\n email = EmailMessage(\n mail_subject, message, to=[to_email]\n )\n email.send()\n return render(request, 'validate_mail_address_complete.html')\n else:\n return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)})\n", "path": "app/authentification/views.py" } ]
diff --git a/app/authentification/tests/test_template.py b/app/authentification/tests/test_template.py index c20539543d..85a909fbf0 100644 --- a/app/authentification/tests/test_template.py +++ b/app/authentification/tests/test_template.py @@ -1,7 +1,7 @@ from django.test import SimpleTestCase, TestCase, RequestFactory, override_settings from django.http import HttpRequest from ..views import SignupView -from app import settings +from django.conf import settings from api.tests.test_config import setenv @override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage') diff --git a/app/authentification/views.py b/app/authentification/views.py index 718e44f3a0..d57df65078 100644 --- a/app/authentification/views.py +++ b/app/authentification/views.py @@ -9,7 +9,7 @@ from django.views.generic import TemplateView from django.shortcuts import redirect -from app import settings +from django.conf import settings class SignupView(TemplateView):
e2nIEE__pandapower-221
pp.runpp fails with "Generators with different voltage setpoints connected to the same bus", BUT all setpoints are equal in grid model.
Hi, in build_gen.py (Line 463) an equality check is made. But due to some conversions made before, this check fails:
``` python
values = [1.00999999 1.00999999 1.00999999 1.00999999 1. 1.01 1. ]
values_equal = [1.00999999 1.00999999 1.00999999 1.00999999 1. 1.00999999 1. ]
```
Attached is the problematic grid in pickle, using pandapower 1.6.0 develop commit b7136d72ca66a1fcfdcf2460d40c35dac38f02a0 and python 3.7.
```
Traceback (most recent call last):
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\run.py", line 294, in runpp
    _powerflow(net, **kwargs)
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\powerflow.py", line 66, in _powerflow
    ppc, ppci = _pd2ppc(net)
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\pd2ppc.py", line 114, in _pd2ppc
    _check_voltage_setpoints_at_same_bus(ppc)
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\build_gen.py", line 437, in _check_voltage_setpoints_at_same_bus
    raise UserWarning("Generators with different voltage setpoints connected to the same bus")
UserWarning: Generators with different voltage setpoints connected to the same bus
```
BR V3
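The failure above is a floating-point comparison pitfall: after unit conversions, setpoints that are logically identical (1.01) differ only in the last digits, so a strict equality check rejects them. The snippet below is an illustrative sketch of that pitfall and of a tolerance-based comparison using `numpy`; the array values are copied from the report, and `np.allclose` is one possible remedy, not necessarily the check used inside pandapower.

``` python
import numpy as np

# Setpoints as reported above: the literal 1.01 and the value that survived
# a round of unit conversions differ by roughly 1e-8.
values = np.array([1.00999999, 1.00999999, 1.00999999, 1.00999999, 1.0, 1.01, 1.0])
converted = np.array([1.00999999, 1.00999999, 1.00999999, 1.00999999, 1.0, 1.00999999, 1.0])

print(np.array_equal(values, converted))  # False -> strict equality trips the UserWarning
print(np.allclose(values, converted))     # True  -> tolerance-based check treats them as equal
```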
[ { "content": "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\r\n# and Energy System Technology (IEE), Kassel. All rights reserved.\r\n\r\n\r\nimport numpy as np\r\nimport numpy.core.numeric as ncn\r\nfrom numpy import array, zeros, isnan\r\nfrom pandas import DataFrame\r\nfrom pandapower.idx_bus import PV, REF, VA, VM, BUS_TYPE, NONE, VMAX, VMIN, PQ\r\nfrom pandapower.idx_gen import QMIN, QMAX, PMIN, PMAX, GEN_STATUS, GEN_BUS, PG, VG, QG\r\n\r\n\r\ndef _build_gen_ppc(net, ppc):\r\n '''\r\n Takes the empty ppc network and fills it with the gen values. The gen\r\n datatype will be float afterwards.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n\r\n mode = net[\"_options\"][\"mode\"]\r\n\r\n # if mode == power flow or short circuit...\r\n if mode == \"pf\" or mode == \"sc\":\r\n\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is_mask = _is_elements['ext_grid']\r\n gen_is_mask = _is_elements['gen']\r\n\r\n eg_end = np.sum(eg_is_mask)\r\n gen_end = eg_end + np.sum(gen_is_mask)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n # define default q limits\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9\r\n\r\n _init_ppc_gen(ppc, xw_end, 0)\r\n if mode == \"sc\":\r\n return\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default)\r\n\r\n _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default)\r\n\r\n # if mode == optimal power flow...\r\n if mode == \"opf\":\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n\r\n if len(net.dcline) > 0:\r\n ppc[\"dcline\"] = net.dcline[[\"loss_kw\", \"loss_percent\"]].values\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n sg_is = net.sgen[(net.sgen.in_service & net.sgen.controllable) == True] \\\r\n if \"controllable\" in net.sgen.columns else DataFrame()\r\n l_is = net.load[(net.load.in_service & net.load.controllable) == True] \\\r\n if \"controllable\" in net.load.columns else DataFrame()\r\n stor_is = net.storage[(net.storage.in_service & net.storage.controllable) == True] \\\r\n if \"controllable\" in net.storage.columns else DataFrame()\r\n\r\n _is_elements[\"sgen_controllable\"] = sg_is\r\n _is_elements[\"load_controllable\"] = l_is\r\n _is_elements[\"storage_controllable\"] = stor_is\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n sg_end = gen_end + len(sg_is)\r\n l_end = sg_end + len(l_is)\r\n stor_end = l_end + len(stor_is)\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9 # changes must be considered in check_opf_data\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n # initialize generator matrix\r\n ppc[\"gen\"] = zeros(shape=(stor_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = array([0, 0, 0, q_lim_default, -q_lim_default, 1., 1., 1, p_lim_default,\r\n -p_lim_default, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n # add sgens first so pv bus types won't be overwritten\r\n if sg_end > gen_end:\r\n gen_buses = 
bus_lookup[sg_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][gen_end:sg_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][gen_end:sg_end, PG] = - sg_is[\"p_kw\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_end:sg_end, QG] = sg_is[\"q_kvar\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMAX] = - (sg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMAX]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMIN] = - (sg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMIN]] = min_q_kvar\r\n\r\n if \"max_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMIN] = - (sg_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMAX] = - (sg_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable loads\r\n if l_end > sg_end:\r\n load_buses = bus_lookup[l_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][sg_end:l_end, GEN_BUS] = load_buses\r\n ppc[\"gen\"][sg_end:l_end, PG] = - l_is[\"p_kw\"].values * 1e-3 * l_is[\"scaling\"].values\r\n ppc[\"gen\"][sg_end:l_end, QG] = l_is[\"q_kvar\"].values * 1e-3 * l_is[\"scaling\"].values\r\n\r\n # set bus values for controllable loads\r\n ppc[\"bus\"][load_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable loads\r\n if \"min_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMAX] = - (l_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMAX]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMIN] = - (l_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMIN]] = min_q_kvar\r\n\r\n if \"min_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMIN] = - (l_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][sg_end:l_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMIN]] = max_p_kw\r\n\r\n if \"max_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMAX] = - (l_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][sg_end:l_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable storages\r\n if stor_end > l_end:\r\n stor_buses = bus_lookup[stor_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][l_end:stor_end, GEN_BUS] = stor_buses\r\n ppc[\"gen\"][l_end:stor_end, PG] = - stor_is[\"p_kw\"].values * 1e-3 * 
stor_is[\"scaling\"].values\r\n ppc[\"gen\"][l_end:stor_end, QG] = stor_is[\"q_kvar\"].values * 1e-3 * stor_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][stor_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMAX] = - (stor_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMIN] = - (stor_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMIN]] = min_q_kvar\r\n\r\n if \"max_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMIN] = - (stor_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][l_end:stor_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMAX] = - (stor_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][l_end:stor_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMAX]] = min_p_kw\r\n\r\n # add ext grid / slack data\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"gen\"][:eg_end, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = eg_is[\"in_service\"].values\r\n if \"max_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMIN] = - (eg_is[\"max_p_kw\"].values * 1e-3 - delta)\r\n max_p_kw = ppc[\"gen\"][:eg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMAX] = - (eg_is[\"min_p_kw\"].values * 1e-3 + delta)\r\n min_p_kw = ppc[\"gen\"][:eg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMAX]] = min_p_kw\r\n\r\n if \"min_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMAX] = - (eg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][:eg_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMAX]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMIN] = - (eg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][:eg_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMIN]] = min_q_kvar\r\n\r\n # set bus values for external grid buses\r\n eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = eg_is[\"va_degree\"].values\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n ppc[\"bus\"][eg_buses, VM] = eg_is[\"vm_pu\"].values\r\n\r\n # REF busses don't have flexible voltages by definition:\r\n ppc[\"bus\"][eg_buses, VMAX] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n ppc[\"bus\"][eg_buses, VMIN] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - 
gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n # set constraints for PV generators\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n\r\ndef _init_ppc_gen(ppc, xw_end, q_lim_default):\r\n # initialize generator matrix\r\n ppc[\"gen\"] = np.zeros(shape=(xw_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = np.array([0, 0, 0, q_lim_default, -q_lim_default, 1.,\r\n 1., 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n\r\ndef _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end):\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # add ext grid / slack data\r\n eg_buses = bus_lookup[net[\"ext_grid\"][\"bus\"].values[eg_is_mask]]\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = eg_buses\r\n ppc[\"gen\"][:eg_end, VG] = net[\"ext_grid\"][\"vm_pu\"].values[eg_is_mask]\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = True\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = net[\"ext_grid\"][\"va_degree\"].values[eg_is_mask]\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n # _build_gen_lookups(net, \"ext_grid\", 0, eg_end)\r\n\r\n\r\ndef _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default):\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\r\n\r\n gen_buses = bus_lookup[net[\"gen\"][\"bus\"].values[gen_is_mask]]\r\n gen_is_vm = net[\"gen\"][\"vm_pu\"].values[gen_is_mask]\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - (net[\"gen\"][\"p_kw\"].values[gen_is_mask] * 1e-3 *\r\n net[\"gen\"][\"scaling\"].values[gen_is_mask])\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is_vm\r\n\r\n # set bus values for generator buses\r\n\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is_vm\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n if copy_constraints_to_ppc:\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n # _build_gen_lookups(net, \"gen\", eg_end, gen_end)\r\n\r\n\r\ndef _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default, update_lookup=True):\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n xw = net[\"xward\"]\r\n xw_is = net[\"_is_elements\"]['xward']\r\n if update_lookup:\r\n ppc[\"gen\"][gen_end:xw_end, GEN_BUS] = bus_lookup[xw[\"ad_bus\"].values]\r\n ppc[\"gen\"][gen_end:xw_end, VG] = xw[\"vm_pu\"].values\r\n ppc[\"gen\"][gen_end:xw_end, GEN_STATUS] = xw_is\r\n ppc[\"gen\"][gen_end:xw_end, QMIN] = -q_lim_default\r\n ppc[\"gen\"][gen_end:xw_end, QMAX] = q_lim_default\r\n\r\n xward_buses = bus_lookup[net[\"xward\"][\"ad_bus\"].values]\r\n ppc[\"bus\"][xward_buses[xw_is], BUS_TYPE] = PV\r\n ppc[\"bus\"][xward_buses[~xw_is], 
BUS_TYPE] = NONE\r\n ppc[\"bus\"][xward_buses, VM] = net[\"xward\"][\"vm_pu\"].values\r\n\r\n\r\n\r\n\r\ndef _update_gen_ppc(net, ppc):\r\n '''\r\n Takes the ppc network and updates the gen values from the values in net.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n # get options from net\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n gen_is_mask = _is_elements['gen']\r\n # TODO maybe speed up things here, too\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n\r\n # add ext grid / slack data\r\n ext_grid_lookup = net[\"_pd2ppc_lookups\"][\"ext_grid\"]\r\n ext_grid_idx_ppc = ext_grid_lookup[eg_is.index]\r\n ppc[\"gen\"][ext_grid_idx_ppc, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][ext_grid_idx_ppc, GEN_STATUS] = eg_is[\"in_service\"].values\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n # eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"bus\"][ext_grid_idx_ppc, VA] = eg_is[\"va_degree\"].values\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n gen_lookup = net[\"_pd2ppc_lookups\"][\"gen\"]\r\n gen_idx_ppc = gen_lookup[gen_is.index]\r\n ppc[\"gen\"][gen_idx_ppc, PG] = - gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_idx_ppc, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n # ToDo: this must be tested in combination with recycle. Maybe the placement of the updated value in ppc[\"gen\"]\r\n # ToDo: is wrong. 
-> I'll better raise en error\r\n raise NotImplementedError(\"xwards in combination with recycle is not properly implemented\")\r\n # _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default,\r\n # update_lookup=False)\r\n\r\n\r\ndef _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n # Note: Pypower has generator reference system, pandapower uses load reference\r\n # system (max <-> min)\r\n\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMIN] = -net[\"gen\"][\"max_q_kvar\"].values[gen_is_mask] * 1e-3 - delta\r\n if \"min_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMAX] = -net[\"gen\"][\"min_q_kvar\"].values[gen_is_mask] * 1e-3 + delta\r\n\r\n\r\ndef _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMIN] = -net[\"gen\"][\"max_p_kw\"].values[gen_is_mask] * 1e-3 + delta\r\n if \"min_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMAX] = -net[\"gen\"][\"min_p_kw\"].values[gen_is_mask] * 1e-3 - delta\r\n\r\n\r\ndef _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=np.isnan(max_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMIN]] = max_q_kvar\r\n\r\n min_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=np.isnan(min_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMAX]] = min_q_kvar\r\n\r\n\r\ndef _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMIN]] = max_p_kw\r\n\r\n min_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMAX]] = min_p_kw\r\n\r\n\r\ndef _check_voltage_setpoints_at_same_bus(ppc):\r\n # generator buses:\r\n gen_bus = ppc['gen'][:, GEN_BUS].astype(int)\r\n # generator setpoints:\r\n gen_vm = ppc['gen'][:, VG]\r\n if _different_values_at_one_bus(gen_bus, gen_vm):\r\n raise UserWarning(\"Generators with different voltage setpoints connected to the same bus\")\r\n\r\ndef _check_voltage_angles_at_same_bus(net, ppc):\r\n gen_va = net.ext_grid.va_degree[net._is_elements[\"ext_grid\"]].values\r\n eg_gens = net._pd2ppc_lookups[\"ext_grid\"][net.ext_grid.index[net._is_elements[\"ext_grid\"]]]\r\n gen_bus = ppc[\"gen\"][eg_gens, GEN_BUS].astype(int)\r\n if _different_values_at_one_bus(gen_bus, gen_va):\r\n raise UserWarning(\"Ext grids with different voltage angle setpoints connected to the same bus\")\r\n\r\n\r\ndef _different_values_at_one_bus(buses, values):\r\n \"\"\"\r\n checks if there are different values in any of the\r\n\r\n \"\"\"\r\n # buses with one or more generators and their index\r\n unique_bus, index_first_bus = np.unique(buses, return_index=True)\r\n\r\n # voltage setpoint lookup with the voltage of the first occurence of that bus\r\n first_values = -np.ones(buses.max() + 1)\r\n first_values[unique_bus] = values[index_first_bus]\r\n\r\n # generate voltage 
setpoints where all generators at the same bus\r\n # have the voltage of the first generator at that bus\r\n values_equal = first_values[buses]\r\n\r\n return not np.array_equal(values, values_equal)\r\n", "path": "pandapower/build_gen.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\r\n# and Energy System Technology (IEE), Kassel. All rights reserved.\r\n\r\n\r\nimport numpy as np\r\nimport numpy.core.numeric as ncn\r\nfrom numpy import array, zeros, isnan\r\nfrom pandas import DataFrame\r\nfrom pandapower.idx_bus import PV, REF, VA, VM, BUS_TYPE, NONE, VMAX, VMIN, PQ\r\nfrom pandapower.idx_gen import QMIN, QMAX, PMIN, PMAX, GEN_STATUS, GEN_BUS, PG, VG, QG\r\n\r\n\r\ndef _build_gen_ppc(net, ppc):\r\n '''\r\n Takes the empty ppc network and fills it with the gen values. The gen\r\n datatype will be float afterwards.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n\r\n mode = net[\"_options\"][\"mode\"]\r\n\r\n # if mode == power flow or short circuit...\r\n if mode == \"pf\" or mode == \"sc\":\r\n\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is_mask = _is_elements['ext_grid']\r\n gen_is_mask = _is_elements['gen']\r\n\r\n eg_end = np.sum(eg_is_mask)\r\n gen_end = eg_end + np.sum(gen_is_mask)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n # define default q limits\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9\r\n\r\n _init_ppc_gen(ppc, xw_end, 0)\r\n if mode == \"sc\":\r\n return\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default)\r\n\r\n _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default)\r\n\r\n # if mode == optimal power flow...\r\n if mode == \"opf\":\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n\r\n if len(net.dcline) > 0:\r\n ppc[\"dcline\"] = net.dcline[[\"loss_kw\", \"loss_percent\"]].values\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n sg_is = net.sgen[(net.sgen.in_service & net.sgen.controllable) == True] \\\r\n if \"controllable\" in net.sgen.columns else DataFrame()\r\n l_is = net.load[(net.load.in_service & net.load.controllable) == True] \\\r\n if \"controllable\" in net.load.columns else DataFrame()\r\n stor_is = net.storage[(net.storage.in_service & net.storage.controllable) == True] \\\r\n if \"controllable\" in net.storage.columns else DataFrame()\r\n\r\n _is_elements[\"sgen_controllable\"] = sg_is\r\n _is_elements[\"load_controllable\"] = l_is\r\n _is_elements[\"storage_controllable\"] = stor_is\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n sg_end = gen_end + len(sg_is)\r\n l_end = sg_end + len(l_is)\r\n stor_end = l_end + len(stor_is)\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n p_lim_default = 1e9 # changes must be considered in check_opf_data\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n # initialize generator matrix\r\n ppc[\"gen\"] = zeros(shape=(stor_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = array([0, 0, 0, q_lim_default, -q_lim_default, 1., 1., 1, p_lim_default,\r\n -p_lim_default, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n # add sgens first so pv bus types won't be overwritten\r\n if sg_end > gen_end:\r\n gen_buses = 
bus_lookup[sg_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][gen_end:sg_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][gen_end:sg_end, PG] = - sg_is[\"p_kw\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_end:sg_end, QG] = sg_is[\"q_kvar\"].values * 1e-3 * sg_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMAX] = - (sg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMAX]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, QMIN] = - (sg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][gen_end:sg_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][gen_end:sg_end, [QMIN]] = min_q_kvar\r\n\r\n if \"max_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMIN] = - (sg_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in sg_is.columns:\r\n ppc[\"gen\"][gen_end:sg_end, PMAX] = - (sg_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][gen_end:sg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][gen_end:sg_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable loads\r\n if l_end > sg_end:\r\n load_buses = bus_lookup[l_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][sg_end:l_end, GEN_BUS] = load_buses\r\n ppc[\"gen\"][sg_end:l_end, PG] = - l_is[\"p_kw\"].values * 1e-3 * l_is[\"scaling\"].values\r\n ppc[\"gen\"][sg_end:l_end, QG] = l_is[\"q_kvar\"].values * 1e-3 * l_is[\"scaling\"].values\r\n\r\n # set bus values for controllable loads\r\n ppc[\"bus\"][load_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable loads\r\n if \"min_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMAX] = - (l_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMAX]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, QMIN] = - (l_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][sg_end:l_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][sg_end:l_end, [QMIN]] = min_q_kvar\r\n\r\n if \"min_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMIN] = - (l_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][sg_end:l_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMIN]] = max_p_kw\r\n\r\n if \"max_p_kw\" in l_is.columns:\r\n ppc[\"gen\"][sg_end:l_end, PMAX] = - (l_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][sg_end:l_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][sg_end:l_end, [PMAX]] = min_p_kw\r\n\r\n # add controllable storages\r\n if stor_end > l_end:\r\n stor_buses = bus_lookup[stor_is[\"bus\"].values]\r\n\r\n ppc[\"gen\"][l_end:stor_end, GEN_BUS] = stor_buses\r\n ppc[\"gen\"][l_end:stor_end, PG] = - stor_is[\"p_kw\"].values * 1e-3 * 
stor_is[\"scaling\"].values\r\n ppc[\"gen\"][l_end:stor_end, QG] = stor_is[\"q_kvar\"].values * 1e-3 * stor_is[\"scaling\"].values\r\n\r\n # set bus values for generator buses\r\n ppc[\"bus\"][stor_buses, BUS_TYPE] = PQ\r\n\r\n # set constraints for controllable sgens\r\n if \"min_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMAX] = - (stor_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMIN]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, QMIN] = - (stor_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][l_end:stor_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][l_end:stor_end, [QMIN]] = min_q_kvar\r\n\r\n if \"max_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMIN] = - (stor_is[\"max_p_kw\"].values * 1e-3 + delta)\r\n max_p_kw = ppc[\"gen\"][l_end:stor_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in stor_is.columns:\r\n ppc[\"gen\"][l_end:stor_end, PMAX] = - (stor_is[\"min_p_kw\"].values * 1e-3 - delta)\r\n min_p_kw = ppc[\"gen\"][l_end:stor_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][l_end:stor_end, [PMAX]] = min_p_kw\r\n\r\n # add ext grid / slack data\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"gen\"][:eg_end, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = eg_is[\"in_service\"].values\r\n if \"max_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMIN] = - (eg_is[\"max_p_kw\"].values * 1e-3 - delta)\r\n max_p_kw = ppc[\"gen\"][:eg_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMIN]] = max_p_kw\r\n\r\n if \"min_p_kw\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, PMAX] = - (eg_is[\"min_p_kw\"].values * 1e-3 + delta)\r\n min_p_kw = ppc[\"gen\"][:eg_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][:eg_end, [PMAX]] = min_p_kw\r\n\r\n if \"min_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMAX] = - (eg_is[\"min_q_kvar\"].values * 1e-3 - delta)\r\n max_q_kvar = ppc[\"gen\"][:eg_end, [QMAX]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=isnan(max_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMAX]] = max_q_kvar\r\n\r\n if \"max_q_kvar\" in eg_is.columns:\r\n ppc[\"gen\"][:eg_end, QMIN] = - (eg_is[\"max_q_kvar\"].values * 1e-3 + delta)\r\n min_q_kvar = ppc[\"gen\"][:eg_end, [QMIN]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=isnan(min_q_kvar))\r\n ppc[\"gen\"][:eg_end, [QMIN]] = min_q_kvar\r\n\r\n # set bus values for external grid buses\r\n eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = eg_is[\"va_degree\"].values\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n ppc[\"bus\"][eg_buses, VM] = eg_is[\"vm_pu\"].values\r\n\r\n # REF busses don't have flexible voltages by definition:\r\n ppc[\"bus\"][eg_buses, VMAX] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n ppc[\"bus\"][eg_buses, VMIN] = ppc[\"bus\"][ppc[\"bus\"][:, BUS_TYPE] == REF, VM]\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - 
gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n # set constraints for PV generators\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, _is_elements['gen'])\r\n\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n\r\ndef _init_ppc_gen(ppc, xw_end, q_lim_default):\r\n # initialize generator matrix\r\n ppc[\"gen\"] = np.zeros(shape=(xw_end, 21), dtype=float)\r\n ppc[\"gen\"][:] = np.array([0, 0, 0, q_lim_default, -q_lim_default, 1.,\r\n 1., 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n\r\n\r\ndef _build_pp_ext_grid(net, ppc, eg_is_mask, eg_end):\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # add ext grid / slack data\r\n eg_buses = bus_lookup[net[\"ext_grid\"][\"bus\"].values[eg_is_mask]]\r\n ppc[\"gen\"][:eg_end, GEN_BUS] = eg_buses\r\n ppc[\"gen\"][:eg_end, VG] = net[\"ext_grid\"][\"vm_pu\"].values[eg_is_mask]\r\n ppc[\"gen\"][:eg_end, GEN_STATUS] = True\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n ppc[\"bus\"][eg_buses, VA] = net[\"ext_grid\"][\"va_degree\"].values[eg_is_mask]\r\n ppc[\"bus\"][eg_buses, BUS_TYPE] = REF\r\n # _build_gen_lookups(net, \"ext_grid\", 0, eg_end)\r\n\r\n\r\ndef _build_pp_gen(net, ppc, gen_is_mask, eg_end, gen_end, q_lim_default, p_lim_default):\r\n\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n copy_constraints_to_ppc = net[\"_options\"][\"copy_constraints_to_ppc\"]\r\n\r\n gen_buses = bus_lookup[net[\"gen\"][\"bus\"].values[gen_is_mask]]\r\n gen_is_vm = net[\"gen\"][\"vm_pu\"].values[gen_is_mask]\r\n ppc[\"gen\"][eg_end:gen_end, GEN_BUS] = gen_buses\r\n ppc[\"gen\"][eg_end:gen_end, PG] = - (net[\"gen\"][\"p_kw\"].values[gen_is_mask] * 1e-3 *\r\n net[\"gen\"][\"scaling\"].values[gen_is_mask])\r\n ppc[\"gen\"][eg_end:gen_end, VG] = gen_is_vm\r\n\r\n # set bus values for generator buses\r\n\r\n ppc[\"bus\"][gen_buses, BUS_TYPE] = PV\r\n ppc[\"bus\"][gen_buses, VM] = gen_is_vm\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n if copy_constraints_to_ppc:\r\n _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default)\r\n\r\n # _build_gen_lookups(net, \"gen\", eg_end, gen_end)\r\n\r\n\r\ndef _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default, update_lookup=True):\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n xw = net[\"xward\"]\r\n xw_is = net[\"_is_elements\"]['xward']\r\n if update_lookup:\r\n ppc[\"gen\"][gen_end:xw_end, GEN_BUS] = bus_lookup[xw[\"ad_bus\"].values]\r\n ppc[\"gen\"][gen_end:xw_end, VG] = xw[\"vm_pu\"].values\r\n ppc[\"gen\"][gen_end:xw_end, GEN_STATUS] = xw_is\r\n ppc[\"gen\"][gen_end:xw_end, QMIN] = -q_lim_default\r\n ppc[\"gen\"][gen_end:xw_end, QMAX] = q_lim_default\r\n\r\n xward_buses = bus_lookup[net[\"xward\"][\"ad_bus\"].values]\r\n ppc[\"bus\"][xward_buses[xw_is], BUS_TYPE] = PV\r\n ppc[\"bus\"][xward_buses[~xw_is], 
BUS_TYPE] = NONE\r\n ppc[\"bus\"][xward_buses, VM] = net[\"xward\"][\"vm_pu\"].values\r\n\r\n\r\n\r\n\r\ndef _update_gen_ppc(net, ppc):\r\n '''\r\n Takes the ppc network and updates the gen values from the values in net.\r\n\r\n **INPUT**:\r\n **net** -The pandapower format network\r\n\r\n **ppc** - The PYPOWER format network to fill in values\r\n '''\r\n # get options from net\r\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\r\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\r\n # get in service elements\r\n _is_elements = net[\"_is_elements\"]\r\n gen_is_mask = _is_elements['gen']\r\n # TODO maybe speed up things here, too\r\n eg_is = net[\"ext_grid\"][_is_elements['ext_grid']]\r\n gen_is = net[\"gen\"][_is_elements['gen']]\r\n\r\n eg_end = len(eg_is)\r\n gen_end = eg_end + len(gen_is)\r\n xw_end = gen_end + len(net[\"xward\"])\r\n\r\n q_lim_default = 1e9 # which is 1000 TW - should be enough for distribution grids.\r\n\r\n # add ext grid / slack data\r\n ext_grid_lookup = net[\"_pd2ppc_lookups\"][\"ext_grid\"]\r\n ext_grid_idx_ppc = ext_grid_lookup[eg_is.index]\r\n ppc[\"gen\"][ext_grid_idx_ppc, VG] = eg_is[\"vm_pu\"].values\r\n ppc[\"gen\"][ext_grid_idx_ppc, GEN_STATUS] = eg_is[\"in_service\"].values\r\n\r\n # set bus values for external grid buses\r\n if calculate_voltage_angles:\r\n # eg_buses = bus_lookup[eg_is[\"bus\"].values]\r\n ppc[\"bus\"][ext_grid_idx_ppc, VA] = eg_is[\"va_degree\"].values\r\n\r\n # add generator / pv data\r\n if gen_end > eg_end:\r\n gen_lookup = net[\"_pd2ppc_lookups\"][\"gen\"]\r\n gen_idx_ppc = gen_lookup[gen_is.index]\r\n ppc[\"gen\"][gen_idx_ppc, PG] = - gen_is[\"p_kw\"].values * 1e-3 * gen_is[\"scaling\"].values\r\n ppc[\"gen\"][gen_idx_ppc, VG] = gen_is[\"vm_pu\"].values\r\n\r\n # set bus values for generator buses\r\n gen_buses = bus_lookup[gen_is[\"bus\"].values]\r\n ppc[\"bus\"][gen_buses, VM] = gen_is[\"vm_pu\"].values\r\n\r\n _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask)\r\n _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default)\r\n\r\n # add extended ward pv node data\r\n if xw_end > gen_end:\r\n # ToDo: this must be tested in combination with recycle. Maybe the placement of the updated value in ppc[\"gen\"]\r\n # ToDo: is wrong. 
-> I'll better raise en error\r\n raise NotImplementedError(\"xwards in combination with recycle is not properly implemented\")\r\n # _build_pp_xward(net, ppc, gen_end, xw_end, q_lim_default,\r\n # update_lookup=False)\r\n\r\n\r\ndef _copy_q_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n # Note: Pypower has generator reference system, pandapower uses load reference\r\n # system (max <-> min)\r\n\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMIN] = -net[\"gen\"][\"max_q_kvar\"].values[gen_is_mask] * 1e-3 - delta\r\n if \"min_q_kvar\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, QMAX] = -net[\"gen\"][\"min_q_kvar\"].values[gen_is_mask] * 1e-3 + delta\r\n\r\n\r\ndef _copy_p_limits_to_ppc(net, ppc, eg_end, gen_end, gen_is_mask):\r\n delta = net[\"_options\"][\"delta\"]\r\n\r\n if \"max_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMIN] = -net[\"gen\"][\"max_p_kw\"].values[gen_is_mask] * 1e-3 + delta\r\n if \"min_p_kw\" in net[\"gen\"].columns:\r\n ppc[\"gen\"][eg_end:gen_end, PMAX] = -net[\"gen\"][\"min_p_kw\"].values[gen_is_mask] * 1e-3 - delta\r\n\r\n\r\ndef _replace_nans_with_default_q_limits_in_ppc(ppc, eg_end, gen_end, q_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMIN]]\r\n ncn.copyto(max_q_kvar, -q_lim_default, where=np.isnan(max_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMIN]] = max_q_kvar\r\n\r\n min_q_kvar = ppc[\"gen\"][eg_end:gen_end, [QMAX]]\r\n ncn.copyto(min_q_kvar, q_lim_default, where=np.isnan(min_q_kvar))\r\n ppc[\"gen\"][eg_end:gen_end, [QMAX]] = min_q_kvar\r\n\r\n\r\ndef _replace_nans_with_default_p_limits_in_ppc(ppc, eg_end, gen_end, p_lim_default):\r\n # Note: Pypower has generator reference system, pandapower uses load reference system (max <-> min)\r\n max_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMIN]]\r\n ncn.copyto(max_p_kw, -p_lim_default, where=isnan(max_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMIN]] = max_p_kw\r\n\r\n min_p_kw = ppc[\"gen\"][eg_end:gen_end, [PMAX]]\r\n ncn.copyto(min_p_kw, p_lim_default, where=isnan(min_p_kw))\r\n ppc[\"gen\"][eg_end:gen_end, [PMAX]] = min_p_kw\r\n\r\n\r\ndef _check_voltage_setpoints_at_same_bus(ppc):\r\n # generator buses:\r\n gen_bus = ppc['gen'][:, GEN_BUS].astype(int)\r\n # generator setpoints:\r\n gen_vm = ppc['gen'][:, VG]\r\n if _different_values_at_one_bus(gen_bus, gen_vm):\r\n raise UserWarning(\"Generators with different voltage setpoints connected to the same bus\")\r\n\r\ndef _check_voltage_angles_at_same_bus(net, ppc):\r\n gen_va = net.ext_grid.va_degree[net._is_elements[\"ext_grid\"]].values\r\n eg_gens = net._pd2ppc_lookups[\"ext_grid\"][net.ext_grid.index[net._is_elements[\"ext_grid\"]]]\r\n gen_bus = ppc[\"gen\"][eg_gens, GEN_BUS].astype(int)\r\n if _different_values_at_one_bus(gen_bus, gen_va):\r\n raise UserWarning(\"Ext grids with different voltage angle setpoints connected to the same bus\")\r\n\r\n\r\ndef _different_values_at_one_bus(buses, values):\r\n \"\"\"\r\n checks if there are different values in any of the\r\n\r\n \"\"\"\r\n # buses with one or more generators and their index\r\n unique_bus, index_first_bus = np.unique(buses, return_index=True)\r\n\r\n # voltage setpoint lookup with the voltage of the first occurence of that bus\r\n first_values = -np.ones(buses.max() + 1)\r\n first_values[unique_bus] = values[index_first_bus]\r\n\r\n # generate voltage 
setpoints where all generators at the same bus\r\n # have the voltage of the first generator at that bus\r\n values_equal = first_values[buses]\r\n\r\n return not np.allclose(values, values_equal)\r\n", "path": "pandapower/build_gen.py" } ]
diff --git a/pandapower/build_gen.py b/pandapower/build_gen.py index cc014f43b..8be4edbcc 100644 --- a/pandapower/build_gen.py +++ b/pandapower/build_gen.py @@ -460,4 +460,4 @@ def _different_values_at_one_bus(buses, values): # have the voltage of the first generator at that bus values_equal = first_values[buses] - return not np.array_equal(values, values_equal) + return not np.allclose(values, values_equal)
pypi__warehouse-3568
Set samesite=lax on session cookies

This is a strong defense-in-depth mechanism for protecting against CSRF. It's currently only respected by Chrome, but Firefox will add it as well.
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport time\n\nimport msgpack\nimport msgpack.exceptions\nimport redis\n\nfrom pyramid import viewderivers\nfrom pyramid.interfaces import ISession, ISessionFactory\nfrom zope.interface import implementer\n\nfrom warehouse.cache.http import add_vary\nfrom warehouse.utils import crypto\n\n\ndef _invalid_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self._error_message()\n return wrapped\n\n\n@implementer(ISession)\nclass InvalidSession(dict):\n\n __contains__ = _invalid_method(dict.__contains__)\n __delitem__ = _invalid_method(dict.__delitem__)\n __getitem__ = _invalid_method(dict.__getitem__)\n __iter__ = _invalid_method(dict.__iter__)\n __len__ = _invalid_method(dict.__len__)\n __setitem__ = _invalid_method(dict.__setitem__)\n clear = _invalid_method(dict.clear)\n copy = _invalid_method(dict.copy)\n fromkeys = _invalid_method(dict.fromkeys)\n get = _invalid_method(dict.get)\n items = _invalid_method(dict.items)\n keys = _invalid_method(dict.keys)\n pop = _invalid_method(dict.pop)\n popitem = _invalid_method(dict.popitem)\n setdefault = _invalid_method(dict.setdefault)\n update = _invalid_method(dict.update)\n values = _invalid_method(dict.values)\n\n def _error_message(self):\n raise RuntimeError(\n \"Cannot use request.session in a view without uses_session=True.\"\n )\n\n def __getattr__(self, name):\n self._error_message()\n\n @property\n def created(self):\n self._error_message()\n\n\ndef _changed_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self.changed()\n return method(self, *args, **kwargs)\n return wrapped\n\n\n@implementer(ISession)\nclass Session(dict):\n\n _csrf_token_key = \"_csrf_token\"\n _flash_key = \"_flash_messages\"\n\n # A number of our methods need to be decorated so that they also call\n # self.changed()\n __delitem__ = _changed_method(dict.__delitem__)\n __setitem__ = _changed_method(dict.__setitem__)\n clear = _changed_method(dict.clear)\n pop = _changed_method(dict.pop)\n popitem = _changed_method(dict.popitem)\n setdefault = _changed_method(dict.setdefault)\n update = _changed_method(dict.update)\n\n def __init__(self, data=None, session_id=None, new=True):\n # Brand new sessions don't have any data, so we'll just create an empty\n # dictionary for them.\n if data is None:\n data = {}\n\n # Initialize our actual dictionary here.\n super().__init__(data)\n\n # We need to track the state of our Session.\n self._sid = session_id\n self._changed = False\n self.new = new\n self.created = int(time.time())\n\n # We'll track all of the IDs that have been invalidated here\n self.invalidated = set()\n\n @property\n def sid(self):\n if self._sid is None:\n self._sid = crypto.random_token()\n return self._sid\n\n def changed(self):\n self._changed = True\n\n def invalidate(self):\n self.clear()\n self.new = True\n self.created = int(time.time())\n self._changed = False\n\n # If the current session id isn't None we'll 
want to record it as one\n # of the ones that have been invalidated.\n if self._sid is not None:\n self.invalidated.add(self._sid)\n self._sid = None\n\n def should_save(self):\n return self._changed\n\n # Flash Messages Methods\n def _get_flash_queue_key(self, queue):\n return \".\".join(filter(None, [self._flash_key, queue]))\n\n def flash(self, msg, queue=\"\", allow_duplicate=True):\n queue_key = self._get_flash_queue_key(queue)\n\n # If we're not allowing duplicates check if this message is already\n # in the queue, and if it is just return immediately.\n if not allow_duplicate and msg in self[queue_key]:\n return\n\n self.setdefault(queue_key, []).append(msg)\n\n def peek_flash(self, queue=\"\"):\n return self.get(self._get_flash_queue_key(queue), [])\n\n def pop_flash(self, queue=\"\"):\n queue_key = self._get_flash_queue_key(queue)\n messages = self.get(queue_key, [])\n self.pop(queue_key, None)\n return messages\n\n # CSRF Methods\n def new_csrf_token(self):\n self[self._csrf_token_key] = crypto.random_token()\n return self[self._csrf_token_key]\n\n def get_csrf_token(self):\n token = self.get(self._csrf_token_key)\n if token is None:\n token = self.new_csrf_token()\n return token\n\n\n@implementer(ISessionFactory)\nclass SessionFactory:\n\n cookie_name = \"session_id\"\n max_age = 12 * 60 * 60 # 12 hours\n\n def __init__(self, secret, url):\n self.redis = redis.StrictRedis.from_url(url)\n self.signer = crypto.TimestampSigner(secret, salt=\"session\")\n\n def __call__(self, request):\n return self._process_request(request)\n\n def _redis_key(self, session_id):\n return \"warehouse/session/data/{}\".format(session_id)\n\n def _process_request(self, request):\n # Register a callback with the request so we can save the session once\n # it's finished.\n request.add_response_callback(self._process_response)\n\n # Load our session ID from the request.\n session_id = request.cookies.get(self.cookie_name)\n\n # If we do not have a session ID then we'll just use a new empty\n # session.\n if session_id is None:\n return Session()\n\n # Check to make sure we have a valid session id\n try:\n session_id = self.signer.unsign(session_id, max_age=self.max_age)\n session_id = session_id.decode(\"utf8\")\n except crypto.BadSignature:\n return Session()\n\n # Fetch the serialized data from redis\n bdata = self.redis.get(self._redis_key(session_id))\n\n # If the session didn't exist in redis, we'll give the user a new\n # session.\n if bdata is None:\n return Session()\n\n # De-serialize our session data\n try:\n data = msgpack.unpackb(bdata, encoding=\"utf8\", use_list=True)\n except (msgpack.exceptions.UnpackException,\n msgpack.exceptions.ExtraData):\n # If the session data was invalid we'll give the user a new session\n return Session()\n\n # If we were able to load existing session data, load it into a\n # Session class\n session = Session(data, session_id, False)\n\n return session\n\n def _process_response(self, request, response):\n # If the request has an InvalidSession, then the view can't have\n # accessed the session, and we can just skip all of this anyways.\n if isinstance(request.session, InvalidSession):\n return\n\n # Check to see if the session has been marked to be deleted, if it has\n # benn then we'll delete it, and tell our response to delete the\n # session cookie as well.\n if request.session.invalidated:\n for session_id in request.session.invalidated:\n self.redis.delete(self._redis_key(session_id))\n\n if not request.session.should_save():\n 
response.delete_cookie(self.cookie_name)\n\n # Check to see if the session has been marked to be saved, generally\n # this means that the session data has been modified and thus we need\n # to store the new data.\n if request.session.should_save():\n # Save our session in Redis\n self.redis.setex(\n self._redis_key(request.session.sid),\n self.max_age,\n msgpack.packb(\n request.session,\n encoding=\"utf8\",\n use_bin_type=True,\n ),\n )\n\n # Send our session cookie to the client\n response.set_cookie(\n self.cookie_name,\n self.signer.sign(request.session.sid.encode(\"utf8\")),\n max_age=self.max_age,\n httponly=True,\n secure=request.scheme == \"https\",\n )\n\n\ndef session_view(view, info):\n if info.options.get(\"uses_session\"):\n # If we're using the session, then we'll just return the original view\n # with a small wrapper around it to ensure that it has a Vary: Cookie\n # header.\n return add_vary(\"Cookie\")(view)\n elif info.exception_only:\n return view\n else:\n # If we're not using the session on this view, then we'll wrap the view\n # with a wrapper that just ensures that the session cannot be used.\n @functools.wraps(view)\n def wrapped(context, request):\n # This whole method is a little bit of an odd duck, we want to make\n # sure that we don't actually *access* request.session, because\n # doing so triggers the machinery to create a new session. So\n # instead we will dig into the request object __dict__ to\n # effectively do the same thing, jsut without triggering an access\n # on request.session.\n\n # Save the original session so that we can restore it once the\n # inner views have been called.\n nothing = object()\n original_session = request.__dict__.get(\"session\", nothing)\n\n # This particular view hasn't been set to allow access to the\n # session, so we'll just assign an InvalidSession to\n # request.session\n request.__dict__[\"session\"] = InvalidSession()\n\n try:\n # Invoke the real view\n return view(context, request)\n finally:\n # Restore the original session so that things like\n # pyramid_debugtoolbar can access it.\n if original_session is nothing:\n del request.__dict__[\"session\"]\n else:\n request.__dict__[\"session\"] = original_session\n\n return wrapped\n\n\nsession_view.options = {\"uses_session\"}\n\n\ndef includeme(config):\n config.set_session_factory(\n SessionFactory(\n config.registry.settings[\"sessions.secret\"],\n config.registry.settings[\"sessions.url\"],\n ),\n )\n\n config.add_view_deriver(\n session_view,\n over=\"csrf_view\",\n under=viewderivers.INGRESS,\n )\n", "path": "warehouse/sessions.py" } ]
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport time\n\nimport msgpack\nimport msgpack.exceptions\nimport redis\n\nfrom pyramid import viewderivers\nfrom pyramid.interfaces import ISession, ISessionFactory\nfrom zope.interface import implementer\n\nfrom warehouse.cache.http import add_vary\nfrom warehouse.utils import crypto\n\n\ndef _invalid_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self._error_message()\n return wrapped\n\n\n@implementer(ISession)\nclass InvalidSession(dict):\n\n __contains__ = _invalid_method(dict.__contains__)\n __delitem__ = _invalid_method(dict.__delitem__)\n __getitem__ = _invalid_method(dict.__getitem__)\n __iter__ = _invalid_method(dict.__iter__)\n __len__ = _invalid_method(dict.__len__)\n __setitem__ = _invalid_method(dict.__setitem__)\n clear = _invalid_method(dict.clear)\n copy = _invalid_method(dict.copy)\n fromkeys = _invalid_method(dict.fromkeys)\n get = _invalid_method(dict.get)\n items = _invalid_method(dict.items)\n keys = _invalid_method(dict.keys)\n pop = _invalid_method(dict.pop)\n popitem = _invalid_method(dict.popitem)\n setdefault = _invalid_method(dict.setdefault)\n update = _invalid_method(dict.update)\n values = _invalid_method(dict.values)\n\n def _error_message(self):\n raise RuntimeError(\n \"Cannot use request.session in a view without uses_session=True.\"\n )\n\n def __getattr__(self, name):\n self._error_message()\n\n @property\n def created(self):\n self._error_message()\n\n\ndef _changed_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self.changed()\n return method(self, *args, **kwargs)\n return wrapped\n\n\n@implementer(ISession)\nclass Session(dict):\n\n _csrf_token_key = \"_csrf_token\"\n _flash_key = \"_flash_messages\"\n\n # A number of our methods need to be decorated so that they also call\n # self.changed()\n __delitem__ = _changed_method(dict.__delitem__)\n __setitem__ = _changed_method(dict.__setitem__)\n clear = _changed_method(dict.clear)\n pop = _changed_method(dict.pop)\n popitem = _changed_method(dict.popitem)\n setdefault = _changed_method(dict.setdefault)\n update = _changed_method(dict.update)\n\n def __init__(self, data=None, session_id=None, new=True):\n # Brand new sessions don't have any data, so we'll just create an empty\n # dictionary for them.\n if data is None:\n data = {}\n\n # Initialize our actual dictionary here.\n super().__init__(data)\n\n # We need to track the state of our Session.\n self._sid = session_id\n self._changed = False\n self.new = new\n self.created = int(time.time())\n\n # We'll track all of the IDs that have been invalidated here\n self.invalidated = set()\n\n @property\n def sid(self):\n if self._sid is None:\n self._sid = crypto.random_token()\n return self._sid\n\n def changed(self):\n self._changed = True\n\n def invalidate(self):\n self.clear()\n self.new = True\n self.created = int(time.time())\n self._changed = False\n\n # If the current session id isn't None we'll 
want to record it as one\n # of the ones that have been invalidated.\n if self._sid is not None:\n self.invalidated.add(self._sid)\n self._sid = None\n\n def should_save(self):\n return self._changed\n\n # Flash Messages Methods\n def _get_flash_queue_key(self, queue):\n return \".\".join(filter(None, [self._flash_key, queue]))\n\n def flash(self, msg, queue=\"\", allow_duplicate=True):\n queue_key = self._get_flash_queue_key(queue)\n\n # If we're not allowing duplicates check if this message is already\n # in the queue, and if it is just return immediately.\n if not allow_duplicate and msg in self[queue_key]:\n return\n\n self.setdefault(queue_key, []).append(msg)\n\n def peek_flash(self, queue=\"\"):\n return self.get(self._get_flash_queue_key(queue), [])\n\n def pop_flash(self, queue=\"\"):\n queue_key = self._get_flash_queue_key(queue)\n messages = self.get(queue_key, [])\n self.pop(queue_key, None)\n return messages\n\n # CSRF Methods\n def new_csrf_token(self):\n self[self._csrf_token_key] = crypto.random_token()\n return self[self._csrf_token_key]\n\n def get_csrf_token(self):\n token = self.get(self._csrf_token_key)\n if token is None:\n token = self.new_csrf_token()\n return token\n\n\n@implementer(ISessionFactory)\nclass SessionFactory:\n\n cookie_name = \"session_id\"\n max_age = 12 * 60 * 60 # 12 hours\n\n def __init__(self, secret, url):\n self.redis = redis.StrictRedis.from_url(url)\n self.signer = crypto.TimestampSigner(secret, salt=\"session\")\n\n def __call__(self, request):\n return self._process_request(request)\n\n def _redis_key(self, session_id):\n return \"warehouse/session/data/{}\".format(session_id)\n\n def _process_request(self, request):\n # Register a callback with the request so we can save the session once\n # it's finished.\n request.add_response_callback(self._process_response)\n\n # Load our session ID from the request.\n session_id = request.cookies.get(self.cookie_name)\n\n # If we do not have a session ID then we'll just use a new empty\n # session.\n if session_id is None:\n return Session()\n\n # Check to make sure we have a valid session id\n try:\n session_id = self.signer.unsign(session_id, max_age=self.max_age)\n session_id = session_id.decode(\"utf8\")\n except crypto.BadSignature:\n return Session()\n\n # Fetch the serialized data from redis\n bdata = self.redis.get(self._redis_key(session_id))\n\n # If the session didn't exist in redis, we'll give the user a new\n # session.\n if bdata is None:\n return Session()\n\n # De-serialize our session data\n try:\n data = msgpack.unpackb(bdata, encoding=\"utf8\", use_list=True)\n except (msgpack.exceptions.UnpackException,\n msgpack.exceptions.ExtraData):\n # If the session data was invalid we'll give the user a new session\n return Session()\n\n # If we were able to load existing session data, load it into a\n # Session class\n session = Session(data, session_id, False)\n\n return session\n\n def _process_response(self, request, response):\n # If the request has an InvalidSession, then the view can't have\n # accessed the session, and we can just skip all of this anyways.\n if isinstance(request.session, InvalidSession):\n return\n\n # Check to see if the session has been marked to be deleted, if it has\n # benn then we'll delete it, and tell our response to delete the\n # session cookie as well.\n if request.session.invalidated:\n for session_id in request.session.invalidated:\n self.redis.delete(self._redis_key(session_id))\n\n if not request.session.should_save():\n 
response.delete_cookie(self.cookie_name)\n\n # Check to see if the session has been marked to be saved, generally\n # this means that the session data has been modified and thus we need\n # to store the new data.\n if request.session.should_save():\n # Save our session in Redis\n self.redis.setex(\n self._redis_key(request.session.sid),\n self.max_age,\n msgpack.packb(\n request.session,\n encoding=\"utf8\",\n use_bin_type=True,\n ),\n )\n\n # Send our session cookie to the client\n response.set_cookie(\n self.cookie_name,\n self.signer.sign(request.session.sid.encode(\"utf8\")),\n max_age=self.max_age,\n httponly=True,\n secure=request.scheme == \"https\",\n samesite=b\"lax\"\n )\n\n\ndef session_view(view, info):\n if info.options.get(\"uses_session\"):\n # If we're using the session, then we'll just return the original view\n # with a small wrapper around it to ensure that it has a Vary: Cookie\n # header.\n return add_vary(\"Cookie\")(view)\n elif info.exception_only:\n return view\n else:\n # If we're not using the session on this view, then we'll wrap the view\n # with a wrapper that just ensures that the session cannot be used.\n @functools.wraps(view)\n def wrapped(context, request):\n # This whole method is a little bit of an odd duck, we want to make\n # sure that we don't actually *access* request.session, because\n # doing so triggers the machinery to create a new session. So\n # instead we will dig into the request object __dict__ to\n # effectively do the same thing, jsut without triggering an access\n # on request.session.\n\n # Save the original session so that we can restore it once the\n # inner views have been called.\n nothing = object()\n original_session = request.__dict__.get(\"session\", nothing)\n\n # This particular view hasn't been set to allow access to the\n # session, so we'll just assign an InvalidSession to\n # request.session\n request.__dict__[\"session\"] = InvalidSession()\n\n try:\n # Invoke the real view\n return view(context, request)\n finally:\n # Restore the original session so that things like\n # pyramid_debugtoolbar can access it.\n if original_session is nothing:\n del request.__dict__[\"session\"]\n else:\n request.__dict__[\"session\"] = original_session\n\n return wrapped\n\n\nsession_view.options = {\"uses_session\"}\n\n\ndef includeme(config):\n config.set_session_factory(\n SessionFactory(\n config.registry.settings[\"sessions.secret\"],\n config.registry.settings[\"sessions.url\"],\n ),\n )\n\n config.add_view_deriver(\n session_view,\n over=\"csrf_view\",\n under=viewderivers.INGRESS,\n )\n", "path": "warehouse/sessions.py" } ]
diff --git a/tests/unit/test_sessions.py b/tests/unit/test_sessions.py index 0baee1c117b5..8bc57b3c27b0 100644 --- a/tests/unit/test_sessions.py +++ b/tests/unit/test_sessions.py @@ -497,7 +497,7 @@ def test_invalidated_deletes_save_non_secure(self, monkeypatch, ) response = pretend.stub( set_cookie=pretend.call_recorder( - lambda cookie, data, max_age, httponly, secure: None + lambda cookie, data, max_age, httponly, secure, samesite: None ) ) session_factory._process_response(pyramid_request, response) @@ -532,6 +532,7 @@ def test_invalidated_deletes_save_non_secure(self, monkeypatch, max_age=12 * 60 * 60, httponly=True, secure=False, + samesite=b"lax", ), ] diff --git a/warehouse/sessions.py b/warehouse/sessions.py index a52318f0eb7c..548f760c757a 100644 --- a/warehouse/sessions.py +++ b/warehouse/sessions.py @@ -263,6 +263,7 @@ def _process_response(self, request, response): max_age=self.max_age, httponly=True, secure=request.scheme == "https", + samesite=b"lax" )
microsoft__Qcodes-997
Bug: experiment id not properly attributed when calling the load_experiment_by_name method

Steps to reproduce:
```python
from qcodes.dataset.measurements import Measurement

# Start with a clean data base
db_location = qcodes.config["core"]["db_location"]
db = DataSet(db_location)

exp = new_experiment("test", "test1")
exp_loaded = load_experiment_by_name("test", "test1")

# The following will work
meas = SweepMeasurement(exp=exp_loaded)
with meas.run() as datasaver:
    pass

# This time we will have an error
with meas.run() as datasaver:
    pass
```
If the experiment was already there in the database, the first measurement will also fail.
[ { "content": "import json\nimport logging\nfrom time import monotonic\nfrom collections import OrderedDict\nfrom typing import (Callable, Union, Dict, Tuple, List, Sequence, cast,\n MutableMapping, MutableSequence, Optional)\nfrom inspect import signature\nfrom numbers import Number\n\nimport numpy as np\n\nimport qcodes as qc\nfrom qcodes import Station\nfrom qcodes.instrument.parameter import ArrayParameter, _BaseParameter\nfrom qcodes.dataset.experiment_container import Experiment\nfrom qcodes.dataset.param_spec import ParamSpec\nfrom qcodes.dataset.data_set import DataSet\n\nlog = logging.getLogger(__name__)\n\n\nclass ParameterTypeError(Exception):\n pass\n\n\nclass DataSaver:\n \"\"\"\n The class used byt the Runner context manager to handle the\n datasaving to the database\n \"\"\"\n\n def __init__(self, dataset: DataSet, write_period: float,\n parameters: Dict[str, ParamSpec]) -> None:\n self._dataset = dataset\n self.write_period = write_period\n self.parameters = parameters\n self._known_parameters = list(parameters.keys())\n self._results: List[dict] = [] # will be filled by addResult\n self._last_save_time = monotonic()\n self._known_dependencies: Dict[str, str] = {}\n for param, parspec in parameters.items():\n if parspec.depends_on != '':\n self._known_dependencies.update({str(param):\n parspec.depends_on.split(', ')})\n\n def add_result(self,\n *res_tuple: Tuple[Union[_BaseParameter, str],\n Union[str, int, float, np.ndarray]])-> None:\n \"\"\"\n Add a result to the measurement results. Represents a measurement\n point in the space of measurement parameters, e.g. in an experiment\n varying two voltages and measuring two currents, a measurement point\n is the four dimensional (v1, v2, c1, c2). The corresponding call\n to this function would be (e.g.)\n >> datasaver.add_result((v1, 0.1), (v2, 0.2), (c1, 5), (c2, -2.1))\n\n For better performance, this function does not immediately write to\n the database, but keeps the results in memory. Writing happens every\n `write_period` seconds and during the __exit__ method if this class.\n\n Regarding arrays: since arrays as binary blobs are (almost) worthless\n in a relational database, this function \"unravels\" arrays passed to it.\n That, in turn, forces us to impose rules on what can be saved in one\n go. Any number of scalars and any number of arrays OF THE SAME LENGTH\n can be passed to add_result. The scalars are duplicated to match the\n arrays.\n\n Args:\n res: a dictionary with keys that are parameter names and items\n that are the corresponding values at this measurement point.\n\n Raises:\n ValueError: if a parameter name not registered in the parent\n Measurement object is encountered.\n ParameterTypeError: if a parameter is given a value not matching\n its type.\n \"\"\"\n res = list(res_tuple) # ArrayParameters cause us to mutate the results\n\n # we iterate through the input twice in order to allow users to call\n # add_result with the arguments in any particular order, i.e. 
NOT\n # enforcing that setpoints come before dependent variables.\n # Also, we pre-check that array dimensions are compatible before\n # proceeding.\n input_size = 1\n params = []\n for partial_result in res:\n parameter = partial_result[0]\n paramstr = str(partial_result[0])\n value = partial_result[1]\n params.append(paramstr)\n if paramstr not in self._known_parameters:\n raise ValueError(f'Can not add a result for {paramstr}, no '\n 'such parameter registered in this '\n 'measurement.')\n if isinstance(value, np.ndarray):\n value = cast(np.ndarray, partial_result[1])\n array_size = len(value)\n if input_size > 1 and input_size != array_size:\n raise ValueError('Incompatible array dimensions. Trying to'\n f' add arrays of dimension {input_size} '\n f'and {array_size}')\n else:\n input_size = array_size\n # TODO (WilliamHPNielsen): The following code block is ugly and\n # brittle and should be enough to convince us to abandon the\n # design of ArrayParameters (possibly) containing (some of) their\n # setpoints\n if isinstance(parameter, ArrayParameter):\n sps = parameter.setpoints[0]\n inst_name = getattr(parameter._instrument, 'name', '')\n if inst_name:\n spname = f'{inst_name}_{parameter.setpoint_names[0]}'\n else:\n spname = parameter.setpoint_names[0]\n\n if f'{paramstr}_setpoint' in self.parameters.keys():\n res.append((f'{paramstr}_setpoint', sps))\n elif spname in self.parameters.keys():\n res.append((spname, sps))\n else:\n raise RuntimeError('No setpoints registered for '\n f'ArrayParameter {paramstr}!')\n\n # Now check for missing setpoints\n for partial_result in res:\n param = str(partial_result[0])\n value = partial_result[1]\n if param in self._known_dependencies.keys():\n stuffweneed = set(self._known_dependencies[param])\n stuffwehave = set(params)\n if not stuffweneed.issubset(stuffwehave):\n raise ValueError('Can not add this result; missing '\n f'setpoint values for {param}:'\n f' {stuffweneed}.'\n f' Values only given for {params}.')\n\n for index in range(input_size):\n res_dict = {}\n for partial_result in res:\n param = str(partial_result[0])\n value = partial_result[1]\n\n # For compatibility with the old Loop, setpoints are\n # tuples of numbers (usually tuple(np.linspace(...))\n if hasattr(value, '__len__') and not(isinstance(value, str)):\n res_dict.update({param: value[index]})\n else:\n res_dict.update({param: value})\n\n self._results.append(res_dict)\n\n if monotonic() - self._last_save_time > self.write_period:\n self.flush_data_to_database()\n self._last_save_time = monotonic()\n\n def flush_data_to_database(self):\n \"\"\"\n Write the in-memory results to the database.\n \"\"\"\n log.debug('Flushing to database')\n if self._results != []:\n try:\n write_point = self._dataset.add_results(self._results)\n log.debug(f'Successfully wrote from index {write_point}')\n self._results = []\n except Exception as e:\n log.warning(f'Could not commit to database; {e}')\n else:\n log.debug('No results to flush')\n\n @property\n def run_id(self):\n return self._dataset.run_id\n\n @property\n def points_written(self):\n return self._dataset.number_of_results\n\n @property\n def dataset(self):\n return self._dataset\n\n\nclass Runner:\n \"\"\"\n Context manager for the measurement.\n Lives inside a Measurement and should never be instantiated\n outside a Measurement.\n\n This context manager handles all the dirty business of writing data\n to the database. 
Additionally, it may perform experiment bootstrapping\n and clean-up after the measurement.\n \"\"\"\n def __init__(\n self, enteractions: List, exitactions: List,\n experiment: Experiment=None, station: Station=None,\n write_period: float=None,\n parameters: Dict[str, ParamSpec]=None,\n name: str='',\n subscribers: List=[]) -> None:\n\n self.enteractions = enteractions\n self.exitactions = exitactions\n self.subscribers = subscribers\n self.experiment = experiment\n self.station = station\n self.parameters = parameters\n # here we use 5 s as a sane default, but that value should perhaps\n # be read from some config file\n self.write_period = write_period if write_period is not None else 5\n self.name = name if name else 'results'\n\n def __enter__(self) -> DataSaver:\n # TODO: should user actions really precede the dataset?\n # first do whatever bootstrapping the user specified\n for func, args in self.enteractions:\n func(*args)\n\n # next set up the \"datasaver\"\n if self.experiment:\n eid = self.experiment.id\n else:\n eid = None\n\n self.ds = qc.new_data_set(self.name, eid)\n\n # .. and give the dataset a snapshot as metadata\n if self.station is None:\n station = qc.Station.default\n else:\n station = self.station\n\n if station:\n self.ds.add_metadata('snapshot',\n json.dumps({'station': station.snapshot()}))\n\n for paramspec in self.parameters.values():\n self.ds.add_parameter(paramspec)\n\n # register all subscribers\n for (callble, state) in self.subscribers:\n # We register with minimal waiting time.\n # That should make all subscribers be called when data is flushed\n # to the database\n log.debug(f'Subscribing callable {callble} with state {state}')\n self.ds.subscribe(callble, min_wait=0, min_count=1, state=state)\n\n print(f'Starting experimental run with id: {self.ds.run_id}')\n\n self.datasaver = DataSaver(dataset=self.ds,\n write_period=self.write_period,\n parameters=self.parameters)\n\n return self.datasaver\n\n def __exit__(self, exception_type, exception_value, traceback) -> None:\n\n self.datasaver.flush_data_to_database()\n\n # perform the \"teardown\" events\n for func, args in self.exitactions:\n func(*args)\n\n self.ds.unsubscribe_all()\n\n # and finally mark the dataset as closed, thus\n # finishing the measurement\n self.ds.mark_complete()\n\n\nclass Measurement:\n \"\"\"\n Measurement procedure container\n\n Attributes:\n name (str): The name of this measurement/run. Is used by the dataset\n to give a name to the results_table.\n \"\"\"\n def __init__(self, exp: Optional[Experiment]=None,\n station: Optional[qc.Station]=None) -> None:\n \"\"\"\n Init\n\n Args:\n exp: Specify the experiment to use. If not given\n the default one is used.\n station: The QCoDeS station to snapshot. 
If not given, the\n default one is used.\n \"\"\"\n self.exp = exp\n self.exitactions: List[Tuple[Callable, Sequence]] = []\n self.enteractions: List[Tuple[Callable, Sequence]] = []\n self.subscribers: List[Tuple[Callable, Union[MutableSequence,\n MutableMapping]]] = []\n self.experiment = exp\n self.station = station\n self.parameters: Dict[str, ParamSpec] = OrderedDict()\n self._write_period: Optional[Number] = None\n self.name = ''\n\n @property\n def write_period(self):\n return self._write_period\n\n @write_period.setter\n def write_period(self, wp: Number) -> None:\n if not isinstance(wp, Number):\n raise ValueError('The write period must be a number (of seconds).')\n wp_float = cast(float, wp)\n if wp_float < 1e-3:\n raise ValueError('The write period must be at least 1 ms.')\n self._write_period = wp\n\n def _registration_validation(\n self, name: str, setpoints: Sequence[str]=None,\n basis: Sequence[str]=None) -> Tuple[List[str], List[str]]:\n \"\"\"\n Helper function to do all the validation in terms of dependencies\n when adding parameters, e.g. that no setpoints have setpoints etc.\n\n Called by register_parameter and register_custom_parameter\n\n Args:\n name: Name of the parameter to register\n setpoints: name(s) of the setpoint parameter(s)\n basis: name(s) of the parameter(s) that this parameter is\n inferred from\n \"\"\"\n\n # now handle setpoints\n depends_on = []\n if setpoints:\n for sp in setpoints:\n if sp not in list(self.parameters.keys()):\n raise ValueError(f'Unknown setpoint: {sp}.'\n ' Please register that parameter first.')\n elif sp == name:\n raise ValueError('A parameter can not have itself as '\n 'setpoint.')\n elif self.parameters[sp].depends_on != '':\n raise ValueError(\"A parameter's setpoints can not have \"\n f\"setpoints themselves. {sp} depends on\"\n f\" {self.parameters[sp].depends_on}\")\n else:\n depends_on.append(sp)\n\n # now handle inferred parameters\n inf_from = []\n if basis:\n for inff in basis:\n if inff not in list(self.parameters.keys()):\n raise ValueError(f'Unknown basis parameter: {inff}.'\n ' Please register that parameter first.')\n elif inff == name:\n raise ValueError('A parameter can not be inferred from'\n 'itself.')\n else:\n inf_from.append(inff)\n\n return (depends_on, inf_from)\n\n def register_parameter(\n self, parameter: _BaseParameter,\n setpoints: Tuple[_BaseParameter]=None,\n basis: Tuple[_BaseParameter]=None) -> None:\n \"\"\"\n Add QCoDeS Parameter to the dataset produced by running this\n measurement.\n\n TODO: Does not handle metadata yet\n\n Args:\n parameter: The parameter to add\n setpoints: The setpoints for this parameter. If this parameter\n is a setpoint, it should be left blank\n basis: The parameters that this parameter is inferred from. If\n this parameter is not inferred from any other parameters,\n this should be left blank.\n \"\"\"\n # input validation\n if not isinstance(parameter, _BaseParameter):\n raise ValueError('Can not register object of type {}. Can only '\n 'register a QCoDeS Parameter.'\n ''.format(type(parameter)))\n # perhaps users will want a different name? 
But the name must be unique\n # on a per-run basis\n # we also use the name below, but perhaps is is better to have\n # a more robust Parameter2String function?\n name = str(parameter)\n\n if isinstance(parameter, ArrayParameter):\n if parameter.setpoint_names:\n spname = (f'{parameter._instrument.name}_'\n f'{parameter.setpoint_names[0]}')\n else:\n spname = f'{name}_setpoint'\n if parameter.setpoint_labels:\n splabel = parameter.setpoint_labels[0]\n else:\n splabel = ''\n if parameter.setpoint_units:\n spunit = parameter.setpoint_units[0]\n else:\n spunit = ''\n\n sp = ParamSpec(name=spname, paramtype='numeric',\n label=splabel, unit=spunit)\n\n self.parameters[spname] = sp\n setpoints = setpoints if setpoints else ()\n setpoints += (spname,)\n\n # We currently treat ALL parameters as 'numeric' and fail to add them\n # to the dataset if they can not be unraveled to fit that description\n # (except strings, we just let those through)\n # this is indeed a limitation, but a sane one. We might loosen that\n # requirement later and start saving binary blobs with the datasaver,\n # but for now binary blob saving is referred to using the DataSet\n # API directly\n paramtype = 'numeric'\n label = parameter.label\n unit = parameter.unit\n\n if setpoints:\n sp_strings = [str(sp) for sp in setpoints]\n else:\n sp_strings = []\n if basis:\n bs_strings = [str(bs) for bs in basis]\n else:\n bs_strings = []\n\n # validate all dependencies\n depends_on, inf_from = self._registration_validation(name, sp_strings,\n bs_strings)\n\n paramspec = ParamSpec(name=name,\n paramtype=paramtype,\n label=label,\n unit=unit,\n inferred_from=inf_from,\n depends_on=depends_on)\n\n # ensure the correct order\n if name in self.parameters.keys():\n self.parameters.pop(name)\n\n self.parameters[name] = paramspec\n log.info(f'Registered {name} in the Measurement.')\n\n def register_custom_parameter(\n self, name: str,\n label: str=None, unit: str=None,\n basis: Sequence[Union[str, _BaseParameter]]=None,\n setpoints: Sequence[Union[str, _BaseParameter]]=None) -> None:\n \"\"\"\n Register a custom parameter with this measurement\n\n Args:\n name: The name that this parameter will have in the dataset. 
Must\n be unique (will overwrite an existing parameter with the same\n name!)\n label: The label\n unit: The unit\n basis: A list of either QCoDeS Parameters or the names\n of parameters already registered in the measurement that\n this parameter is inferred from\n setpoints: A list of either QCoDeS Parameters or the names of\n of parameters already registered in the measurement that\n are the setpoints of this parameter\n \"\"\"\n\n # validate dependencies\n if setpoints:\n sp_strings = [str(sp) for sp in setpoints]\n else:\n sp_strings = []\n if basis:\n bs_strings = [str(bs) for bs in basis]\n else:\n bs_strings = []\n\n # validate all dependencies\n depends_on, inf_from = self._registration_validation(name, sp_strings,\n bs_strings)\n\n parspec = ParamSpec(name=name, paramtype='numeric',\n label=label, unit=unit,\n inferred_from=inf_from,\n depends_on=depends_on)\n\n # ensure the correct order\n if name in self.parameters.keys():\n self.parameters.pop(name)\n\n self.parameters[name] = parspec\n\n def unregister_parameter(self,\n parameter: Union[_BaseParameter, str]) -> None:\n \"\"\"\n Remove a custom/QCoDeS parameter from the dataset produced by\n running this measurement\n \"\"\"\n if isinstance(parameter, _BaseParameter):\n param = str(parameter)\n elif isinstance(parameter, str):\n param = parameter\n else:\n raise ValueError('Wrong input type. Must be a QCoDeS parameter or'\n ' the name (a string) of a parameter.')\n\n if param not in self.parameters:\n log.info(f'Tried to unregister {param}, but it was not'\n 'registered.')\n return\n\n for name, paramspec in self.parameters.items():\n if param in paramspec.depends_on:\n raise ValueError(f'Can not unregister {param}, it is a '\n f'setpoint for {name}')\n if param in paramspec.inferred_from:\n raise ValueError(f'Can not unregister {param}, it is a '\n f'basis for {name}')\n\n self.parameters.pop(param)\n log.info(f'Removed {param} from Measurement.')\n\n def add_before_run(self, func: Callable, args: tuple) -> None:\n \"\"\"\n Add an action to be performed before the measurement.\n\n Args:\n func: Function to be performed\n args: The arguments to said function\n \"\"\"\n # some tentative cheap checking\n nargs = len(signature(func).parameters)\n if len(args) != nargs:\n raise ValueError('Mismatch between function call signature and '\n 'the provided arguments.')\n\n self.enteractions.append((func, args))\n\n def add_after_run(self, func: Callable, args: tuple) -> None:\n \"\"\"\n Add an action to be performed after the measurement.\n\n Args:\n func: Function to be performed\n args: The arguments to said function\n \"\"\"\n # some tentative cheap checking\n nargs = len(signature(func).parameters)\n if len(args) != nargs:\n raise ValueError('Mismatch between function call signature and '\n 'the provided arguments.')\n\n self.exitactions.append((func, args))\n\n def add_subscriber(self,\n func: Callable,\n state: Union[MutableSequence, MutableMapping]) -> None:\n \"\"\"\n Add a subscriber to the dataset of the measurement.\n\n Args:\n name: The name of the subscriber.\n func: A function taking three positional arguments: a list of\n tuples of parameter values, an integer, a mutable variable\n (list or dict) to hold state/writes updates to.\n state: The variable to hold the state.\n \"\"\"\n # TODO: Should we protect users from registering two subscribers\n # with the same state?\n self.subscribers.append((func, state))\n\n def run(self):\n \"\"\"\n Returns the context manager for the experimental run\n \"\"\"\n return 
Runner(self.enteractions, self.exitactions,\n self.experiment, station=self.station,\n write_period=self._write_period,\n parameters=self.parameters,\n name=self.name,\n subscribers=self.subscribers)\n", "path": "qcodes/dataset/measurements.py" } ]
[ { "content": "import json\nimport logging\nfrom time import monotonic\nfrom collections import OrderedDict\nfrom typing import (Callable, Union, Dict, Tuple, List, Sequence, cast,\n MutableMapping, MutableSequence, Optional)\nfrom inspect import signature\nfrom numbers import Number\n\nimport numpy as np\n\nimport qcodes as qc\nfrom qcodes import Station\nfrom qcodes.instrument.parameter import ArrayParameter, _BaseParameter\nfrom qcodes.dataset.experiment_container import Experiment\nfrom qcodes.dataset.param_spec import ParamSpec\nfrom qcodes.dataset.data_set import DataSet\n\nlog = logging.getLogger(__name__)\n\n\nclass ParameterTypeError(Exception):\n pass\n\n\nclass DataSaver:\n \"\"\"\n The class used byt the Runner context manager to handle the\n datasaving to the database\n \"\"\"\n\n def __init__(self, dataset: DataSet, write_period: float,\n parameters: Dict[str, ParamSpec]) -> None:\n self._dataset = dataset\n self.write_period = write_period\n self.parameters = parameters\n self._known_parameters = list(parameters.keys())\n self._results: List[dict] = [] # will be filled by addResult\n self._last_save_time = monotonic()\n self._known_dependencies: Dict[str, str] = {}\n for param, parspec in parameters.items():\n if parspec.depends_on != '':\n self._known_dependencies.update({str(param):\n parspec.depends_on.split(', ')})\n\n def add_result(self,\n *res_tuple: Tuple[Union[_BaseParameter, str],\n Union[str, int, float, np.ndarray]])-> None:\n \"\"\"\n Add a result to the measurement results. Represents a measurement\n point in the space of measurement parameters, e.g. in an experiment\n varying two voltages and measuring two currents, a measurement point\n is the four dimensional (v1, v2, c1, c2). The corresponding call\n to this function would be (e.g.)\n >> datasaver.add_result((v1, 0.1), (v2, 0.2), (c1, 5), (c2, -2.1))\n\n For better performance, this function does not immediately write to\n the database, but keeps the results in memory. Writing happens every\n `write_period` seconds and during the __exit__ method if this class.\n\n Regarding arrays: since arrays as binary blobs are (almost) worthless\n in a relational database, this function \"unravels\" arrays passed to it.\n That, in turn, forces us to impose rules on what can be saved in one\n go. Any number of scalars and any number of arrays OF THE SAME LENGTH\n can be passed to add_result. The scalars are duplicated to match the\n arrays.\n\n Args:\n res: a dictionary with keys that are parameter names and items\n that are the corresponding values at this measurement point.\n\n Raises:\n ValueError: if a parameter name not registered in the parent\n Measurement object is encountered.\n ParameterTypeError: if a parameter is given a value not matching\n its type.\n \"\"\"\n res = list(res_tuple) # ArrayParameters cause us to mutate the results\n\n # we iterate through the input twice in order to allow users to call\n # add_result with the arguments in any particular order, i.e. 
NOT\n # enforcing that setpoints come before dependent variables.\n # Also, we pre-check that array dimensions are compatible before\n # proceeding.\n input_size = 1\n params = []\n for partial_result in res:\n parameter = partial_result[0]\n paramstr = str(partial_result[0])\n value = partial_result[1]\n params.append(paramstr)\n if paramstr not in self._known_parameters:\n raise ValueError(f'Can not add a result for {paramstr}, no '\n 'such parameter registered in this '\n 'measurement.')\n if isinstance(value, np.ndarray):\n value = cast(np.ndarray, partial_result[1])\n array_size = len(value)\n if input_size > 1 and input_size != array_size:\n raise ValueError('Incompatible array dimensions. Trying to'\n f' add arrays of dimension {input_size} '\n f'and {array_size}')\n else:\n input_size = array_size\n # TODO (WilliamHPNielsen): The following code block is ugly and\n # brittle and should be enough to convince us to abandon the\n # design of ArrayParameters (possibly) containing (some of) their\n # setpoints\n if isinstance(parameter, ArrayParameter):\n sps = parameter.setpoints[0]\n inst_name = getattr(parameter._instrument, 'name', '')\n if inst_name:\n spname = f'{inst_name}_{parameter.setpoint_names[0]}'\n else:\n spname = parameter.setpoint_names[0]\n\n if f'{paramstr}_setpoint' in self.parameters.keys():\n res.append((f'{paramstr}_setpoint', sps))\n elif spname in self.parameters.keys():\n res.append((spname, sps))\n else:\n raise RuntimeError('No setpoints registered for '\n f'ArrayParameter {paramstr}!')\n\n # Now check for missing setpoints\n for partial_result in res:\n param = str(partial_result[0])\n value = partial_result[1]\n if param in self._known_dependencies.keys():\n stuffweneed = set(self._known_dependencies[param])\n stuffwehave = set(params)\n if not stuffweneed.issubset(stuffwehave):\n raise ValueError('Can not add this result; missing '\n f'setpoint values for {param}:'\n f' {stuffweneed}.'\n f' Values only given for {params}.')\n\n for index in range(input_size):\n res_dict = {}\n for partial_result in res:\n param = str(partial_result[0])\n value = partial_result[1]\n\n # For compatibility with the old Loop, setpoints are\n # tuples of numbers (usually tuple(np.linspace(...))\n if hasattr(value, '__len__') and not(isinstance(value, str)):\n res_dict.update({param: value[index]})\n else:\n res_dict.update({param: value})\n\n self._results.append(res_dict)\n\n if monotonic() - self._last_save_time > self.write_period:\n self.flush_data_to_database()\n self._last_save_time = monotonic()\n\n def flush_data_to_database(self):\n \"\"\"\n Write the in-memory results to the database.\n \"\"\"\n log.debug('Flushing to database')\n if self._results != []:\n try:\n write_point = self._dataset.add_results(self._results)\n log.debug(f'Successfully wrote from index {write_point}')\n self._results = []\n except Exception as e:\n log.warning(f'Could not commit to database; {e}')\n else:\n log.debug('No results to flush')\n\n @property\n def run_id(self):\n return self._dataset.run_id\n\n @property\n def points_written(self):\n return self._dataset.number_of_results\n\n @property\n def dataset(self):\n return self._dataset\n\n\nclass Runner:\n \"\"\"\n Context manager for the measurement.\n Lives inside a Measurement and should never be instantiated\n outside a Measurement.\n\n This context manager handles all the dirty business of writing data\n to the database. 
Additionally, it may perform experiment bootstrapping\n and clean-up after the measurement.\n \"\"\"\n def __init__(\n self, enteractions: List, exitactions: List,\n experiment: Experiment=None, station: Station=None,\n write_period: float=None,\n parameters: Dict[str, ParamSpec]=None,\n name: str='',\n subscribers: List=[]) -> None:\n\n self.enteractions = enteractions\n self.exitactions = exitactions\n self.subscribers = subscribers\n self.experiment = experiment\n self.station = station\n self.parameters = parameters\n # here we use 5 s as a sane default, but that value should perhaps\n # be read from some config file\n self.write_period = write_period if write_period is not None else 5\n self.name = name if name else 'results'\n\n def __enter__(self) -> DataSaver:\n # TODO: should user actions really precede the dataset?\n # first do whatever bootstrapping the user specified\n for func, args in self.enteractions:\n func(*args)\n\n # next set up the \"datasaver\"\n if self.experiment:\n eid = self.experiment.exp_id\n else:\n eid = None\n\n self.ds = qc.new_data_set(self.name, eid)\n\n # .. and give the dataset a snapshot as metadata\n if self.station is None:\n station = qc.Station.default\n else:\n station = self.station\n\n if station:\n self.ds.add_metadata('snapshot',\n json.dumps({'station': station.snapshot()}))\n\n for paramspec in self.parameters.values():\n self.ds.add_parameter(paramspec)\n\n # register all subscribers\n for (callble, state) in self.subscribers:\n # We register with minimal waiting time.\n # That should make all subscribers be called when data is flushed\n # to the database\n log.debug(f'Subscribing callable {callble} with state {state}')\n self.ds.subscribe(callble, min_wait=0, min_count=1, state=state)\n\n print(f'Starting experimental run with id: {self.ds.run_id}')\n\n self.datasaver = DataSaver(dataset=self.ds,\n write_period=self.write_period,\n parameters=self.parameters)\n\n return self.datasaver\n\n def __exit__(self, exception_type, exception_value, traceback) -> None:\n\n self.datasaver.flush_data_to_database()\n\n # perform the \"teardown\" events\n for func, args in self.exitactions:\n func(*args)\n\n self.ds.unsubscribe_all()\n\n # and finally mark the dataset as closed, thus\n # finishing the measurement\n self.ds.mark_complete()\n\n\nclass Measurement:\n \"\"\"\n Measurement procedure container\n\n Attributes:\n name (str): The name of this measurement/run. Is used by the dataset\n to give a name to the results_table.\n \"\"\"\n def __init__(self, exp: Optional[Experiment]=None,\n station: Optional[qc.Station]=None) -> None:\n \"\"\"\n Init\n\n Args:\n exp: Specify the experiment to use. If not given\n the default one is used.\n station: The QCoDeS station to snapshot. 
If not given, the\n default one is used.\n \"\"\"\n self.exp = exp\n self.exitactions: List[Tuple[Callable, Sequence]] = []\n self.enteractions: List[Tuple[Callable, Sequence]] = []\n self.subscribers: List[Tuple[Callable, Union[MutableSequence,\n MutableMapping]]] = []\n self.experiment = exp\n self.station = station\n self.parameters: Dict[str, ParamSpec] = OrderedDict()\n self._write_period: Optional[Number] = None\n self.name = ''\n\n @property\n def write_period(self):\n return self._write_period\n\n @write_period.setter\n def write_period(self, wp: Number) -> None:\n if not isinstance(wp, Number):\n raise ValueError('The write period must be a number (of seconds).')\n wp_float = cast(float, wp)\n if wp_float < 1e-3:\n raise ValueError('The write period must be at least 1 ms.')\n self._write_period = wp\n\n def _registration_validation(\n self, name: str, setpoints: Sequence[str]=None,\n basis: Sequence[str]=None) -> Tuple[List[str], List[str]]:\n \"\"\"\n Helper function to do all the validation in terms of dependencies\n when adding parameters, e.g. that no setpoints have setpoints etc.\n\n Called by register_parameter and register_custom_parameter\n\n Args:\n name: Name of the parameter to register\n setpoints: name(s) of the setpoint parameter(s)\n basis: name(s) of the parameter(s) that this parameter is\n inferred from\n \"\"\"\n\n # now handle setpoints\n depends_on = []\n if setpoints:\n for sp in setpoints:\n if sp not in list(self.parameters.keys()):\n raise ValueError(f'Unknown setpoint: {sp}.'\n ' Please register that parameter first.')\n elif sp == name:\n raise ValueError('A parameter can not have itself as '\n 'setpoint.')\n elif self.parameters[sp].depends_on != '':\n raise ValueError(\"A parameter's setpoints can not have \"\n f\"setpoints themselves. {sp} depends on\"\n f\" {self.parameters[sp].depends_on}\")\n else:\n depends_on.append(sp)\n\n # now handle inferred parameters\n inf_from = []\n if basis:\n for inff in basis:\n if inff not in list(self.parameters.keys()):\n raise ValueError(f'Unknown basis parameter: {inff}.'\n ' Please register that parameter first.')\n elif inff == name:\n raise ValueError('A parameter can not be inferred from'\n 'itself.')\n else:\n inf_from.append(inff)\n\n return (depends_on, inf_from)\n\n def register_parameter(\n self, parameter: _BaseParameter,\n setpoints: Tuple[_BaseParameter]=None,\n basis: Tuple[_BaseParameter]=None) -> None:\n \"\"\"\n Add QCoDeS Parameter to the dataset produced by running this\n measurement.\n\n TODO: Does not handle metadata yet\n\n Args:\n parameter: The parameter to add\n setpoints: The setpoints for this parameter. If this parameter\n is a setpoint, it should be left blank\n basis: The parameters that this parameter is inferred from. If\n this parameter is not inferred from any other parameters,\n this should be left blank.\n \"\"\"\n # input validation\n if not isinstance(parameter, _BaseParameter):\n raise ValueError('Can not register object of type {}. Can only '\n 'register a QCoDeS Parameter.'\n ''.format(type(parameter)))\n # perhaps users will want a different name? 
But the name must be unique\n # on a per-run basis\n # we also use the name below, but perhaps is is better to have\n # a more robust Parameter2String function?\n name = str(parameter)\n\n if isinstance(parameter, ArrayParameter):\n if parameter.setpoint_names:\n spname = (f'{parameter._instrument.name}_'\n f'{parameter.setpoint_names[0]}')\n else:\n spname = f'{name}_setpoint'\n if parameter.setpoint_labels:\n splabel = parameter.setpoint_labels[0]\n else:\n splabel = ''\n if parameter.setpoint_units:\n spunit = parameter.setpoint_units[0]\n else:\n spunit = ''\n\n sp = ParamSpec(name=spname, paramtype='numeric',\n label=splabel, unit=spunit)\n\n self.parameters[spname] = sp\n setpoints = setpoints if setpoints else ()\n setpoints += (spname,)\n\n # We currently treat ALL parameters as 'numeric' and fail to add them\n # to the dataset if they can not be unraveled to fit that description\n # (except strings, we just let those through)\n # this is indeed a limitation, but a sane one. We might loosen that\n # requirement later and start saving binary blobs with the datasaver,\n # but for now binary blob saving is referred to using the DataSet\n # API directly\n paramtype = 'numeric'\n label = parameter.label\n unit = parameter.unit\n\n if setpoints:\n sp_strings = [str(sp) for sp in setpoints]\n else:\n sp_strings = []\n if basis:\n bs_strings = [str(bs) for bs in basis]\n else:\n bs_strings = []\n\n # validate all dependencies\n depends_on, inf_from = self._registration_validation(name, sp_strings,\n bs_strings)\n\n paramspec = ParamSpec(name=name,\n paramtype=paramtype,\n label=label,\n unit=unit,\n inferred_from=inf_from,\n depends_on=depends_on)\n\n # ensure the correct order\n if name in self.parameters.keys():\n self.parameters.pop(name)\n\n self.parameters[name] = paramspec\n log.info(f'Registered {name} in the Measurement.')\n\n def register_custom_parameter(\n self, name: str,\n label: str=None, unit: str=None,\n basis: Sequence[Union[str, _BaseParameter]]=None,\n setpoints: Sequence[Union[str, _BaseParameter]]=None) -> None:\n \"\"\"\n Register a custom parameter with this measurement\n\n Args:\n name: The name that this parameter will have in the dataset. 
Must\n be unique (will overwrite an existing parameter with the same\n name!)\n label: The label\n unit: The unit\n basis: A list of either QCoDeS Parameters or the names\n of parameters already registered in the measurement that\n this parameter is inferred from\n setpoints: A list of either QCoDeS Parameters or the names of\n of parameters already registered in the measurement that\n are the setpoints of this parameter\n \"\"\"\n\n # validate dependencies\n if setpoints:\n sp_strings = [str(sp) for sp in setpoints]\n else:\n sp_strings = []\n if basis:\n bs_strings = [str(bs) for bs in basis]\n else:\n bs_strings = []\n\n # validate all dependencies\n depends_on, inf_from = self._registration_validation(name, sp_strings,\n bs_strings)\n\n parspec = ParamSpec(name=name, paramtype='numeric',\n label=label, unit=unit,\n inferred_from=inf_from,\n depends_on=depends_on)\n\n # ensure the correct order\n if name in self.parameters.keys():\n self.parameters.pop(name)\n\n self.parameters[name] = parspec\n\n def unregister_parameter(self,\n parameter: Union[_BaseParameter, str]) -> None:\n \"\"\"\n Remove a custom/QCoDeS parameter from the dataset produced by\n running this measurement\n \"\"\"\n if isinstance(parameter, _BaseParameter):\n param = str(parameter)\n elif isinstance(parameter, str):\n param = parameter\n else:\n raise ValueError('Wrong input type. Must be a QCoDeS parameter or'\n ' the name (a string) of a parameter.')\n\n if param not in self.parameters:\n log.info(f'Tried to unregister {param}, but it was not'\n 'registered.')\n return\n\n for name, paramspec in self.parameters.items():\n if param in paramspec.depends_on:\n raise ValueError(f'Can not unregister {param}, it is a '\n f'setpoint for {name}')\n if param in paramspec.inferred_from:\n raise ValueError(f'Can not unregister {param}, it is a '\n f'basis for {name}')\n\n self.parameters.pop(param)\n log.info(f'Removed {param} from Measurement.')\n\n def add_before_run(self, func: Callable, args: tuple) -> None:\n \"\"\"\n Add an action to be performed before the measurement.\n\n Args:\n func: Function to be performed\n args: The arguments to said function\n \"\"\"\n # some tentative cheap checking\n nargs = len(signature(func).parameters)\n if len(args) != nargs:\n raise ValueError('Mismatch between function call signature and '\n 'the provided arguments.')\n\n self.enteractions.append((func, args))\n\n def add_after_run(self, func: Callable, args: tuple) -> None:\n \"\"\"\n Add an action to be performed after the measurement.\n\n Args:\n func: Function to be performed\n args: The arguments to said function\n \"\"\"\n # some tentative cheap checking\n nargs = len(signature(func).parameters)\n if len(args) != nargs:\n raise ValueError('Mismatch between function call signature and '\n 'the provided arguments.')\n\n self.exitactions.append((func, args))\n\n def add_subscriber(self,\n func: Callable,\n state: Union[MutableSequence, MutableMapping]) -> None:\n \"\"\"\n Add a subscriber to the dataset of the measurement.\n\n Args:\n name: The name of the subscriber.\n func: A function taking three positional arguments: a list of\n tuples of parameter values, an integer, a mutable variable\n (list or dict) to hold state/writes updates to.\n state: The variable to hold the state.\n \"\"\"\n # TODO: Should we protect users from registering two subscribers\n # with the same state?\n self.subscribers.append((func, state))\n\n def run(self):\n \"\"\"\n Returns the context manager for the experimental run\n \"\"\"\n return 
Runner(self.enteractions, self.exitactions,\n self.experiment, station=self.station,\n write_period=self._write_period,\n parameters=self.parameters,\n name=self.name,\n subscribers=self.subscribers)\n", "path": "qcodes/dataset/measurements.py" } ]
diff --git a/qcodes/dataset/measurements.py b/qcodes/dataset/measurements.py index e44b2a7b19a..f475a461d79 100644 --- a/qcodes/dataset/measurements.py +++ b/qcodes/dataset/measurements.py @@ -219,7 +219,7 @@ def __enter__(self) -> DataSaver: # next set up the "datasaver" if self.experiment: - eid = self.experiment.id + eid = self.experiment.exp_id else: eid = None diff --git a/qcodes/tests/dataset/test_experiment_container.py b/qcodes/tests/dataset/test_experiment_container.py new file mode 100644 index 00000000000..b096b7f1175 --- /dev/null +++ b/qcodes/tests/dataset/test_experiment_container.py @@ -0,0 +1,41 @@ +import pytest +import tempfile +import os + +import qcodes as qc +from qcodes.dataset.experiment_container import load_experiment_by_name, \ + new_experiment +from qcodes.dataset.sqlite_base import connect, init_db +from qcodes.dataset.measurements import Measurement + + [email protected](scope="function") +def empty_temp_db(): + # create a temp database for testing + with tempfile.TemporaryDirectory() as tmpdirname: + qc.config["core"]["db_location"] = os.path.join(tmpdirname, 'temp.db') + qc.config["core"]["db_debug"] = True + # this is somewhat annoying but these module scope variables + # are initialized at import time so they need to be overwritten + qc.dataset.experiment_container.DB = qc.config["core"]["db_location"] + qc.dataset.data_set.DB = qc.config["core"]["db_location"] + qc.dataset.experiment_container.debug_db = qc.config["core"]["db_debug"] + _c = connect(qc.config["core"]["db_location"], qc.config["core"]["db_debug"]) + init_db(_c) + _c.close() + yield + + +def test_run_loaded_experiment(empty_temp_db): + """ + Test that we can resume a measurement after loading by name + """ + new_experiment("test", "test1") + exp_loaded = load_experiment_by_name("test", "test1") + + meas = Measurement(exp=exp_loaded) + with meas.run(): + pass + + with meas.run(): + pass
opendatacube__datacube-core-348
Unnecessary dependency on `pathlib` when running in python3

### Expected behaviour

Datacube shouldn't depend on unnecessary packages when running in Python 3.

### Actual behaviour

There's a dependency on `pathlib`, which is included in the Python 3 standard library and so doesn't need to be installed. This causes trouble on the NCI deployment when trying to load `stats` modules, which use the `setuptools` entry_points for their registration, and returns error messages to users trying to load them.

### Steps to reproduce the behaviour

```
module load agdc-py3-prod agdc_statistics
dra547@raijin4:~ $ python
Python 3.6.3 | packaged by conda-forge | (default, Nov 4 2017, 10:10:56)
[GCC 4.8.2 20140120 (Red Hat 4.8.2-15)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import datacube_stats.statistics
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/g/data/v10/public/modules/agdc_statistics/0.9a7/lib/python3.6/site-packages/datacube_stats/statistics.py", line 769, in <module>
    STATS[entry_point.name] = entry_point.load()
  File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2404, in load
    self.require(*args, **kwargs)
  File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2427, in require
    items = working_set.resolve(reqs, env, installer, extras=self.extras)
  File "/g/data/v10/public/modules/agdc-py3-env/20171214/envs/agdc/lib/python3.6/site-packages/pkg_resources/__init__.py", line 870, in resolve
    raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'pathlib' distribution was not found and is required by datacube
>>>
```

### The Fix

Modify `setup.py` to use [platform specific dependencies](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies) so that `pathlib` is only required when not running on Python 3.
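For reference, this is the shape of the change in `install_requires`: a PEP 508 environment marker keeps the `pathlib` backport out of Python 3 installs. The excerpt matches the fix recorded below; the rest of the dependency list is elided.

```python
# setup.py (excerpt): only require the pathlib backport on Python 2,
# where it is not part of the standard library.
install_requires=[
    # ...
    'pathlib;python_version<"3"',
    # ...
],
```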
[ { "content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'pathlib',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate'\n ]\n },\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest', 'pytest-cov', 'mock', 'pep8', 'pylint', 'hypothesis', 'compliance-checker', 'objgraph'\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate'\n ]\n },\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index ea8870e48b..52cea8ff41 100755 --- a/setup.py +++ b/setup.py @@ -76,7 +76,7 @@ 'jsonschema', 'netcdf4', 'numpy', - 'pathlib', + 'pathlib;python_version<"3"', 'psycopg2', 'pypeg2', 'python-dateutil',
django__channels-1614
asgiref dependency should be updated; channels 3.0.3 requires min. 3.2.10, which doesn't work for background workers

Channels 3.0.3 depends on `asgiref>=3.2.10`; however, with that version, background workers will fail with `TypeError: __call__() missing 2 required positional arguments: 'receive' and 'send'` when receiving a message, even if the consumer is declared with `.as_asgi()`.
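The fix recorded below simply raises the minimum `asgiref` version in `setup.py` to a release that works with background workers declared via `.as_asgi()`. An excerpt of the adjusted pin:

```python
# setup.py (excerpt): bump the asgiref floor from 3.2.10 to 3.3.1.
install_requires=[
    'Django>=2.2',
    'asgiref>=3.3.1,<4',
    'daphne>=3.0,<4',
],
```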
[ { "content": "from setuptools import find_packages, setup\nfrom channels import __version__\n\nsetup(\n name='channels',\n version=__version__,\n url='http://github.com/django/channels',\n author='Django Software Foundation',\n author_email='[email protected]',\n description=\"Brings async, event-driven capabilities to Django. Django 2.2 and up only.\",\n license='BSD',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n python_requires='>=3.6',\n install_requires=[\n 'Django>=2.2',\n 'asgiref>=3.2.10,<4',\n 'daphne>=3.0,<4',\n ],\n extras_require={\n 'tests': [\n \"pytest\",\n \"pytest-django\",\n \"pytest-asyncio\",\n \"async_generator\",\n \"async-timeout\",\n \"coverage~=4.5\",\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Framework :: Django',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import find_packages, setup\nfrom channels import __version__\n\nsetup(\n name='channels',\n version=__version__,\n url='http://github.com/django/channels',\n author='Django Software Foundation',\n author_email='[email protected]',\n description=\"Brings async, event-driven capabilities to Django. Django 2.2 and up only.\",\n license='BSD',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n python_requires='>=3.6',\n install_requires=[\n 'Django>=2.2',\n 'asgiref>=3.3.1,<4',\n 'daphne>=3.0,<4',\n ],\n extras_require={\n 'tests': [\n \"pytest\",\n \"pytest-django\",\n \"pytest-asyncio\",\n \"async_generator\",\n \"async-timeout\",\n \"coverage~=4.5\",\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Framework :: Django',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index c9bde0c7c..89f221fd2 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ python_requires='>=3.6', install_requires=[ 'Django>=2.2', - 'asgiref>=3.2.10,<4', + 'asgiref>=3.3.1,<4', 'daphne>=3.0,<4', ], extras_require={
angr__angr-1303
Cachetools broke their API

There's a new major version of cachetools (providing LRUCache), 3.0.0. This has caused everything to break. I have pinned our version to `cachetools<3` for the time being, but we should migrate. My guess is that this is because we were using the `missing` argument to LRUCache (in claripy, specifically), and I am fairly sure the intended replacement is to [implement the `__missing__` method](https://cachetools.readthedocs.io/en/latest/#extending-cache-classes). Unsure if there are more implications, which is why this issue is open under angr instead of claripy.
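As a reference for that migration, here is a minimal sketch of the pattern the cachetools docs recommend in place of the removed `missing=` argument: subclass `LRUCache` and implement `__missing__`. The `fetch` callable below is a stand-in for whatever claripy currently passes as `missing`, not an existing claripy name.

```python
from cachetools import LRUCache

class ComputingLRUCache(LRUCache):
    """LRU cache that computes absent entries on first access."""

    def __init__(self, maxsize, fetch):
        super().__init__(maxsize)
        self._fetch = fetch  # stand-in for the old `missing` callable

    def __missing__(self, key):
        # Cache.__getitem__ calls this when `key` is absent: compute,
        # store (which also applies LRU eviction), and return the value.
        value = self._fetch(key)
        self[key] = value
        return value

# Usage: cache = ComputingLRUCache(maxsize=256, fetch=expensive_lookup)
```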
[ { "content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/MIGRATION.html\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\nclass build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\ncmdclass = {\n 'build': build,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', 
'_').replace('-', '_'))\n\nsetup(\n name='angr',\n version='8.18.10.25',\n python_requires='>=3.5',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'ana',\n 'sortedcontainers',\n 'cachetools<3',\n 'capstone>=3.0.5rc2',\n 'cooldict',\n 'dpkt',\n 'futures; python_version == \"2.7\"',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar',\n 'rpyc',\n 'cffi>=1.7.0',\n 'unicorn',\n 'archinfo==8.18.10.25',\n 'claripy==8.18.10.25',\n 'cle==8.18.10.25',\n 'pyvex==8.18.10.25',\n 'ailment==8.18.10.25',\n 'GitPython',\n 'pycparser>=2.18',\n 'itanium_demangler',\n ],\n setup_requires=['unicorn', 'pyvex'],\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*']\n }\n)\n", "path": "setup.py" } ]
[ { "content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/MIGRATION.html\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\nclass build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\ncmdclass = {\n 'build': build,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', 
'_').replace('-', '_'))\n\nsetup(\n name='angr',\n version='8.18.10.25',\n python_requires='>=3.5',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'ana',\n 'sortedcontainers',\n 'cachetools',\n 'capstone>=3.0.5rc2',\n 'cooldict',\n 'dpkt',\n 'futures; python_version == \"2.7\"',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar',\n 'rpyc',\n 'cffi>=1.7.0',\n 'unicorn',\n 'archinfo==8.18.10.25',\n 'claripy==8.18.10.25',\n 'cle==8.18.10.25',\n 'pyvex==8.18.10.25',\n 'ailment==8.18.10.25',\n 'GitPython',\n 'pycparser>=2.18',\n 'itanium_demangler',\n ],\n setup_requires=['unicorn', 'pyvex'],\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*']\n }\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index cc8a67d3758..1e2e8a1bba6 100644 --- a/setup.py +++ b/setup.py @@ -122,7 +122,7 @@ def run(self, *args): install_requires=[ 'ana', 'sortedcontainers', - 'cachetools<3', + 'cachetools', 'capstone>=3.0.5rc2', 'cooldict', 'dpkt',
Parsl__parsl-1046
ugly status message in local provider
This log message should probably unwrap the dict_values and the [list] so the job ids read cleanly - in commit b9ecc1342e1b6ce795d942c4b9df4c841f00193d
```
2019-06-11 08:40:29.773 parsl.providers.local.local:92 [DEBUG] Checking status of: dict_values([50510])
```
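The report comes down to how Python renders a dict view versus a plain list. Below is a minimal standalone sketch (independent of Parsl itself; the `blocks` mapping and the job id 50510 are just stand-ins taken from the quoted log line) showing why converting the view with `list()` cleans up the message; the merged diff further down applies exactly that conversion where the executor passes `self.blocks.values()` to the provider.

```python
import logging

logging.basicConfig(level=logging.DEBUG, format="%(levelname)s %(message)s")
logger = logging.getLogger("demo")

# Stand-in for the executor's block-id -> provider-job-id mapping.
blocks = {"0": 50510}

# Passing the raw view object reproduces the ugly message:
#   DEBUG Checking status of: dict_values([50510])
logger.debug("Checking status of: %s", blocks.values())

# Converting to a list first gives the readable form:
#   DEBUG Checking status of: [50510]
logger.debug("Checking status of: %s", list(blocks.values()))
```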
[ { "content": "\"\"\"HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution\n\"\"\"\n\nfrom concurrent.futures import Future\nimport typeguard\nimport logging\nimport threading\nimport queue\nimport pickle\nfrom multiprocessing import Process, Queue\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom ipyparallel.serialize import pack_apply_message # ,unpack_apply_message\nfrom ipyparallel.serialize import deserialize_object # ,serialize_object\n\nfrom parsl.app.errors import RemoteExceptionWrapper\nfrom parsl.executors.high_throughput import zmq_pipes\nfrom parsl.executors.high_throughput import interchange\nfrom parsl.executors.errors import *\nfrom parsl.executors.base import ParslExecutor\nfrom parsl.dataflow.error import ConfigurationError\nfrom parsl.providers.provider_base import ExecutionProvider\n\nfrom parsl.utils import RepresentationMixin\nfrom parsl.providers import LocalProvider\n\nlogger = logging.getLogger(__name__)\n\nBUFFER_THRESHOLD = 1024 * 1024\nITEM_THRESHOLD = 1024\n\n\nclass HighThroughputExecutor(ParslExecutor, RepresentationMixin):\n \"\"\"Executor designed for cluster-scale\n\n The HighThroughputExecutor system has the following components:\n 1. The HighThroughputExecutor instance which is run as part of the Parsl script.\n 2. The Interchange which is acts as a load-balancing proxy between workers and Parsl\n 3. The multiprocessing based worker pool which coordinates task execution over several\n cores on a node.\n 4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool\n\n Here is a diagram\n\n .. code:: python\n\n\n | Data | Executor | Interchange | External Process(es)\n | Flow | | |\n Task | Kernel | | |\n +----->|-------->|------------>|->outgoing_q---|-> process_worker_pool\n | | | | batching | | |\n Parsl<---Fut-| | | load-balancing| result exception\n ^ | | | watchdogs | | |\n | | | Q_mngmnt | | V V\n | | | Thread<--|-incoming_q<---|--- +---------+\n | | | | | |\n | | | | | |\n +----update_fut-----+\n\n\n Parameters\n ----------\n\n provider : :class:`~parsl.providers.provider_base.ExecutionProvider`\n Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,\n :class:`~parsl.providers.cobalt.cobalt.Cobalt`,\n :class:`~parsl.providers.condor.condor.Condor`,\n :class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,\n :class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,\n :class:`~parsl.providers.jetstream.jetstream.Jetstream`,\n :class:`~parsl.providers.local.local.Local`,\n :class:`~parsl.providers.sge.sge.GridEngine`,\n :class:`~parsl.providers.slurm.slurm.Slurm`, or\n :class:`~parsl.providers.torque.torque.Torque`.\n\n label : str\n Label for this executor instance.\n\n launch_cmd : str\n Command line string to launch the process_worker_pool from the provider. The command line string\n will be formatted with appropriate values for the following values (debug, task_url, result_url,\n cores_per_worker, nodes_per_block, heartbeat_period ,heartbeat_threshold, logdir). For eg:\n launch_cmd=\"process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}\"\n\n address : string\n An address to connect to the main Parsl process which is reachable from the network in which\n workers will be running. This can be either a hostname as returned by `hostname` or an\n IP address. 
Most login nodes on clusters have several network interfaces available, only\n some of which can be reached from the compute nodes. Some trial and error might be\n necessary to indentify what addresses are reachable from compute nodes.\n\n worker_ports : (int, int)\n Specify the ports to be used by workers to connect to Parsl. If this option is specified,\n worker_port_range will not be honored.\n\n worker_port_range : (int, int)\n Worker ports will be chosen between the two integers provided.\n\n interchange_port_range : (int, int)\n Port range used by Parsl to communicate with the Interchange.\n\n working_dir : str\n Working dir to be used by the executor.\n\n worker_debug : Bool\n Enables worker debug logging.\n\n managed : Bool\n If this executor is managed by the DFK or externally handled.\n\n cores_per_worker : float\n cores to be assigned to each worker. Oversubscription is possible\n by setting cores_per_worker < 1.0. Default=1\n\n mem_per_worker : float\n GB of memory required per worker. If this option is specified, the node manager\n will check the available memory at startup and limit the number of workers such that\n the there's sufficient memory for each worker. Default: None\n\n max_workers : int\n Caps the number of workers launched by the manager. Default: infinity\n\n prefetch_capacity : int\n Number of tasks that could be prefetched over available worker capacity.\n When there are a few tasks (<100) or when tasks are long running, this option should\n be set to 0 for better load balancing. Default is 0.\n\n suppress_failure : Bool\n If set, the interchange will suppress failures rather than terminate early. Default: False\n\n heartbeat_threshold : int\n Seconds since the last message from the counterpart in the communication pair:\n (interchange, manager) after which the counterpart is assumed to be un-available. Default:120s\n\n heartbeat_period : int\n Number of seconds after which a heartbeat message indicating liveness is sent to the\n counterpart (interchange, manager). Default:30s\n\n poll_period : int\n Timeout period to be used by the executor components in milliseconds. Increasing poll_periods\n trades performance for cpu efficiency. 
Default: 10ms\n\n worker_logdir_root : string\n In case of a remote file system, specify the path to where logs will be kept.\n \"\"\"\n\n @typeguard.typechecked\n def __init__(self,\n label: str = 'HighThroughputExecutor',\n provider: ExecutionProvider = LocalProvider(),\n launch_cmd: Optional[str] = None,\n address: str = \"127.0.0.1\",\n worker_ports: Optional[Tuple[int, int]] = None,\n worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),\n interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),\n storage_access: Optional[List[Any]] = None,\n working_dir: Optional[str] = None,\n worker_debug: bool = False,\n cores_per_worker: float = 1.0,\n mem_per_worker: Optional[float] = None,\n max_workers: Union[int, float] = float('inf'),\n prefetch_capacity: int = 0,\n heartbeat_threshold: int = 120,\n heartbeat_period: int = 30,\n poll_period: int = 10,\n suppress_failure: bool = False,\n managed: bool = True,\n worker_logdir_root: Optional[str] = None):\n\n logger.debug(\"Initializing HighThroughputExecutor\")\n\n self.label = label\n self.launch_cmd = launch_cmd\n self.provider = provider\n self.worker_debug = worker_debug\n self.storage_access = storage_access if storage_access is not None else []\n if len(self.storage_access) > 1:\n raise ConfigurationError('Multiple storage access schemes are not supported')\n self.working_dir = working_dir\n self.managed = managed\n self.blocks = {} # type: Dict[str, str]\n self.tasks = {} # type: Dict[str, Future]\n self.cores_per_worker = cores_per_worker\n self.mem_per_worker = mem_per_worker\n self.max_workers = max_workers\n self.prefetch_capacity = prefetch_capacity\n\n self._task_counter = 0\n self.address = address\n self.worker_ports = worker_ports\n self.worker_port_range = worker_port_range\n self.interchange_port_range = interchange_port_range\n self.heartbeat_threshold = heartbeat_threshold\n self.heartbeat_period = heartbeat_period\n self.poll_period = poll_period\n self.suppress_failure = suppress_failure\n self.run_dir = '.'\n self.worker_logdir_root = worker_logdir_root\n\n if not launch_cmd:\n self.launch_cmd = (\"process_worker_pool.py {debug} {max_workers} \"\n \"-p {prefetch_capacity} \"\n \"-c {cores_per_worker} \"\n \"-m {mem_per_worker} \"\n \"--poll {poll_period} \"\n \"--task_url={task_url} \"\n \"--result_url={result_url} \"\n \"--logdir={logdir} \"\n \"--block_id={{block_id}} \"\n \"--hb_period={heartbeat_period} \"\n \"--hb_threshold={heartbeat_threshold} \")\n\n def initialize_scaling(self):\n \"\"\" Compose the launch command and call the scale_out\n\n This should be implemented in the child classes to take care of\n executor specific oddities.\n \"\"\"\n debug_opts = \"--debug\" if self.worker_debug else \"\"\n max_workers = \"\" if self.max_workers == float('inf') else \"--max_workers={}\".format(self.max_workers)\n\n worker_logdir = \"{}/{}\".format(self.run_dir, self.label)\n if self.worker_logdir_root is not None:\n worker_logdir = \"{}/{}\".format(self.worker_logdir_root, self.label)\n\n l_cmd = self.launch_cmd.format(debug=debug_opts,\n prefetch_capacity=self.prefetch_capacity,\n task_url=self.worker_task_url,\n result_url=self.worker_result_url,\n cores_per_worker=self.cores_per_worker,\n mem_per_worker=self.mem_per_worker,\n max_workers=max_workers,\n nodes_per_block=self.provider.nodes_per_block,\n heartbeat_period=self.heartbeat_period,\n heartbeat_threshold=self.heartbeat_threshold,\n poll_period=self.poll_period,\n logdir=worker_logdir)\n self.launch_cmd = l_cmd\n logger.debug(\"Launch 
command: {}\".format(self.launch_cmd))\n\n self._scaling_enabled = self.provider.scaling_enabled\n logger.debug(\"Starting HighThroughputExecutor with provider:\\n%s\", self.provider)\n if hasattr(self.provider, 'init_blocks'):\n try:\n self.scale_out(blocks=self.provider.init_blocks)\n except Exception as e:\n logger.error(\"Scaling out failed: {}\".format(e))\n raise e\n\n def start(self):\n \"\"\"Create the Interchange process and connect to it.\n \"\"\"\n self.outgoing_q = zmq_pipes.TasksOutgoing(\"127.0.0.1\", self.interchange_port_range)\n self.incoming_q = zmq_pipes.ResultsIncoming(\"127.0.0.1\", self.interchange_port_range)\n self.command_client = zmq_pipes.CommandClient(\"127.0.0.1\", self.interchange_port_range)\n\n self.is_alive = True\n\n self._executor_bad_state = threading.Event()\n self._executor_exception = None\n self._queue_management_thread = None\n self._start_queue_management_thread()\n self._start_local_queue_process()\n\n logger.debug(\"Created management thread: {}\".format(self._queue_management_thread))\n\n if self.provider:\n self.initialize_scaling()\n else:\n self._scaling_enabled = False\n logger.debug(\"Starting HighThroughputExecutor with no provider\")\n\n def _queue_management_worker(self):\n \"\"\"Listen to the queue for task status messages and handle them.\n\n Depending on the message, tasks will be updated with results, exceptions,\n or updates. It expects the following messages:\n\n .. code:: python\n\n {\n \"task_id\" : <task_id>\n \"result\" : serialized result object, if task succeeded\n ... more tags could be added later\n }\n\n {\n \"task_id\" : <task_id>\n \"exception\" : serialized exception object, on failure\n }\n\n We do not support these yet, but they could be added easily.\n\n .. code:: python\n\n {\n \"task_id\" : <task_id>\n \"cpu_stat\" : <>\n \"mem_stat\" : <>\n \"io_stat\" : <>\n \"started\" : tstamp\n }\n\n The `None` message is a die request.\n \"\"\"\n logger.debug(\"[MTHREAD] queue management worker starting\")\n\n while not self._executor_bad_state.is_set():\n try:\n msgs = self.incoming_q.get(timeout=1)\n # logger.debug(\"[MTHREAD] get has returned {}\".format(len(msgs)))\n\n except queue.Empty:\n logger.debug(\"[MTHREAD] queue empty\")\n # Timed out.\n pass\n\n except IOError as e:\n logger.exception(\"[MTHREAD] Caught broken queue with exception code {}: {}\".format(e.errno, e))\n return\n\n except Exception as e:\n logger.exception(\"[MTHREAD] Caught unknown exception: {}\".format(e))\n return\n\n else:\n\n if msgs is None:\n logger.debug(\"[MTHREAD] Got None, exiting\")\n return\n\n else:\n for serialized_msg in msgs:\n try:\n msg = pickle.loads(serialized_msg)\n tid = msg['task_id']\n except pickle.UnpicklingError:\n raise BadMessage(\"Message received could not be unpickled\")\n\n except Exception:\n raise BadMessage(\"Message received does not contain 'task_id' field\")\n\n if tid == -1 and 'exception' in msg:\n logger.warning(\"Executor shutting down due to exception from interchange\")\n self._executor_exception, _ = deserialize_object(msg['exception'])\n logger.exception(\"Exception: {}\".format(self._executor_exception))\n # Set bad state to prevent new tasks from being submitted\n self._executor_bad_state.set()\n # We set all current tasks to this exception to make sure that\n # this is raised in the main context.\n for task in self.tasks:\n self.tasks[task].set_exception(self._executor_exception)\n break\n\n task_fut = self.tasks[tid]\n\n if 'result' in msg:\n result, _ = deserialize_object(msg['result'])\n 
task_fut.set_result(result)\n\n elif 'exception' in msg:\n try:\n s, _ = deserialize_object(msg['exception'])\n # s should be a RemoteExceptionWrapper... so we can reraise it\n if isinstance(s, RemoteExceptionWrapper):\n try:\n s.reraise()\n except Exception as e:\n task_fut.set_exception(e)\n elif isinstance(s, Exception):\n task_fut.set_exception(s)\n else:\n raise ValueError(\"Unknown exception-like type received: {}\".format(type(s)))\n except Exception as e:\n # TODO could be a proper wrapped exception?\n task_fut.set_exception(\n DeserializationError(\"Received exception, but handling also threw an exception: {}\".format(e)))\n else:\n raise BadMessage(\"Message received is neither result or exception\")\n\n if not self.is_alive:\n break\n logger.info(\"[MTHREAD] queue management worker finished\")\n\n # When the executor gets lost, the weakref callback will wake up\n # the queue management thread.\n def weakref_cb(self, q=None):\n \"\"\"We do not use this yet.\"\"\"\n q.put(None)\n\n def _start_local_queue_process(self):\n \"\"\" Starts the interchange process locally\n\n Starts the interchange process locally and uses an internal command queue to\n get the worker task and result ports that the interchange has bound to.\n \"\"\"\n comm_q = Queue(maxsize=10)\n self.queue_proc = Process(target=interchange.starter,\n args=(comm_q,),\n kwargs={\"client_ports\": (self.outgoing_q.port,\n self.incoming_q.port,\n self.command_client.port),\n \"worker_ports\": self.worker_ports,\n \"worker_port_range\": self.worker_port_range,\n \"logdir\": \"{}/{}\".format(self.run_dir, self.label),\n \"suppress_failure\": self.suppress_failure,\n \"heartbeat_threshold\": self.heartbeat_threshold,\n \"poll_period\": self.poll_period,\n \"logging_level\": logging.DEBUG if self.worker_debug else logging.INFO\n },\n )\n self.queue_proc.start()\n try:\n (worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=120)\n except queue.Empty:\n logger.error(\"Interchange has not completed initialization in 120s. 
Aborting\")\n raise Exception(\"Interchange failed to start\")\n\n self.worker_task_url = \"tcp://{}:{}\".format(self.address, worker_task_port)\n self.worker_result_url = \"tcp://{}:{}\".format(self.address, worker_result_port)\n\n def _start_queue_management_thread(self):\n \"\"\"Method to start the management thread as a daemon.\n\n Checks if a thread already exists, then starts it.\n Could be used later as a restart if the management thread dies.\n \"\"\"\n if self._queue_management_thread is None:\n logger.debug(\"Starting queue management thread\")\n self._queue_management_thread = threading.Thread(target=self._queue_management_worker)\n self._queue_management_thread.daemon = True\n self._queue_management_thread.start()\n logger.debug(\"Started queue management thread\")\n\n else:\n logger.debug(\"Management thread already exists, returning\")\n\n def hold_worker(self, worker_id):\n \"\"\"Puts a worker on hold, preventing scheduling of additional tasks to it.\n\n This is called \"hold\" mostly because this only stops scheduling of tasks,\n and does not actually kill the worker.\n\n Parameters\n ----------\n\n worker_id : str\n Worker id to be put on hold\n \"\"\"\n c = self.command_client.run(\"HOLD_WORKER;{}\".format(worker_id))\n logger.debug(\"Sent hold request to worker: {}\".format(worker_id))\n return c\n\n @property\n def outstanding(self):\n outstanding_c = self.command_client.run(\"OUTSTANDING_C\")\n # logger.debug(\"Got outstanding count: {}\".format(outstanding_c))\n return outstanding_c\n\n @property\n def connected_workers(self):\n workers = self.command_client.run(\"WORKERS\")\n return workers\n\n @property\n def connected_managers(self):\n workers = self.command_client.run(\"MANAGERS\")\n return workers\n\n def _hold_block(self, block_id):\n \"\"\" Sends hold command to all managers which are in a specific block\n\n Parameters\n ----------\n block_id : str\n Block identifier of the block to be put on hold\n \"\"\"\n\n managers = self.connected_managers\n\n for manager in managers:\n if manager['block_id'] == block_id:\n logger.debug(\"[HOLD_BLOCK]: Sending hold to manager: {}\".format(manager['manager']))\n self.hold_worker(manager['manager'])\n\n def submit(self, func, *args, **kwargs):\n \"\"\"Submits work to the the outgoing_q.\n\n The outgoing_q is an external process listens on this\n queue for new work. 
This method behaves like a\n submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Args:\n - func (callable) : Callable function\n - *args (list) : List of arbitrary positional arguments.\n\n Kwargs:\n - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n Returns:\n Future\n \"\"\"\n if self._executor_bad_state.is_set():\n raise self._executor_exception\n\n self._task_counter += 1\n task_id = self._task_counter\n\n # handle people sending blobs gracefully\n args_to_print = args\n if logger.getEffectiveLevel() >= logging.DEBUG:\n args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])\n logger.debug(\"Pushing function {} to queue with args {}\".format(func, args_to_print))\n\n self.tasks[task_id] = Future()\n\n fn_buf = pack_apply_message(func, args, kwargs,\n buffer_threshold=1024 * 1024,\n item_threshold=1024)\n\n msg = {\"task_id\": task_id,\n \"buffer\": fn_buf}\n\n # Post task to the the outgoing queue\n self.outgoing_q.put(msg)\n\n # Return the future\n return self.tasks[task_id]\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def scale_out(self, blocks=1):\n \"\"\"Scales out the number of blocks by \"blocks\"\n\n Raises:\n NotImplementedError\n \"\"\"\n r = []\n for i in range(blocks):\n if self.provider:\n external_block_id = str(len(self.blocks))\n launch_cmd = self.launch_cmd.format(block_id=external_block_id)\n internal_block = self.provider.submit(launch_cmd, 1, 1)\n logger.debug(\"Launched block {}->{}\".format(external_block_id, internal_block))\n if not internal_block:\n raise(ScalingFailed(self.provider.label,\n \"Attempts to provision nodes via provider has failed\"))\n r.extend([external_block_id])\n self.blocks[external_block_id] = internal_block\n else:\n logger.error(\"No execution provider available\")\n r = None\n return r\n\n def scale_in(self, blocks=None, block_ids=[]):\n \"\"\"Scale in the number of active blocks by specified amount.\n\n The scale in method here is very rude. It doesn't give the workers\n the opportunity to finish current tasks or cleanup. This is tracked\n in issue #530\n\n Parameters\n ----------\n\n blocks : int\n Number of blocks to terminate and scale_in by\n\n block_ids : list\n List of specific block ids to terminate. 
Optional\n\n Raises:\n NotImplementedError\n \"\"\"\n\n if block_ids:\n block_ids_to_kill = block_ids\n else:\n block_ids_to_kill = list(self.blocks.keys())[:blocks]\n\n # Hold the block\n for block_id in block_ids_to_kill:\n self._hold_block(block_id)\n\n # Now kill via provider\n to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]\n\n if self.provider:\n r = self.provider.cancel(to_kill)\n\n return r\n\n def status(self):\n \"\"\"Return status of all blocks.\"\"\"\n\n status = []\n if self.provider:\n status = self.provider.status(self.blocks.values())\n\n return status\n\n def shutdown(self, hub=True, targets='all', block=False):\n \"\"\"Shutdown the executor, including all workers and controllers.\n\n This is not implemented.\n\n Kwargs:\n - hub (Bool): Whether the hub should be shutdown, Default:True,\n - targets (list of ints| 'all'): List of block id's to kill, Default:'all'\n - block (Bool): To block for confirmations or not\n\n Raises:\n NotImplementedError\n \"\"\"\n\n logger.info(\"Attempting HighThroughputExecutor shutdown\")\n # self.outgoing_q.close()\n # self.incoming_q.close()\n self.queue_proc.terminate()\n logger.info(\"Finished HighThroughputExecutor shutdown attempt\")\n return True\n", "path": "parsl/executors/high_throughput/executor.py" } ]
[ { "content": "\"\"\"HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution\n\"\"\"\n\nfrom concurrent.futures import Future\nimport typeguard\nimport logging\nimport threading\nimport queue\nimport pickle\nfrom multiprocessing import Process, Queue\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom ipyparallel.serialize import pack_apply_message # ,unpack_apply_message\nfrom ipyparallel.serialize import deserialize_object # ,serialize_object\n\nfrom parsl.app.errors import RemoteExceptionWrapper\nfrom parsl.executors.high_throughput import zmq_pipes\nfrom parsl.executors.high_throughput import interchange\nfrom parsl.executors.errors import *\nfrom parsl.executors.base import ParslExecutor\nfrom parsl.dataflow.error import ConfigurationError\nfrom parsl.providers.provider_base import ExecutionProvider\n\nfrom parsl.utils import RepresentationMixin\nfrom parsl.providers import LocalProvider\n\nlogger = logging.getLogger(__name__)\n\nBUFFER_THRESHOLD = 1024 * 1024\nITEM_THRESHOLD = 1024\n\n\nclass HighThroughputExecutor(ParslExecutor, RepresentationMixin):\n \"\"\"Executor designed for cluster-scale\n\n The HighThroughputExecutor system has the following components:\n 1. The HighThroughputExecutor instance which is run as part of the Parsl script.\n 2. The Interchange which is acts as a load-balancing proxy between workers and Parsl\n 3. The multiprocessing based worker pool which coordinates task execution over several\n cores on a node.\n 4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool\n\n Here is a diagram\n\n .. code:: python\n\n\n | Data | Executor | Interchange | External Process(es)\n | Flow | | |\n Task | Kernel | | |\n +----->|-------->|------------>|->outgoing_q---|-> process_worker_pool\n | | | | batching | | |\n Parsl<---Fut-| | | load-balancing| result exception\n ^ | | | watchdogs | | |\n | | | Q_mngmnt | | V V\n | | | Thread<--|-incoming_q<---|--- +---------+\n | | | | | |\n | | | | | |\n +----update_fut-----+\n\n\n Parameters\n ----------\n\n provider : :class:`~parsl.providers.provider_base.ExecutionProvider`\n Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,\n :class:`~parsl.providers.cobalt.cobalt.Cobalt`,\n :class:`~parsl.providers.condor.condor.Condor`,\n :class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,\n :class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,\n :class:`~parsl.providers.jetstream.jetstream.Jetstream`,\n :class:`~parsl.providers.local.local.Local`,\n :class:`~parsl.providers.sge.sge.GridEngine`,\n :class:`~parsl.providers.slurm.slurm.Slurm`, or\n :class:`~parsl.providers.torque.torque.Torque`.\n\n label : str\n Label for this executor instance.\n\n launch_cmd : str\n Command line string to launch the process_worker_pool from the provider. The command line string\n will be formatted with appropriate values for the following values (debug, task_url, result_url,\n cores_per_worker, nodes_per_block, heartbeat_period ,heartbeat_threshold, logdir). For eg:\n launch_cmd=\"process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}\"\n\n address : string\n An address to connect to the main Parsl process which is reachable from the network in which\n workers will be running. This can be either a hostname as returned by `hostname` or an\n IP address. 
Most login nodes on clusters have several network interfaces available, only\n some of which can be reached from the compute nodes. Some trial and error might be\n necessary to indentify what addresses are reachable from compute nodes.\n\n worker_ports : (int, int)\n Specify the ports to be used by workers to connect to Parsl. If this option is specified,\n worker_port_range will not be honored.\n\n worker_port_range : (int, int)\n Worker ports will be chosen between the two integers provided.\n\n interchange_port_range : (int, int)\n Port range used by Parsl to communicate with the Interchange.\n\n working_dir : str\n Working dir to be used by the executor.\n\n worker_debug : Bool\n Enables worker debug logging.\n\n managed : Bool\n If this executor is managed by the DFK or externally handled.\n\n cores_per_worker : float\n cores to be assigned to each worker. Oversubscription is possible\n by setting cores_per_worker < 1.0. Default=1\n\n mem_per_worker : float\n GB of memory required per worker. If this option is specified, the node manager\n will check the available memory at startup and limit the number of workers such that\n the there's sufficient memory for each worker. Default: None\n\n max_workers : int\n Caps the number of workers launched by the manager. Default: infinity\n\n prefetch_capacity : int\n Number of tasks that could be prefetched over available worker capacity.\n When there are a few tasks (<100) or when tasks are long running, this option should\n be set to 0 for better load balancing. Default is 0.\n\n suppress_failure : Bool\n If set, the interchange will suppress failures rather than terminate early. Default: False\n\n heartbeat_threshold : int\n Seconds since the last message from the counterpart in the communication pair:\n (interchange, manager) after which the counterpart is assumed to be un-available. Default:120s\n\n heartbeat_period : int\n Number of seconds after which a heartbeat message indicating liveness is sent to the\n counterpart (interchange, manager). Default:30s\n\n poll_period : int\n Timeout period to be used by the executor components in milliseconds. Increasing poll_periods\n trades performance for cpu efficiency. 
Default: 10ms\n\n worker_logdir_root : string\n In case of a remote file system, specify the path to where logs will be kept.\n \"\"\"\n\n @typeguard.typechecked\n def __init__(self,\n label: str = 'HighThroughputExecutor',\n provider: ExecutionProvider = LocalProvider(),\n launch_cmd: Optional[str] = None,\n address: str = \"127.0.0.1\",\n worker_ports: Optional[Tuple[int, int]] = None,\n worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),\n interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),\n storage_access: Optional[List[Any]] = None,\n working_dir: Optional[str] = None,\n worker_debug: bool = False,\n cores_per_worker: float = 1.0,\n mem_per_worker: Optional[float] = None,\n max_workers: Union[int, float] = float('inf'),\n prefetch_capacity: int = 0,\n heartbeat_threshold: int = 120,\n heartbeat_period: int = 30,\n poll_period: int = 10,\n suppress_failure: bool = False,\n managed: bool = True,\n worker_logdir_root: Optional[str] = None):\n\n logger.debug(\"Initializing HighThroughputExecutor\")\n\n self.label = label\n self.launch_cmd = launch_cmd\n self.provider = provider\n self.worker_debug = worker_debug\n self.storage_access = storage_access if storage_access is not None else []\n if len(self.storage_access) > 1:\n raise ConfigurationError('Multiple storage access schemes are not supported')\n self.working_dir = working_dir\n self.managed = managed\n self.blocks = {} # type: Dict[str, str]\n self.tasks = {} # type: Dict[str, Future]\n self.cores_per_worker = cores_per_worker\n self.mem_per_worker = mem_per_worker\n self.max_workers = max_workers\n self.prefetch_capacity = prefetch_capacity\n\n self._task_counter = 0\n self.address = address\n self.worker_ports = worker_ports\n self.worker_port_range = worker_port_range\n self.interchange_port_range = interchange_port_range\n self.heartbeat_threshold = heartbeat_threshold\n self.heartbeat_period = heartbeat_period\n self.poll_period = poll_period\n self.suppress_failure = suppress_failure\n self.run_dir = '.'\n self.worker_logdir_root = worker_logdir_root\n\n if not launch_cmd:\n self.launch_cmd = (\"process_worker_pool.py {debug} {max_workers} \"\n \"-p {prefetch_capacity} \"\n \"-c {cores_per_worker} \"\n \"-m {mem_per_worker} \"\n \"--poll {poll_period} \"\n \"--task_url={task_url} \"\n \"--result_url={result_url} \"\n \"--logdir={logdir} \"\n \"--block_id={{block_id}} \"\n \"--hb_period={heartbeat_period} \"\n \"--hb_threshold={heartbeat_threshold} \")\n\n def initialize_scaling(self):\n \"\"\" Compose the launch command and call the scale_out\n\n This should be implemented in the child classes to take care of\n executor specific oddities.\n \"\"\"\n debug_opts = \"--debug\" if self.worker_debug else \"\"\n max_workers = \"\" if self.max_workers == float('inf') else \"--max_workers={}\".format(self.max_workers)\n\n worker_logdir = \"{}/{}\".format(self.run_dir, self.label)\n if self.worker_logdir_root is not None:\n worker_logdir = \"{}/{}\".format(self.worker_logdir_root, self.label)\n\n l_cmd = self.launch_cmd.format(debug=debug_opts,\n prefetch_capacity=self.prefetch_capacity,\n task_url=self.worker_task_url,\n result_url=self.worker_result_url,\n cores_per_worker=self.cores_per_worker,\n mem_per_worker=self.mem_per_worker,\n max_workers=max_workers,\n nodes_per_block=self.provider.nodes_per_block,\n heartbeat_period=self.heartbeat_period,\n heartbeat_threshold=self.heartbeat_threshold,\n poll_period=self.poll_period,\n logdir=worker_logdir)\n self.launch_cmd = l_cmd\n logger.debug(\"Launch 
command: {}\".format(self.launch_cmd))\n\n self._scaling_enabled = self.provider.scaling_enabled\n logger.debug(\"Starting HighThroughputExecutor with provider:\\n%s\", self.provider)\n if hasattr(self.provider, 'init_blocks'):\n try:\n self.scale_out(blocks=self.provider.init_blocks)\n except Exception as e:\n logger.error(\"Scaling out failed: {}\".format(e))\n raise e\n\n def start(self):\n \"\"\"Create the Interchange process and connect to it.\n \"\"\"\n self.outgoing_q = zmq_pipes.TasksOutgoing(\"127.0.0.1\", self.interchange_port_range)\n self.incoming_q = zmq_pipes.ResultsIncoming(\"127.0.0.1\", self.interchange_port_range)\n self.command_client = zmq_pipes.CommandClient(\"127.0.0.1\", self.interchange_port_range)\n\n self.is_alive = True\n\n self._executor_bad_state = threading.Event()\n self._executor_exception = None\n self._queue_management_thread = None\n self._start_queue_management_thread()\n self._start_local_queue_process()\n\n logger.debug(\"Created management thread: {}\".format(self._queue_management_thread))\n\n if self.provider:\n self.initialize_scaling()\n else:\n self._scaling_enabled = False\n logger.debug(\"Starting HighThroughputExecutor with no provider\")\n\n def _queue_management_worker(self):\n \"\"\"Listen to the queue for task status messages and handle them.\n\n Depending on the message, tasks will be updated with results, exceptions,\n or updates. It expects the following messages:\n\n .. code:: python\n\n {\n \"task_id\" : <task_id>\n \"result\" : serialized result object, if task succeeded\n ... more tags could be added later\n }\n\n {\n \"task_id\" : <task_id>\n \"exception\" : serialized exception object, on failure\n }\n\n We do not support these yet, but they could be added easily.\n\n .. code:: python\n\n {\n \"task_id\" : <task_id>\n \"cpu_stat\" : <>\n \"mem_stat\" : <>\n \"io_stat\" : <>\n \"started\" : tstamp\n }\n\n The `None` message is a die request.\n \"\"\"\n logger.debug(\"[MTHREAD] queue management worker starting\")\n\n while not self._executor_bad_state.is_set():\n try:\n msgs = self.incoming_q.get(timeout=1)\n # logger.debug(\"[MTHREAD] get has returned {}\".format(len(msgs)))\n\n except queue.Empty:\n logger.debug(\"[MTHREAD] queue empty\")\n # Timed out.\n pass\n\n except IOError as e:\n logger.exception(\"[MTHREAD] Caught broken queue with exception code {}: {}\".format(e.errno, e))\n return\n\n except Exception as e:\n logger.exception(\"[MTHREAD] Caught unknown exception: {}\".format(e))\n return\n\n else:\n\n if msgs is None:\n logger.debug(\"[MTHREAD] Got None, exiting\")\n return\n\n else:\n for serialized_msg in msgs:\n try:\n msg = pickle.loads(serialized_msg)\n tid = msg['task_id']\n except pickle.UnpicklingError:\n raise BadMessage(\"Message received could not be unpickled\")\n\n except Exception:\n raise BadMessage(\"Message received does not contain 'task_id' field\")\n\n if tid == -1 and 'exception' in msg:\n logger.warning(\"Executor shutting down due to exception from interchange\")\n self._executor_exception, _ = deserialize_object(msg['exception'])\n logger.exception(\"Exception: {}\".format(self._executor_exception))\n # Set bad state to prevent new tasks from being submitted\n self._executor_bad_state.set()\n # We set all current tasks to this exception to make sure that\n # this is raised in the main context.\n for task in self.tasks:\n self.tasks[task].set_exception(self._executor_exception)\n break\n\n task_fut = self.tasks[tid]\n\n if 'result' in msg:\n result, _ = deserialize_object(msg['result'])\n 
task_fut.set_result(result)\n\n elif 'exception' in msg:\n try:\n s, _ = deserialize_object(msg['exception'])\n # s should be a RemoteExceptionWrapper... so we can reraise it\n if isinstance(s, RemoteExceptionWrapper):\n try:\n s.reraise()\n except Exception as e:\n task_fut.set_exception(e)\n elif isinstance(s, Exception):\n task_fut.set_exception(s)\n else:\n raise ValueError(\"Unknown exception-like type received: {}\".format(type(s)))\n except Exception as e:\n # TODO could be a proper wrapped exception?\n task_fut.set_exception(\n DeserializationError(\"Received exception, but handling also threw an exception: {}\".format(e)))\n else:\n raise BadMessage(\"Message received is neither result or exception\")\n\n if not self.is_alive:\n break\n logger.info(\"[MTHREAD] queue management worker finished\")\n\n # When the executor gets lost, the weakref callback will wake up\n # the queue management thread.\n def weakref_cb(self, q=None):\n \"\"\"We do not use this yet.\"\"\"\n q.put(None)\n\n def _start_local_queue_process(self):\n \"\"\" Starts the interchange process locally\n\n Starts the interchange process locally and uses an internal command queue to\n get the worker task and result ports that the interchange has bound to.\n \"\"\"\n comm_q = Queue(maxsize=10)\n self.queue_proc = Process(target=interchange.starter,\n args=(comm_q,),\n kwargs={\"client_ports\": (self.outgoing_q.port,\n self.incoming_q.port,\n self.command_client.port),\n \"worker_ports\": self.worker_ports,\n \"worker_port_range\": self.worker_port_range,\n \"logdir\": \"{}/{}\".format(self.run_dir, self.label),\n \"suppress_failure\": self.suppress_failure,\n \"heartbeat_threshold\": self.heartbeat_threshold,\n \"poll_period\": self.poll_period,\n \"logging_level\": logging.DEBUG if self.worker_debug else logging.INFO\n },\n )\n self.queue_proc.start()\n try:\n (worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=120)\n except queue.Empty:\n logger.error(\"Interchange has not completed initialization in 120s. 
Aborting\")\n raise Exception(\"Interchange failed to start\")\n\n self.worker_task_url = \"tcp://{}:{}\".format(self.address, worker_task_port)\n self.worker_result_url = \"tcp://{}:{}\".format(self.address, worker_result_port)\n\n def _start_queue_management_thread(self):\n \"\"\"Method to start the management thread as a daemon.\n\n Checks if a thread already exists, then starts it.\n Could be used later as a restart if the management thread dies.\n \"\"\"\n if self._queue_management_thread is None:\n logger.debug(\"Starting queue management thread\")\n self._queue_management_thread = threading.Thread(target=self._queue_management_worker)\n self._queue_management_thread.daemon = True\n self._queue_management_thread.start()\n logger.debug(\"Started queue management thread\")\n\n else:\n logger.debug(\"Management thread already exists, returning\")\n\n def hold_worker(self, worker_id):\n \"\"\"Puts a worker on hold, preventing scheduling of additional tasks to it.\n\n This is called \"hold\" mostly because this only stops scheduling of tasks,\n and does not actually kill the worker.\n\n Parameters\n ----------\n\n worker_id : str\n Worker id to be put on hold\n \"\"\"\n c = self.command_client.run(\"HOLD_WORKER;{}\".format(worker_id))\n logger.debug(\"Sent hold request to worker: {}\".format(worker_id))\n return c\n\n @property\n def outstanding(self):\n outstanding_c = self.command_client.run(\"OUTSTANDING_C\")\n # logger.debug(\"Got outstanding count: {}\".format(outstanding_c))\n return outstanding_c\n\n @property\n def connected_workers(self):\n workers = self.command_client.run(\"WORKERS\")\n return workers\n\n @property\n def connected_managers(self):\n workers = self.command_client.run(\"MANAGERS\")\n return workers\n\n def _hold_block(self, block_id):\n \"\"\" Sends hold command to all managers which are in a specific block\n\n Parameters\n ----------\n block_id : str\n Block identifier of the block to be put on hold\n \"\"\"\n\n managers = self.connected_managers\n\n for manager in managers:\n if manager['block_id'] == block_id:\n logger.debug(\"[HOLD_BLOCK]: Sending hold to manager: {}\".format(manager['manager']))\n self.hold_worker(manager['manager'])\n\n def submit(self, func, *args, **kwargs):\n \"\"\"Submits work to the the outgoing_q.\n\n The outgoing_q is an external process listens on this\n queue for new work. 
This method behaves like a\n submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Args:\n - func (callable) : Callable function\n - *args (list) : List of arbitrary positional arguments.\n\n Kwargs:\n - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n Returns:\n Future\n \"\"\"\n if self._executor_bad_state.is_set():\n raise self._executor_exception\n\n self._task_counter += 1\n task_id = self._task_counter\n\n # handle people sending blobs gracefully\n args_to_print = args\n if logger.getEffectiveLevel() >= logging.DEBUG:\n args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])\n logger.debug(\"Pushing function {} to queue with args {}\".format(func, args_to_print))\n\n self.tasks[task_id] = Future()\n\n fn_buf = pack_apply_message(func, args, kwargs,\n buffer_threshold=1024 * 1024,\n item_threshold=1024)\n\n msg = {\"task_id\": task_id,\n \"buffer\": fn_buf}\n\n # Post task to the the outgoing queue\n self.outgoing_q.put(msg)\n\n # Return the future\n return self.tasks[task_id]\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def scale_out(self, blocks=1):\n \"\"\"Scales out the number of blocks by \"blocks\"\n\n Raises:\n NotImplementedError\n \"\"\"\n r = []\n for i in range(blocks):\n if self.provider:\n external_block_id = str(len(self.blocks))\n launch_cmd = self.launch_cmd.format(block_id=external_block_id)\n internal_block = self.provider.submit(launch_cmd, 1, 1)\n logger.debug(\"Launched block {}->{}\".format(external_block_id, internal_block))\n if not internal_block:\n raise(ScalingFailed(self.provider.label,\n \"Attempts to provision nodes via provider has failed\"))\n r.extend([external_block_id])\n self.blocks[external_block_id] = internal_block\n else:\n logger.error(\"No execution provider available\")\n r = None\n return r\n\n def scale_in(self, blocks=None, block_ids=[]):\n \"\"\"Scale in the number of active blocks by specified amount.\n\n The scale in method here is very rude. It doesn't give the workers\n the opportunity to finish current tasks or cleanup. This is tracked\n in issue #530\n\n Parameters\n ----------\n\n blocks : int\n Number of blocks to terminate and scale_in by\n\n block_ids : list\n List of specific block ids to terminate. 
Optional\n\n Raises:\n NotImplementedError\n \"\"\"\n\n if block_ids:\n block_ids_to_kill = block_ids\n else:\n block_ids_to_kill = list(self.blocks.keys())[:blocks]\n\n # Hold the block\n for block_id in block_ids_to_kill:\n self._hold_block(block_id)\n\n # Now kill via provider\n to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]\n\n if self.provider:\n r = self.provider.cancel(to_kill)\n\n return r\n\n def status(self):\n \"\"\"Return status of all blocks.\"\"\"\n\n status = []\n if self.provider:\n status = self.provider.status(list(self.blocks.values()))\n\n return status\n\n def shutdown(self, hub=True, targets='all', block=False):\n \"\"\"Shutdown the executor, including all workers and controllers.\n\n This is not implemented.\n\n Kwargs:\n - hub (Bool): Whether the hub should be shutdown, Default:True,\n - targets (list of ints| 'all'): List of block id's to kill, Default:'all'\n - block (Bool): To block for confirmations or not\n\n Raises:\n NotImplementedError\n \"\"\"\n\n logger.info(\"Attempting HighThroughputExecutor shutdown\")\n # self.outgoing_q.close()\n # self.incoming_q.close()\n self.queue_proc.terminate()\n logger.info(\"Finished HighThroughputExecutor shutdown attempt\")\n return True\n", "path": "parsl/executors/high_throughput/executor.py" } ]
diff --git a/parsl/executors/high_throughput/executor.py b/parsl/executors/high_throughput/executor.py index 63706d8bda..cabd3ce501 100644 --- a/parsl/executors/high_throughput/executor.py +++ b/parsl/executors/high_throughput/executor.py @@ -597,7 +597,7 @@ def status(self): status = [] if self.provider: - status = self.provider.status(self.blocks.values()) + status = self.provider.status(list(self.blocks.values())) return status
bridgecrewio__checkov-1905
Bump boto3 to the latest version
**Describe the bug**
I am trying to install checkov and the latest boto3 version within the same environment. However, checkov pins boto3 to version 1.17.*. Could you please bump boto3 to the latest version?
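The conflict is purely one of version specifiers: `boto3==1.17.*` excludes every 1.18+ release, while the relaxed `boto3>=1.17` accepts them. A small sketch with the `packaging` library makes that concrete (1.19.10 is the release the accompanying Pipfile.lock update resolves to):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

latest = Version("1.19.10")          # newer boto3 release, as seen in the lockfile diff

pinned = SpecifierSet("==1.17.*")    # checkov's old requirement
relaxed = SpecifierSet(">=1.17")     # the requirement after the bump

# The pin rejects the newer release, so pip cannot satisfy both constraints.
print(latest in pinned)    # False

# The relaxed specifier accepts any 1.17+ release, resolving the conflict.
print(latest in relaxed)   # True
```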
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.*\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib==0.6.2\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib==0.6.2\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/Pipfile b/Pipfile index 8de488fb19..9125af1a31 100644 --- a/Pipfile +++ b/Pipfile @@ -16,6 +16,7 @@ GitPython = "*" bandit = "*" urllib3-mock = "*" jsonschema = "*" +importlib-resources = ">=1.3" atomicwrites = "*" responses = "*" types-requests = "*" @@ -32,7 +33,7 @@ termcolor="*" junit-xml = ">=1.9" dpath = ">=1.5.0,<2" pyyaml = ">=5.4.1" -boto3 = "==1.17.*" +boto3 = ">=1.17" GitPython = "*" jmespath = "*" tqdm = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 4fa7da71cd..9ed91c7c18 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "7a65ce09459bcf33efcd37b8e228736309abf7eeb79d520181b3b2ca56eccc97" + "sha256": "701841bd573e6846533946dd963bb9ca8a01630961d75a98b5f1fc9b08065547" }, "pipfile-spec": 6, "requires": { @@ -34,19 +34,19 @@ }, "boto3": { "hashes": [ - "sha256:08b6dacbe7ebe57ae8acfb7106b2728d946ae1e0c3da270caee1deb79ccbd8af", - "sha256:8716465313c50ad9e5c2ac1767642ca0ddf7d1729c3d5c884d82880c1a15a310" + "sha256:57ee38d02772f44a52d2d836cee61d039d405f6eaefc68f92ae0d80e0260c097", + "sha256:79c982c5930f989292ca849b0caaa1ffeb9eb9d27c32992c3b2f6736b3b14ad2" ], "index": "pypi", - "version": "==1.17.112" + "version": "==1.19.10" }, "botocore": { "hashes": [ - "sha256:6d51de0981a3ef19da9e6a3c73b5ab427e3c0c8b92200ebd38d087299683dd2b", - "sha256:d0b9b70b6eb5b65bb7162da2aaf04b6b086b15cc7ea322ddc3ef2f5e07944dcf" + "sha256:543cd69e9b248be942d181a097a4715312939ec998602a7b4b07e9fda36d30e9", + "sha256:dffa1e7e7e3a8da73bbdead3aeff7d52fd5a159a1a93b2896ac67b2aa79a461c" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==1.20.112" + "markers": "python_version >= '3.6'", + "version": "==1.22.10" }, "cached-property": { "hashes": [ @@ -429,10 +429,11 @@ }, "s3transfer": { "hashes": [ - "sha256:9b3752887a2880690ce628bc263d6d13a3864083aeacff4890c1c9839a5eb0bc", - "sha256:cb022f4b16551edebbb31a377d3f09600dbada7363d8c5db7976e7f47732e1b2" + "sha256:50ed823e1dc5868ad40c8dc92072f757aa0e653a192845c94a3b676f4a62da4c", + "sha256:9c1dc369814391a6bda20ebbf4b70a0f34630592c9aa520856bf384916af2803" ], - "version": "==0.4.2" + "markers": "python_version >= '3.6'", + "version": "==0.5.0" }, "schema": { "hashes": [ @@ -467,11 +468,11 @@ }, "soupsieve": { "hashes": [ - "sha256:052774848f448cf19c7e959adf5566904d525f33a3f8b6ba6f6f8f26ec7de0cc", - "sha256:c2c1c2d44f158cdbddab7824a9af8c4f83c76b1e23e049479aa432feb6c4c23b" + "sha256:617ffc4d0dfd39c66f4d1413a6e165663a34eca86be9b54f97b91756300ff6df", + "sha256:e4860f889dfa88774c07da0b276b70c073b6470fa1a4a8350800bb7bce3dcc76" ], "markers": "python_version >= '3.6'", - "version": "==2.2.1" + "version": "==2.3" }, "tabulate": { "hashes": [ @@ -687,13 +688,13 @@ "markers": "python_version >= '3'", "version": "==3.3" }, - "importlib-metadata": { + "importlib-resources": { "hashes": [ - "sha256:b618b6d2d5ffa2f16add5697cf57a46c76a56229b0ed1c438322e4e95645bd15", - "sha256:f284b3e11256ad1e5d03ab86bb2ccd6f5339688ff17a4d797a0fe7df326f23b1" + "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45", + "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b" ], "index": "pypi", - "version": "==4.8.1" + "version": "==5.4.0" }, "iniconfig": { "hashes": [ @@ -704,11 +705,11 @@ }, "jsonschema": { "hashes": [ - "sha256:166870c8ab27bd712a8627e0598de4685bd8d199c4d7bd7cacc3d941ba0c6ca0", - "sha256:5c1a282ee6b74235057421fd0f766ac5f2972f77440927f6471c9e8493632fac" + "sha256:2b563117f3659a7f433dffe1371c88f52115b79133493f376f15724b9caa7efa", + 
"sha256:e2d3601321ac74d38214e2853300ae740cd07e53d919a15862b8c71f9d840574" ], "index": "pypi", - "version": "==4.1.2" + "version": "==4.2.0" }, "packaging": { "hashes": [ @@ -720,11 +721,11 @@ }, "pbr": { "hashes": [ - "sha256:42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd", - "sha256:c68c661ac5cc81058ac94247278eeda6d2e6aecb3e227b0387c30d277e7ef8d4" + "sha256:4651ca1445e80f2781827305de3d76b3ce53195f2227762684eb08f17bc473b7", + "sha256:60002958e459b195e8dbe61bf22bcf344eedf1b4e03a321a5414feb15566100c" ], "markers": "python_version >= '2.6'", - "version": "==5.6.0" + "version": "==5.7.0" }, "pluggy": { "hashes": [ @@ -736,11 +737,11 @@ }, "py": { "hashes": [ - "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3", - "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a" + "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719", + "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.10.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==1.11.0" }, "pyparsing": { "hashes": [ diff --git a/setup.py b/setup.py index a6a91c4541..6eceae4022 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ "junit-xml>=1.9", "dpath>=1.5.0,<2", "pyyaml>=5.4.1", - "boto3==1.17.*", + "boto3>=1.17", "GitPython", "jmespath", "tqdm",
internetarchive__openlibrary-4557
/openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template)
Patron is reporting the following error:
`/openlibrary/openlibrary/templates/lists/widget.html: error in processing template: TypeError: Object of type Nothing is not JSON serializable (falling back to default template)`

### Relevant url?
https://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan

### Steps to Reproduce
1. Go to https://openlibrary.org/works/OL24171550W/Kelebihan_Amalan_Bulan_Rejab_Sya%E2%80%99ban_Ramadhan
2. View the error.

### Details
- **Environment (prod/dev/local)?** prod
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\n import json\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n return json.dumps(d, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. 
This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. 
This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py" } ]
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\n import json\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n return json.dumps(d or {}, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. 
This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. 
This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py" } ]
diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py index de212412998..b4ec1476453 100644 --- a/openlibrary/core/helpers.py +++ b/openlibrary/core/helpers.py @@ -101,7 +101,7 @@ def get_nofollow(name, event): def json_encode(d, **kw): """Same as json.dumps. """ - return json.dumps(d, **kw) + return json.dumps(d or {}, **kw) def safesort(iterable, key=None, reverse=False):
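For context on the patch above, here is a minimal, self-contained sketch of the failure mode and the guard. The `Nothing` class below is only a stand-in with the two properties that matter for this report (it is falsy and not JSON serializable); it is not web.py's actual object.

```python
import json

class Nothing:
    """Hypothetical stand-in for the template engine's falsy placeholder."""
    def __bool__(self):
        return False

missing = Nothing()

try:
    json.dumps(missing)               # the template crash
except TypeError as exc:
    print(exc)                        # Object of type Nothing is not JSON serializable

print(json.dumps(missing or {}))      # the patched helper serializes '{}' instead
```

One side effect worth noting: `d or {}` maps every falsy input (including `[]`, `0`, and `""`) to `{}`, so callers that expect an empty JSON array or number back will get an object instead.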
elastic__apm-agent-python-1558
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 369: invalid start byte **Describe the bug**: Sending a PUT request with an encoded binary file to FastAPI returns an error. **To Reproduce** 1. Configure FastAPI with elasticapm.contrib.starlette 2. Send a PUT request with a binary file **Environment (please complete the following information)** - OS: Linux - Python version: 3.8 - Framework and version: fastapi 0.61.2 - APM Server version: 8 - Agent version: 8 The same problem was resolved in the main code: [#344](https://github.com/elastic/apm-agent-python/issues/334)
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nimport asyncio\n\nfrom starlette.datastructures import Headers\nfrom starlette.requests import Request\nfrom starlette.types import Message\n\nfrom elasticapm.conf import Config, constants\nfrom elasticapm.utils import get_url_dict\n\n\nasync def get_data_from_request(request: Request, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from incoming request for APM capturing.\n\n Args:\n request (Request)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {\n \"method\": request.method,\n \"socket\": {\"remote_address\": _get_client_ip(request)},\n \"cookies\": request.cookies,\n }\n if config.capture_headers:\n result[\"headers\"] = dict(request.headers)\n\n if request.method in constants.HTTP_WITH_BODY:\n if config.capture_body not in (\"all\", event_type):\n result[\"body\"] = \"[REDACTED]\"\n else:\n body = None\n try:\n body = await get_body(request)\n except Exception:\n pass\n if body is not None:\n result[\"body\"] = body\n\n result[\"url\"] = get_url_dict(str(request.url))\n\n return result\n\n\nasync def get_data_from_response(message: dict, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from response for APM capturing.\n\n Args:\n message (dict)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {}\n\n if \"status\" in message:\n result[\"status_code\"] = message[\"status\"]\n\n if config.capture_headers and \"headers\" in message:\n headers = Headers(raw=message[\"headers\"])\n result[\"headers\"] = {key: \";\".join(headers.getlist(key)) for key in headers.keys()}\n\n return result\n\n\nasync def set_body(request: Request, body: bytes):\n \"\"\"Overwrites body in Starlette.\n\n Args:\n request (Request)\n body (bytes)\n \"\"\"\n\n async def receive() -> Message:\n await asyncio.sleep(0)\n return {\"type\": \"http.request\", \"body\": body}\n\n request._receive = receive\n\n\nasync def get_body(request: Request) -> str:\n 
\"\"\"Gets body from the request.\n\n When we consume the body, we replace the streaming mechanism with\n a mocked version -- this workaround came from\n https://github.com/encode/starlette/issues/495#issuecomment-513138055\n\n Args:\n request (Request)\n\n Returns:\n str\n \"\"\"\n body = await request.body()\n await set_body(request, body)\n\n request._stream_consumed = False\n\n return body.decode(\"utf-8\")\n\n\nasync def query_params_to_dict(query_params: str) -> dict:\n \"\"\"Transforms query params from URL to dictionary\n\n Args:\n query_params (str)\n\n Returns:\n dict\n\n Examples:\n >>> print(query_params_to_dict(b\"key=val&key2=val2\"))\n {\"key\": \"val\", \"key2\": \"val2\"}\n \"\"\"\n query_params = query_params.split(\"&\")\n res = {}\n for param in query_params:\n key, val = param.split(\"=\")\n res[key] = val\n\n return res\n\n\ndef _get_client_ip(request: Request):\n x_forwarded_for = request.headers.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.headers.get(\"REMOTE_ADDR\")\n return ip\n", "path": "elasticapm/contrib/starlette/utils.py" } ]
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nimport asyncio\n\nfrom starlette.datastructures import Headers\nfrom starlette.requests import Request\nfrom starlette.types import Message\n\nfrom elasticapm.conf import Config, constants\nfrom elasticapm.utils import get_url_dict\n\n\nasync def get_data_from_request(request: Request, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from incoming request for APM capturing.\n\n Args:\n request (Request)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {\n \"method\": request.method,\n \"socket\": {\"remote_address\": _get_client_ip(request)},\n \"cookies\": request.cookies,\n }\n if config.capture_headers:\n result[\"headers\"] = dict(request.headers)\n\n if request.method in constants.HTTP_WITH_BODY:\n if config.capture_body not in (\"all\", event_type):\n result[\"body\"] = \"[REDACTED]\"\n else:\n body = None\n try:\n body = await get_body(request)\n except Exception:\n pass\n if body is not None:\n result[\"body\"] = body\n\n result[\"url\"] = get_url_dict(str(request.url))\n\n return result\n\n\nasync def get_data_from_response(message: dict, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from response for APM capturing.\n\n Args:\n message (dict)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {}\n\n if \"status\" in message:\n result[\"status_code\"] = message[\"status\"]\n\n if config.capture_headers and \"headers\" in message:\n headers = Headers(raw=message[\"headers\"])\n result[\"headers\"] = {key: \";\".join(headers.getlist(key)) for key in headers.keys()}\n\n return result\n\n\nasync def set_body(request: Request, body: bytes):\n \"\"\"Overwrites body in Starlette.\n\n Args:\n request (Request)\n body (bytes)\n \"\"\"\n\n async def receive() -> Message:\n await asyncio.sleep(0)\n return {\"type\": \"http.request\", \"body\": body}\n\n request._receive = receive\n\n\nasync def get_body(request: Request) -> str:\n 
\"\"\"Gets body from the request.\n\n When we consume the body, we replace the streaming mechanism with\n a mocked version -- this workaround came from\n https://github.com/encode/starlette/issues/495#issuecomment-513138055\n\n Args:\n request (Request)\n\n Returns:\n str\n \"\"\"\n body = await request.body()\n await set_body(request, body)\n\n request._stream_consumed = False\n\n return body.decode(\"utf-8\", errors=\"replace\")\n\n\nasync def query_params_to_dict(query_params: str) -> dict:\n \"\"\"Transforms query params from URL to dictionary\n\n Args:\n query_params (str)\n\n Returns:\n dict\n\n Examples:\n >>> print(query_params_to_dict(b\"key=val&key2=val2\"))\n {\"key\": \"val\", \"key2\": \"val2\"}\n \"\"\"\n query_params = query_params.split(\"&\")\n res = {}\n for param in query_params:\n key, val = param.split(\"=\")\n res[key] = val\n\n return res\n\n\ndef _get_client_ip(request: Request):\n x_forwarded_for = request.headers.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.headers.get(\"REMOTE_ADDR\")\n return ip\n", "path": "elasticapm/contrib/starlette/utils.py" } ]
diff --git a/elasticapm/contrib/starlette/utils.py b/elasticapm/contrib/starlette/utils.py index f06c19055..ec2eaef5f 100644 --- a/elasticapm/contrib/starlette/utils.py +++ b/elasticapm/contrib/starlette/utils.py @@ -129,7 +129,7 @@ async def get_body(request: Request) -> str: request._stream_consumed = False - return body.decode("utf-8") + return body.decode("utf-8", errors="replace") async def query_params_to_dict(query_params: str) -> dict:
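A minimal illustration of the change above, using only the standard library: the byte 0xff is not valid UTF-8, so a strict decode raises exactly the reported error, while `errors="replace"` substitutes U+FFFD and always returns a string. The byte values below are made up for the example.

```python
body = b"\xff\xd8\xff\xe0 binary payload"

try:
    body.decode("utf-8")                       # old behaviour: raises
except UnicodeDecodeError as exc:
    print(exc)                                 # 'utf-8' codec can't decode byte 0xff ...

print(body.decode("utf-8", errors="replace"))  # new behaviour: '\ufffd\ufffd\ufffd\ufffd binary payload'
```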
benoitc__gunicorn-806
Fix utils.is_fileobj for streaming responses from requests It turns out that utils.is_fileobj breaks for streamed responses from the requests library because of how we check for a fileno() function and the exception raised by urllib3. https://github.com/benoitc/gunicorn/blob/19.0/gunicorn/util.py#L511-L521 https://github.com/kennethreitz/requests/blob/v2.3.0/requests/packages/urllib3/response.py#L294-L301 The obvious fix is to add the IOError type to the exception list. A PR will follow shortly.
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\n\nimport email.utils\nimport fcntl\nimport io\nimport os\nimport pkg_resources\nimport random\nimport resource\nimport socket\nimport sys\nimport textwrap\nimport time\nimport traceback\nimport inspect\nimport errno\nimport warnings\nimport cgi\n\nfrom gunicorn.errors import AppImportError\nfrom gunicorn.six import text_type\nfrom gunicorn.workers import SUPPORTED_WORKERS\n\n\nMAXFD = 1024\nREDIRECT_TO = getattr(os, 'devnull', '/dev/null')\n\ntimeout_default = object()\n\nCHUNK_SIZE = (16 * 1024)\n\nMAX_BODY = 1024 * 132\n\n# Server and Date aren't technically hop-by-hop\n# headers, but they are in the purview of the\n# origin server which the WSGI spec says we should\n# act like. So we drop them and add our own.\n#\n# In the future, concatenation server header values\n# might be better, but nothing else does it and\n# dropping them is easier.\nhop_headers = set(\"\"\"\n connection keep-alive proxy-authenticate proxy-authorization\n te trailers transfer-encoding upgrade\n server date\n \"\"\".split())\n\ntry:\n from setproctitle import setproctitle\n def _setproctitle(title):\n setproctitle(\"gunicorn: %s\" % title)\nexcept ImportError:\n def _setproctitle(title):\n return\n\n\ntry:\n from importlib import import_module\nexcept ImportError:\n def _resolve_name(name, package, level):\n \"\"\"Return the absolute name of the module to be imported.\"\"\"\n if not hasattr(package, 'rindex'):\n raise ValueError(\"'package' not set to a string\")\n dot = len(package)\n for x in range(level, 1, -1):\n try:\n dot = package.rindex('.', 0, dot)\n except ValueError:\n raise ValueError(\"attempted relative import beyond top-level \"\n \"package\")\n return \"%s.%s\" % (package[:dot], name)\n\n def import_module(name, package=None):\n \"\"\"Import a module.\n\nThe 'package' argument is required when performing a relative import. 
It\nspecifies the package to use as the anchor point from which to resolve the\nrelative import to an absolute import.\n\n\"\"\"\n if name.startswith('.'):\n if not package:\n raise TypeError(\"relative imports require the 'package' argument\")\n level = 0\n for character in name:\n if character != '.':\n break\n level += 1\n name = _resolve_name(name[level:], package, level)\n __import__(name)\n return sys.modules[name]\n\n\ndef load_class(uri, default=\"gunicorn.workers.sync.SyncWorker\",\n section=\"gunicorn.workers\"):\n if inspect.isclass(uri):\n return uri\n if uri.startswith(\"egg:\"):\n # uses entry points\n entry_str = uri.split(\"egg:\")[1]\n try:\n dist, name = entry_str.rsplit(\"#\", 1)\n except ValueError:\n dist = entry_str\n name = default\n\n try:\n return pkg_resources.load_entry_point(dist, section, name)\n except:\n exc = traceback.format_exc()\n raise RuntimeError(\"class uri %r invalid or not found: \\n\\n[%s]\" % (uri,\n exc))\n else:\n components = uri.split('.')\n if len(components) == 1:\n while True:\n if uri.startswith(\"#\"):\n uri = uri[1:]\n\n if uri in SUPPORTED_WORKERS:\n components = SUPPORTED_WORKERS[uri].split(\".\")\n break\n\n try:\n return pkg_resources.load_entry_point(\"gunicorn\",\n section, uri)\n except:\n exc = traceback.format_exc()\n raise RuntimeError(\"class uri %r invalid or not found: \\n\\n[%s]\" % (uri,\n exc))\n\n klass = components.pop(-1)\n\n try:\n mod = import_module('.'.join(components))\n except:\n exc = traceback.format_exc()\n raise RuntimeError(\n \"class uri %r invalid or not found: \\n\\n[%s]\" %\n (uri, exc))\n return getattr(mod, klass)\n\n\ndef set_owner_process(uid, gid):\n \"\"\" set user and group of workers processes \"\"\"\n if gid:\n # versions of python < 2.6.2 don't manage unsigned int for\n # groups like on osx or fedora\n gid = abs(gid) & 0x7FFFFFFF\n os.setgid(gid)\n if uid:\n os.setuid(uid)\n\n\ndef chown(path, uid, gid):\n gid = abs(gid) & 0x7FFFFFFF # see note above.\n os.chown(path, uid, gid)\n\n\nif sys.platform.startswith(\"win\"):\n def _waitfor(func, pathname, waitall=False):\n # Peform the operation\n func(pathname)\n # Now setup the wait loop\n if waitall:\n dirname = pathname\n else:\n dirname, name = os.path.split(pathname)\n dirname = dirname or '.'\n # Check for `pathname` to be removed from the filesystem.\n # The exponential backoff of the timeout amounts to a total\n # of ~1 second after which the deletion is probably an error\n # anyway.\n # Testing on a [email protected] shows that usually only 1 iteration is\n # required when contention occurs.\n timeout = 0.001\n while timeout < 1.0:\n # Note we are only testing for the existance of the file(s) in\n # the contents of the directory regardless of any security or\n # access rights. 
If we have made it this far, we have sufficient\n # permissions to do that much using Python's equivalent of the\n # Windows API FindFirstFile.\n # Other Windows APIs can fail or give incorrect results when\n # dealing with files that are pending deletion.\n L = os.listdir(dirname)\n if not (L if waitall else name in L):\n return\n # Increase the timeout and try again\n time.sleep(timeout)\n timeout *= 2\n warnings.warn('tests may fail, delete still pending for ' + pathname,\n RuntimeWarning, stacklevel=4)\n\n def _unlink(filename):\n _waitfor(os.unlink, filename)\nelse:\n _unlink = os.unlink\n\n\ndef unlink(filename):\n try:\n _unlink(filename)\n except OSError as error:\n # The filename need not exist.\n if error.errno not in (errno.ENOENT, errno.ENOTDIR):\n raise\n\n\ndef is_ipv6(addr):\n try:\n socket.inet_pton(socket.AF_INET6, addr)\n except socket.error: # not a valid address\n return False\n except ValueError: # ipv6 not supported on this platform\n return False\n return True\n\n\ndef parse_address(netloc, default_port=8000):\n if netloc.startswith(\"unix://\"):\n return netloc.split(\"unix://\")[1]\n\n if netloc.startswith(\"unix:\"):\n return netloc.split(\"unix:\")[1]\n\n if netloc.startswith(\"tcp://\"):\n netloc = netloc.split(\"tcp://\")[1]\n\n\n # get host\n if '[' in netloc and ']' in netloc:\n host = netloc.split(']')[0][1:].lower()\n elif ':' in netloc:\n host = netloc.split(':')[0].lower()\n elif netloc == \"\":\n host = \"0.0.0.0\"\n else:\n host = netloc.lower()\n\n #get port\n netloc = netloc.split(']')[-1]\n if \":\" in netloc:\n port = netloc.split(':', 1)[1]\n if not port.isdigit():\n raise RuntimeError(\"%r is not a valid port number.\" % port)\n port = int(port)\n else:\n port = default_port\n return (host, port)\n\ndef get_maxfd():\n maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]\n if (maxfd == resource.RLIM_INFINITY):\n maxfd = MAXFD\n return maxfd\n\n\ndef close_on_exec(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFD)\n flags |= fcntl.FD_CLOEXEC\n fcntl.fcntl(fd, fcntl.F_SETFD, flags)\n\n\ndef set_non_blocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n\ndef close(sock):\n try:\n sock.close()\n except socket.error:\n pass\n\ntry:\n from os import closerange\nexcept ImportError:\n def closerange(fd_low, fd_high):\n # Iterate through and close all file descriptors.\n for fd in range(fd_low, fd_high):\n try:\n os.close(fd)\n except OSError: # ERROR, fd wasn't open to begin with (ignored)\n pass\n\n\ndef write_chunk(sock, data):\n if isinstance(data, text_type):\n data = data.encode('utf-8')\n chunk_size = \"%X\\r\\n\" % len(data)\n chunk = b\"\".join([chunk_size.encode('utf-8'), data, b\"\\r\\n\"])\n sock.sendall(chunk)\n\n\ndef write(sock, data, chunked=False):\n if chunked:\n return write_chunk(sock, data)\n sock.sendall(data)\n\n\ndef write_nonblock(sock, data, chunked=False):\n timeout = sock.gettimeout()\n if timeout != 0.0:\n try:\n sock.setblocking(0)\n return write(sock, data, chunked)\n finally:\n sock.setblocking(1)\n else:\n return write(sock, data, chunked)\n\n\ndef writelines(sock, lines, chunked=False):\n for line in list(lines):\n write(sock, line, chunked)\n\n\ndef write_error(sock, status_int, reason, mesg):\n html = textwrap.dedent(\"\"\"\\\n <html>\n <head>\n <title>%(reason)s</title>\n </head>\n <body>\n <h1><p>%(reason)s</p></h1>\n %(mesg)s\n </body>\n </html>\n \"\"\") % {\"reason\": reason, \"mesg\": cgi.escape(mesg)}\n\n http = textwrap.dedent(\"\"\"\\\n HTTP/1.1 %s %s\\r\n 
Connection: close\\r\n Content-Type: text/html\\r\n Content-Length: %d\\r\n \\r\n %s\"\"\") % (str(status_int), reason, len(html), html)\n write_nonblock(sock, http.encode('latin1'))\n\n\ndef normalize_name(name):\n return \"-\".join([w.lower().capitalize() for w in name.split(\"-\")])\n\n\ndef import_app(module):\n parts = module.split(\":\", 1)\n if len(parts) == 1:\n module, obj = module, \"application\"\n else:\n module, obj = parts[0], parts[1]\n\n try:\n __import__(module)\n except ImportError:\n if module.endswith(\".py\") and os.path.exists(module):\n raise ImportError(\"Failed to find application, did \"\n \"you mean '%s:%s'?\" % (module.rsplit(\".\", 1)[0], obj))\n else:\n raise\n\n mod = sys.modules[module]\n\n try:\n app = eval(obj, mod.__dict__)\n except NameError:\n raise AppImportError(\"Failed to find application: %r\" % module)\n\n if app is None:\n raise AppImportError(\"Failed to find application object: %r\" % obj)\n\n if not callable(app):\n raise AppImportError(\"Application object must be callable.\")\n return app\n\n\ndef getcwd():\n # get current path, try to use PWD env first\n try:\n a = os.stat(os.environ['PWD'])\n b = os.stat(os.getcwd())\n if a.st_ino == b.st_ino and a.st_dev == b.st_dev:\n cwd = os.environ['PWD']\n else:\n cwd = os.getcwd()\n except:\n cwd = os.getcwd()\n return cwd\n\n\ndef http_date(timestamp=None):\n \"\"\"Return the current date and time formatted for a message header.\"\"\"\n if timestamp is None:\n timestamp = time.time()\n s = email.utils.formatdate(timestamp, localtime=False, usegmt=True)\n return s\n\n\ndef is_hoppish(header):\n return header.lower().strip() in hop_headers\n\n\ndef daemonize(enable_stdio_inheritance=False):\n \"\"\"\\\n Standard daemonization of a process.\n http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16\n \"\"\"\n if not 'GUNICORN_FD' in os.environ:\n if os.fork():\n os._exit(0)\n os.setsid()\n\n if os.fork():\n os._exit(0)\n\n os.umask(0)\n\n # In both the following any file descriptors above stdin\n # stdout and stderr are left untouched. The inheritence\n # option simply allows one to have output go to a file\n # specified by way of shell redirection when not wanting\n # to use --error-log option.\n\n if not enable_stdio_inheritance:\n # Remap all of stdin, stdout and stderr on to\n # /dev/null. The expectation is that users have\n # specified the --error-log option.\n\n closerange(0, 3)\n\n fd_null = os.open(REDIRECT_TO, os.O_RDWR)\n\n if fd_null != 0:\n os.dup2(fd_null, 0)\n\n os.dup2(fd_null, 1)\n os.dup2(fd_null, 2)\n\n else:\n fd_null = os.open(REDIRECT_TO, os.O_RDWR)\n\n # Always redirect stdin to /dev/null as we would\n # never expect to need to read interactive input.\n\n if fd_null != 0:\n os.close(0)\n os.dup2(fd_null, 0)\n\n # If stdout and stderr are still connected to\n # their original file descriptors we check to see\n # if they are associated with terminal devices.\n # When they are we map them to /dev/null so that\n # are still detached from any controlling terminal\n # properly. If not we preserve them as they are.\n #\n # If stdin and stdout were not hooked up to the\n # original file descriptors, then all bets are\n # off and all we can really do is leave them as\n # they were.\n #\n # This will allow 'gunicorn ... 
> output.log 2>&1'\n # to work with stdout/stderr going to the file\n # as expected.\n #\n # Note that if using --error-log option, the log\n # file specified through shell redirection will\n # only be used up until the log file specified\n # by the option takes over. As it replaces stdout\n # and stderr at the file descriptor level, then\n # anything using stdout or stderr, including having\n # cached a reference to them, will still work.\n\n def redirect(stream, fd_expect):\n try:\n fd = stream.fileno()\n if fd == fd_expect and stream.isatty():\n os.close(fd)\n os.dup2(fd_null, fd)\n except AttributeError:\n pass\n\n redirect(sys.stdout, 1)\n redirect(sys.stderr, 2)\n\n\ndef seed():\n try:\n random.seed(os.urandom(64))\n except NotImplementedError:\n random.seed('%s.%s' % (time.time(), os.getpid()))\n\n\ndef check_is_writeable(path):\n try:\n f = open(path, 'a')\n except IOError as e:\n raise RuntimeError(\"Error: '%s' isn't writable [%r]\" % (path, e))\n f.close()\n\n\ndef to_bytestring(value):\n \"\"\"Converts a string argument to a byte string\"\"\"\n if isinstance(value, bytes):\n return value\n assert isinstance(value, text_type)\n return value.encode(\"utf-8\")\n\n\ndef is_fileobject(obj):\n if not hasattr(obj, \"tell\") or not hasattr(obj, \"fileno\"):\n return False\n\n # check BytesIO case and maybe others\n try:\n obj.fileno()\n except io.UnsupportedOperation:\n return False\n\n return True\n\n\ndef warn(msg):\n sys.stderr.write(\"!!!\\n\")\n\n lines = msg.splitlines()\n for i, line in enumerate(lines):\n if i == 0:\n line = \"WARNING: %s\" % line\n sys.stderr.write(\"!!! %s\\n\" % line)\n\n sys.stderr.write(\"!!!\\n\\n\")\n sys.stderr.flush()\n", "path": "gunicorn/util.py" } ]
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\n\nimport email.utils\nimport fcntl\nimport io\nimport os\nimport pkg_resources\nimport random\nimport resource\nimport socket\nimport sys\nimport textwrap\nimport time\nimport traceback\nimport inspect\nimport errno\nimport warnings\nimport cgi\n\nfrom gunicorn.errors import AppImportError\nfrom gunicorn.six import text_type\nfrom gunicorn.workers import SUPPORTED_WORKERS\n\n\nMAXFD = 1024\nREDIRECT_TO = getattr(os, 'devnull', '/dev/null')\n\ntimeout_default = object()\n\nCHUNK_SIZE = (16 * 1024)\n\nMAX_BODY = 1024 * 132\n\n# Server and Date aren't technically hop-by-hop\n# headers, but they are in the purview of the\n# origin server which the WSGI spec says we should\n# act like. So we drop them and add our own.\n#\n# In the future, concatenation server header values\n# might be better, but nothing else does it and\n# dropping them is easier.\nhop_headers = set(\"\"\"\n connection keep-alive proxy-authenticate proxy-authorization\n te trailers transfer-encoding upgrade\n server date\n \"\"\".split())\n\ntry:\n from setproctitle import setproctitle\n def _setproctitle(title):\n setproctitle(\"gunicorn: %s\" % title)\nexcept ImportError:\n def _setproctitle(title):\n return\n\n\ntry:\n from importlib import import_module\nexcept ImportError:\n def _resolve_name(name, package, level):\n \"\"\"Return the absolute name of the module to be imported.\"\"\"\n if not hasattr(package, 'rindex'):\n raise ValueError(\"'package' not set to a string\")\n dot = len(package)\n for x in range(level, 1, -1):\n try:\n dot = package.rindex('.', 0, dot)\n except ValueError:\n raise ValueError(\"attempted relative import beyond top-level \"\n \"package\")\n return \"%s.%s\" % (package[:dot], name)\n\n def import_module(name, package=None):\n \"\"\"Import a module.\n\nThe 'package' argument is required when performing a relative import. 
It\nspecifies the package to use as the anchor point from which to resolve the\nrelative import to an absolute import.\n\n\"\"\"\n if name.startswith('.'):\n if not package:\n raise TypeError(\"relative imports require the 'package' argument\")\n level = 0\n for character in name:\n if character != '.':\n break\n level += 1\n name = _resolve_name(name[level:], package, level)\n __import__(name)\n return sys.modules[name]\n\n\ndef load_class(uri, default=\"gunicorn.workers.sync.SyncWorker\",\n section=\"gunicorn.workers\"):\n if inspect.isclass(uri):\n return uri\n if uri.startswith(\"egg:\"):\n # uses entry points\n entry_str = uri.split(\"egg:\")[1]\n try:\n dist, name = entry_str.rsplit(\"#\", 1)\n except ValueError:\n dist = entry_str\n name = default\n\n try:\n return pkg_resources.load_entry_point(dist, section, name)\n except:\n exc = traceback.format_exc()\n raise RuntimeError(\"class uri %r invalid or not found: \\n\\n[%s]\" % (uri,\n exc))\n else:\n components = uri.split('.')\n if len(components) == 1:\n while True:\n if uri.startswith(\"#\"):\n uri = uri[1:]\n\n if uri in SUPPORTED_WORKERS:\n components = SUPPORTED_WORKERS[uri].split(\".\")\n break\n\n try:\n return pkg_resources.load_entry_point(\"gunicorn\",\n section, uri)\n except:\n exc = traceback.format_exc()\n raise RuntimeError(\"class uri %r invalid or not found: \\n\\n[%s]\" % (uri,\n exc))\n\n klass = components.pop(-1)\n\n try:\n mod = import_module('.'.join(components))\n except:\n exc = traceback.format_exc()\n raise RuntimeError(\n \"class uri %r invalid or not found: \\n\\n[%s]\" %\n (uri, exc))\n return getattr(mod, klass)\n\n\ndef set_owner_process(uid, gid):\n \"\"\" set user and group of workers processes \"\"\"\n if gid:\n # versions of python < 2.6.2 don't manage unsigned int for\n # groups like on osx or fedora\n gid = abs(gid) & 0x7FFFFFFF\n os.setgid(gid)\n if uid:\n os.setuid(uid)\n\n\ndef chown(path, uid, gid):\n gid = abs(gid) & 0x7FFFFFFF # see note above.\n os.chown(path, uid, gid)\n\n\nif sys.platform.startswith(\"win\"):\n def _waitfor(func, pathname, waitall=False):\n # Peform the operation\n func(pathname)\n # Now setup the wait loop\n if waitall:\n dirname = pathname\n else:\n dirname, name = os.path.split(pathname)\n dirname = dirname or '.'\n # Check for `pathname` to be removed from the filesystem.\n # The exponential backoff of the timeout amounts to a total\n # of ~1 second after which the deletion is probably an error\n # anyway.\n # Testing on a [email protected] shows that usually only 1 iteration is\n # required when contention occurs.\n timeout = 0.001\n while timeout < 1.0:\n # Note we are only testing for the existance of the file(s) in\n # the contents of the directory regardless of any security or\n # access rights. 
If we have made it this far, we have sufficient\n # permissions to do that much using Python's equivalent of the\n # Windows API FindFirstFile.\n # Other Windows APIs can fail or give incorrect results when\n # dealing with files that are pending deletion.\n L = os.listdir(dirname)\n if not (L if waitall else name in L):\n return\n # Increase the timeout and try again\n time.sleep(timeout)\n timeout *= 2\n warnings.warn('tests may fail, delete still pending for ' + pathname,\n RuntimeWarning, stacklevel=4)\n\n def _unlink(filename):\n _waitfor(os.unlink, filename)\nelse:\n _unlink = os.unlink\n\n\ndef unlink(filename):\n try:\n _unlink(filename)\n except OSError as error:\n # The filename need not exist.\n if error.errno not in (errno.ENOENT, errno.ENOTDIR):\n raise\n\n\ndef is_ipv6(addr):\n try:\n socket.inet_pton(socket.AF_INET6, addr)\n except socket.error: # not a valid address\n return False\n except ValueError: # ipv6 not supported on this platform\n return False\n return True\n\n\ndef parse_address(netloc, default_port=8000):\n if netloc.startswith(\"unix://\"):\n return netloc.split(\"unix://\")[1]\n\n if netloc.startswith(\"unix:\"):\n return netloc.split(\"unix:\")[1]\n\n if netloc.startswith(\"tcp://\"):\n netloc = netloc.split(\"tcp://\")[1]\n\n\n # get host\n if '[' in netloc and ']' in netloc:\n host = netloc.split(']')[0][1:].lower()\n elif ':' in netloc:\n host = netloc.split(':')[0].lower()\n elif netloc == \"\":\n host = \"0.0.0.0\"\n else:\n host = netloc.lower()\n\n #get port\n netloc = netloc.split(']')[-1]\n if \":\" in netloc:\n port = netloc.split(':', 1)[1]\n if not port.isdigit():\n raise RuntimeError(\"%r is not a valid port number.\" % port)\n port = int(port)\n else:\n port = default_port\n return (host, port)\n\ndef get_maxfd():\n maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]\n if (maxfd == resource.RLIM_INFINITY):\n maxfd = MAXFD\n return maxfd\n\n\ndef close_on_exec(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFD)\n flags |= fcntl.FD_CLOEXEC\n fcntl.fcntl(fd, fcntl.F_SETFD, flags)\n\n\ndef set_non_blocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n\ndef close(sock):\n try:\n sock.close()\n except socket.error:\n pass\n\ntry:\n from os import closerange\nexcept ImportError:\n def closerange(fd_low, fd_high):\n # Iterate through and close all file descriptors.\n for fd in range(fd_low, fd_high):\n try:\n os.close(fd)\n except OSError: # ERROR, fd wasn't open to begin with (ignored)\n pass\n\n\ndef write_chunk(sock, data):\n if isinstance(data, text_type):\n data = data.encode('utf-8')\n chunk_size = \"%X\\r\\n\" % len(data)\n chunk = b\"\".join([chunk_size.encode('utf-8'), data, b\"\\r\\n\"])\n sock.sendall(chunk)\n\n\ndef write(sock, data, chunked=False):\n if chunked:\n return write_chunk(sock, data)\n sock.sendall(data)\n\n\ndef write_nonblock(sock, data, chunked=False):\n timeout = sock.gettimeout()\n if timeout != 0.0:\n try:\n sock.setblocking(0)\n return write(sock, data, chunked)\n finally:\n sock.setblocking(1)\n else:\n return write(sock, data, chunked)\n\n\ndef writelines(sock, lines, chunked=False):\n for line in list(lines):\n write(sock, line, chunked)\n\n\ndef write_error(sock, status_int, reason, mesg):\n html = textwrap.dedent(\"\"\"\\\n <html>\n <head>\n <title>%(reason)s</title>\n </head>\n <body>\n <h1><p>%(reason)s</p></h1>\n %(mesg)s\n </body>\n </html>\n \"\"\") % {\"reason\": reason, \"mesg\": cgi.escape(mesg)}\n\n http = textwrap.dedent(\"\"\"\\\n HTTP/1.1 %s %s\\r\n 
Connection: close\\r\n Content-Type: text/html\\r\n Content-Length: %d\\r\n \\r\n %s\"\"\") % (str(status_int), reason, len(html), html)\n write_nonblock(sock, http.encode('latin1'))\n\n\ndef normalize_name(name):\n return \"-\".join([w.lower().capitalize() for w in name.split(\"-\")])\n\n\ndef import_app(module):\n parts = module.split(\":\", 1)\n if len(parts) == 1:\n module, obj = module, \"application\"\n else:\n module, obj = parts[0], parts[1]\n\n try:\n __import__(module)\n except ImportError:\n if module.endswith(\".py\") and os.path.exists(module):\n raise ImportError(\"Failed to find application, did \"\n \"you mean '%s:%s'?\" % (module.rsplit(\".\", 1)[0], obj))\n else:\n raise\n\n mod = sys.modules[module]\n\n try:\n app = eval(obj, mod.__dict__)\n except NameError:\n raise AppImportError(\"Failed to find application: %r\" % module)\n\n if app is None:\n raise AppImportError(\"Failed to find application object: %r\" % obj)\n\n if not callable(app):\n raise AppImportError(\"Application object must be callable.\")\n return app\n\n\ndef getcwd():\n # get current path, try to use PWD env first\n try:\n a = os.stat(os.environ['PWD'])\n b = os.stat(os.getcwd())\n if a.st_ino == b.st_ino and a.st_dev == b.st_dev:\n cwd = os.environ['PWD']\n else:\n cwd = os.getcwd()\n except:\n cwd = os.getcwd()\n return cwd\n\n\ndef http_date(timestamp=None):\n \"\"\"Return the current date and time formatted for a message header.\"\"\"\n if timestamp is None:\n timestamp = time.time()\n s = email.utils.formatdate(timestamp, localtime=False, usegmt=True)\n return s\n\n\ndef is_hoppish(header):\n return header.lower().strip() in hop_headers\n\n\ndef daemonize(enable_stdio_inheritance=False):\n \"\"\"\\\n Standard daemonization of a process.\n http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16\n \"\"\"\n if not 'GUNICORN_FD' in os.environ:\n if os.fork():\n os._exit(0)\n os.setsid()\n\n if os.fork():\n os._exit(0)\n\n os.umask(0)\n\n # In both the following any file descriptors above stdin\n # stdout and stderr are left untouched. The inheritence\n # option simply allows one to have output go to a file\n # specified by way of shell redirection when not wanting\n # to use --error-log option.\n\n if not enable_stdio_inheritance:\n # Remap all of stdin, stdout and stderr on to\n # /dev/null. The expectation is that users have\n # specified the --error-log option.\n\n closerange(0, 3)\n\n fd_null = os.open(REDIRECT_TO, os.O_RDWR)\n\n if fd_null != 0:\n os.dup2(fd_null, 0)\n\n os.dup2(fd_null, 1)\n os.dup2(fd_null, 2)\n\n else:\n fd_null = os.open(REDIRECT_TO, os.O_RDWR)\n\n # Always redirect stdin to /dev/null as we would\n # never expect to need to read interactive input.\n\n if fd_null != 0:\n os.close(0)\n os.dup2(fd_null, 0)\n\n # If stdout and stderr are still connected to\n # their original file descriptors we check to see\n # if they are associated with terminal devices.\n # When they are we map them to /dev/null so that\n # are still detached from any controlling terminal\n # properly. If not we preserve them as they are.\n #\n # If stdin and stdout were not hooked up to the\n # original file descriptors, then all bets are\n # off and all we can really do is leave them as\n # they were.\n #\n # This will allow 'gunicorn ... 
> output.log 2>&1'\n # to work with stdout/stderr going to the file\n # as expected.\n #\n # Note that if using --error-log option, the log\n # file specified through shell redirection will\n # only be used up until the log file specified\n # by the option takes over. As it replaces stdout\n # and stderr at the file descriptor level, then\n # anything using stdout or stderr, including having\n # cached a reference to them, will still work.\n\n def redirect(stream, fd_expect):\n try:\n fd = stream.fileno()\n if fd == fd_expect and stream.isatty():\n os.close(fd)\n os.dup2(fd_null, fd)\n except AttributeError:\n pass\n\n redirect(sys.stdout, 1)\n redirect(sys.stderr, 2)\n\n\ndef seed():\n try:\n random.seed(os.urandom(64))\n except NotImplementedError:\n random.seed('%s.%s' % (time.time(), os.getpid()))\n\n\ndef check_is_writeable(path):\n try:\n f = open(path, 'a')\n except IOError as e:\n raise RuntimeError(\"Error: '%s' isn't writable [%r]\" % (path, e))\n f.close()\n\n\ndef to_bytestring(value):\n \"\"\"Converts a string argument to a byte string\"\"\"\n if isinstance(value, bytes):\n return value\n assert isinstance(value, text_type)\n return value.encode(\"utf-8\")\n\n\ndef is_fileobject(obj):\n if not hasattr(obj, \"tell\") or not hasattr(obj, \"fileno\"):\n return False\n\n # check BytesIO case and maybe others\n try:\n obj.fileno()\n except (IOError, io.UnsupportedOperation):\n return False\n\n return True\n\n\ndef warn(msg):\n sys.stderr.write(\"!!!\\n\")\n\n lines = msg.splitlines()\n for i, line in enumerate(lines):\n if i == 0:\n line = \"WARNING: %s\" % line\n sys.stderr.write(\"!!! %s\\n\" % line)\n\n sys.stderr.write(\"!!!\\n\\n\")\n sys.stderr.flush()\n", "path": "gunicorn/util.py" } ]
diff --git a/gunicorn/util.py b/gunicorn/util.py index 9ef79a3a1..b7d3a67a9 100644 --- a/gunicorn/util.py +++ b/gunicorn/util.py @@ -515,7 +515,7 @@ def is_fileobject(obj): # check BytesIO case and maybe others try: obj.fileno() - except io.UnsupportedOperation: + except (IOError, io.UnsupportedOperation): return False return True
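To make the one-line change above concrete, here is a small hedged sketch. `StreamedResponse` is a mock that imitates the behaviour described in the linked urllib3 code (a `fileno()` that raises `IOError`), not the real class, and `is_fileobject` mirrors the patched gunicorn helper.

```python
import io

class StreamedResponse:
    """Mock of a urllib3-style response whose fileno() raises IOError."""
    def tell(self):
        return 0
    def fileno(self):
        raise IOError("The file-like object this response wraps has no file descriptor")

def is_fileobject(obj):
    # Same logic as gunicorn.util.is_fileobject after the patch.
    if not hasattr(obj, "tell") or not hasattr(obj, "fileno"):
        return False
    try:
        obj.fileno()
    except (IOError, io.UnsupportedOperation):
        return False
    return True

print(is_fileobject(StreamedResponse()))   # False (before the patch this call raised IOError)
print(is_fileobject(io.BytesIO(b"data")))  # False: BytesIO.fileno() raises UnsupportedOperation
```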
ivy-llc__ivy-15926
log
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py" } ]
[ { "content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py" } ]
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py index 66f96ec019fa8..791d4468a085f 100644 --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -46,3 +46,9 @@ def acosh(x, name=None): @to_ivy_arrays_and_back def asin(x, name=None): return ivy.asin(x) + + +@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") +@to_ivy_arrays_and_back +def log(x, name=None): + return ivy.log(x) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py index 044f4d4ebea35..ff61aebfb7526 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py @@ -190,3 +190,29 @@ def test_paddle_asin( on_device=on_device, x=x[0], ) + + +# log +@handle_frontend_test( + fn_tree="paddle.log", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + ), +) +def test_paddle_log( + *, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + )
internetarchive__openlibrary-4591
Adding to lists broken Adding an item to a list no longer works as of 12-02-2021. ### Evidence / Screenshot (if possible) ### Relevant url? <!-- `https://openlibrary.org/...` --> ### Steps to Reproduce <!-- What steps caused you to find the bug? --> 1. Go to an edition, etc. 2. Try to add the item to a list. <!-- What actually happened after these steps? What did you expect to happen? --> * Actual: The list link loads the list page. * Expected: The item should be added to the list. ### Details - **Logged in (Y/N)?** Y - **Browser type/version?** Chrome Version 88.0.4324.150 (Official Build) (x86_64) - **Operating system?** Mac Big Sur - **Environment (prod/dev/local)?** prod <!-- If not sure, put prod --> ### Proposal & Constraints <!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? --> ### Related files <!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. --> ### Stakeholders <!-- @ tag stakeholders of this bug --> @cclauss
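The `helpers.py` snapshot attached below already contains the `json.dumps(d or {}, **kw)` guard introduced in the first entry of this batch. Whether that guard is the actual root cause of this regression is not established here, but a quick sketch shows how it changes the encoding of empty lists, which the client-side lists code could conceivably trip over.

```python
import json

def json_encode(d, **kw):
    # Guard as it appears in the attached helpers.py snapshot.
    return json.dumps(d or {}, **kw)

print(json_encode([]))                 # '{}' -- an empty list is coerced to an object
print(json.dumps([]))                  # '[]' -- what a caller serializing list seeds might expect
print(json_encode(["/works/OL123W"]))  # hypothetical work key; non-empty lists are unaffected
```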
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\n import json\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n return json.dumps(d or {}, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. 
This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. 
This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py" } ]
[ { "content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\n import json\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n return json.dumps(d, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. 
This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. 
This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py" } ]
diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py index b4ec1476453..de212412998 100644 --- a/openlibrary/core/helpers.py +++ b/openlibrary/core/helpers.py @@ -101,7 +101,7 @@ def get_nofollow(name, event): def json_encode(d, **kw): """Same as json.dumps. """ - return json.dumps(d or {}, **kw) + return json.dumps(d, **kw) def safesort(iterable, key=None, reverse=False):
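The one-line change above matters because `d or {}` silently replaces every falsy value (`[]`, `0`, `False`, `None`, `""`) with an empty dict before encoding. If the add-to-list flow ever encodes such a value (an empty seed list, for instance), the old helper would emit `"{}"` where `"[]"` or `"null"` was expected, which is a plausible way for the feature to break. Here is a self-contained comparison of the two behaviours, using only the standard `json` module (no Open Library code assumed):

``` python
import json

def json_encode_old(d, **kw):
    # pre-patch behaviour: any falsy input is swapped for an empty dict
    return json.dumps(d or {}, **kw)

def json_encode_new(d, **kw):
    # post-patch behaviour: encode exactly what was passed in
    return json.dumps(d, **kw)

for value in ([], 0, False, None, "", {"seeds": []}):
    print(repr(value), "->", json_encode_old(value), "vs", json_encode_new(value))

# The old helper turns [], 0, False, None and "" all into "{}";
# the new helper round-trips them as "[]", "0", "false", "null" and "\"\"".
```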
numpy__numpy-4666
`column_stack()`: error in documentation

In `numpy/lib/shape_base.py`, around line 277, the docstring claims:
```
This function is equivalent to ``np.vstack(tup).T``.
```
If I'm not mistaken, this was only true for the old behaviour. Currently, inputs with >= 2 dimensions are not transposed (which is good!), so the two are no longer equivalent. Here is an example; the commented-out line raises an error:
``` python
a = array([[1, 2], [3, 4], [5, 6]])
b = array([11, 12, 13])
column_stack((a, b))
#vstack((a, b)).T
```
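To make the non-equivalence concrete, here is a runnable version of the reporter's example (assuming a NumPy release with the current behaviour, where 2-D inputs are stacked as-is):

``` python
import numpy as np

a = np.array([[1, 2], [3, 4], [5, 6]])   # shape (3, 2)
b = np.array([11, 12, 13])               # shape (3,)

# column_stack turns the 1-D array into a (3, 1) column and concatenates along axis 1
print(np.column_stack((a, b)))
# [[ 1  2 11]
#  [ 3  4 12]
#  [ 5  6 13]]

# The docstring's claimed equivalent fails: vstack promotes b to shape (1, 3),
# which cannot be stacked on top of the (3, 2) array a.
try:
    np.vstack((a, b)).T
except ValueError as exc:
    print("vstack raised:", exc)
```

For purely 1-D inputs the old equivalence still holds, which is presumably why the fix below simply drops the `Notes` section from the docstring rather than rewording it.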
[ { "content": "from __future__ import division, absolute_import, print_function\n\n__all__ = ['column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit',\n 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'get_array_wrap']\n\nimport warnings\n\nimport numpy.core.numeric as _nx\nfrom numpy.core.numeric import asarray, zeros, newaxis, outer, \\\n concatenate, isscalar, array, asanyarray\nfrom numpy.core.fromnumeric import product, reshape\nfrom numpy.core import hstack, vstack, atleast_3d\n\ndef apply_along_axis(func1d,axis,arr,*args):\n \"\"\"\n Apply a function to 1-D slices along the given axis.\n\n Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`\n is a 1-D slice of `arr` along `axis`.\n\n Parameters\n ----------\n func1d : function\n This function should accept 1-D arrays. It is applied to 1-D\n slices of `arr` along the specified axis.\n axis : integer\n Axis along which `arr` is sliced.\n arr : ndarray\n Input array.\n args : any\n Additional arguments to `func1d`.\n\n Returns\n -------\n apply_along_axis : ndarray\n The output array. The shape of `outarr` is identical to the shape of\n `arr`, except along the `axis` dimension, where the length of `outarr`\n is equal to the size of the return value of `func1d`. If `func1d`\n returns a scalar `outarr` will have one fewer dimensions than `arr`.\n\n See Also\n --------\n apply_over_axes : Apply a function repeatedly over multiple axes.\n\n Examples\n --------\n >>> def my_func(a):\n ... \\\"\\\"\\\"Average first and last element of a 1-D array\\\"\\\"\\\"\n ... return (a[0] + a[-1]) * 0.5\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(my_func, 0, b)\n array([ 4., 5., 6.])\n >>> np.apply_along_axis(my_func, 1, b)\n array([ 2., 5., 8.])\n\n For a function that doesn't return a scalar, the number of dimensions in\n `outarr` is the same as `arr`.\n\n >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])\n >>> np.apply_along_axis(sorted, 1, b)\n array([[1, 7, 8],\n [3, 4, 9],\n [2, 5, 6]])\n\n \"\"\"\n arr = asarray(arr)\n nd = arr.ndim\n if axis < 0:\n axis += nd\n if (axis >= nd):\n raise ValueError(\"axis must be less than arr.ndim; axis=%d, rank=%d.\"\n % (axis, nd))\n ind = [0]*(nd-1)\n i = zeros(nd, 'O')\n indlist = list(range(nd))\n indlist.remove(axis)\n i[axis] = slice(None, None)\n outshape = asarray(arr.shape).take(indlist)\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())],*args)\n # if res is a number, then we have a smaller output array\n if isscalar(res):\n outarr = zeros(outshape, asarray(res).dtype)\n outarr[tuple(ind)] = res\n Ntot = product(outshape)\n k = 1\n while k < Ntot:\n # increment the index\n ind[-1] += 1\n n = -1\n while (ind[n] >= outshape[n]) and (n > (1-nd)):\n ind[n-1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())],*args)\n outarr[tuple(ind)] = res\n k += 1\n return outarr\n else:\n Ntot = product(outshape)\n holdshape = outshape\n outshape = list(arr.shape)\n outshape[axis] = len(res)\n outarr = zeros(outshape, asarray(res).dtype)\n outarr[tuple(i.tolist())] = res\n k = 1\n while k < Ntot:\n # increment the index\n ind[-1] += 1\n n = -1\n while (ind[n] >= holdshape[n]) and (n > (1-nd)):\n ind[n-1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())],*args)\n outarr[tuple(i.tolist())] = res\n k += 1\n return outarr\n\n\ndef apply_over_axes(func, a, axes):\n \"\"\"\n Apply a function repeatedly over multiple axes.\n\n `func` is 
called as `res = func(a, axis)`, where `axis` is the first\n element of `axes`. The result `res` of the function call must have\n either the same dimensions as `a` or one less dimension. If `res`\n has one less dimension than `a`, a dimension is inserted before\n `axis`. The call to `func` is then repeated for each axis in `axes`,\n with `res` as the first argument.\n\n Parameters\n ----------\n func : function\n This function must take two arguments, `func(a, axis)`.\n a : array_like\n Input array.\n axes : array_like\n Axes over which `func` is applied; the elements must be integers.\n\n Returns\n -------\n apply_over_axis : ndarray\n The output array. The number of dimensions is the same as `a`,\n but the shape can be different. This depends on whether `func`\n changes the shape of its output with respect to its input.\n\n See Also\n --------\n apply_along_axis :\n Apply a function to 1-D slices of an array along the given axis.\n\n Notes\n ------\n This function is equivalent to tuple axis arguments to reorderable ufuncs\n with keepdims=True. Tuple axis arguments to ufuncs have been availabe since\n version 1.7.0.\n\n Examples\n --------\n >>> a = np.arange(24).reshape(2,3,4)\n >>> a\n array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\n Sum over axes 0 and 2. The result has same number of dimensions\n as the original array:\n\n >>> np.apply_over_axes(np.sum, a, [0,2])\n array([[[ 60],\n [ 92],\n [124]]])\n\n Tuple axis arguments to ufuncs are equivalent:\n\n >>> np.sum(a, axis=(0,2), keepdims=True)\n array([[[ 60],\n [ 92],\n [124]]])\n\n \"\"\"\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0: axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError(\"function is not returning \"\n \"an array of the correct shape\")\n return val\n\ndef expand_dims(a, axis):\n \"\"\"\n Expand the shape of an array.\n\n Insert a new axis, corresponding to a given position in the array shape.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int\n Position (amongst axes) where new axis is to be inserted.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n\n See Also\n --------\n doc.indexing, atleast_1d, atleast_2d, atleast_3d\n\n Examples\n --------\n >>> x = np.array([1,2])\n >>> x.shape\n (2,)\n\n The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1, 2]])\n >>> y.shape\n (1, 2)\n\n >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis]\n >>> y\n array([[1],\n [2]])\n >>> y.shape\n (2, 1)\n\n Note that some examples may use ``None`` instead of ``np.newaxis``. These\n are the same objects:\n\n >>> np.newaxis is None\n True\n\n \"\"\"\n a = asarray(a)\n shape = a.shape\n if axis < 0:\n axis = axis + len(shape) + 1\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\nrow_stack = vstack\n\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. 
All of them must have the same first dimension.\n\n Returns\n -------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n hstack, vstack, concatenate\n\n Notes\n -----\n This function is equivalent to ``np.vstack(tup).T``.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1, 2],\n [2, 3],\n [3, 4]])\n\n \"\"\"\n arrays = []\n for v in tup:\n arr = array(v, copy=False, subok=True)\n if arr.ndim < 2:\n arr = array(arr, copy=False, subok=True, ndmin=2).T\n arrays.append(arr)\n return _nx.concatenate(arrays, 1)\n\ndef dstack(tup):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n\n Takes a sequence of arrays and stack them along the third axis\n to make a single array. Rebuilds arrays divided by `dsplit`.\n This is a simple way to stack 2D arrays (images) into a single\n 3D array for processing.\n\n Parameters\n ----------\n tup : sequence of arrays\n Arrays to stack. All of them must have the same shape along all\n but the third axis.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n See Also\n --------\n vstack : Stack along first axis.\n hstack : Stack along second axis.\n concatenate : Join arrays.\n dsplit : Split array along third axis.\n\n Notes\n -----\n Equivalent to ``np.concatenate(tup, axis=2)``.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n\n \"\"\"\n return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)\n\ndef _replace_zero_by_x_arrays(sub_arys):\n for i in range(len(sub_arys)):\n if len(_nx.shape(sub_arys[i])) == 0:\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n return sub_arys\n\ndef array_split(ary,indices_or_sections,axis = 0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Please refer to the ``split`` documentation. 
The only difference\n between these functions is that ``array_split`` allows\n `indices_or_sections` to be an integer that does *not* equally\n divide the axis.\n\n See Also\n --------\n split : Split array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]\n\n \"\"\"\n try:\n Ntotal = ary.shape[axis]\n except AttributeError:\n Ntotal = len(ary)\n try: # handle scalar case.\n Nsections = len(indices_or_sections) + 1\n div_points = [0] + list(indices_or_sections) + [Ntotal]\n except TypeError: #indices_or_sections is a scalar, not an array.\n Nsections = int(indices_or_sections)\n if Nsections <= 0:\n raise ValueError('number sections must be larger than 0.')\n Neach_section, extras = divmod(Ntotal, Nsections)\n section_sizes = [0] + \\\n extras * [Neach_section+1] + \\\n (Nsections-extras) * [Neach_section]\n div_points = _nx.array(section_sizes).cumsum()\n\n sub_arys = []\n sary = _nx.swapaxes(ary, axis, 0)\n for i in range(Nsections):\n st = div_points[i]; end = div_points[i+1]\n sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))\n\n # This \"kludge\" was introduced here to replace arrays shaped (0, 10)\n # or similar with an array shaped (0,).\n # There seems no need for this, so give a FutureWarning to remove later.\n if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1:\n warnings.warn(\"in the future np.array_split will retain the shape of \"\n \"arrays with a zero size, instead of replacing them by \"\n \"`array([])`, which always has a shape of (0,).\",\n FutureWarning)\n sub_arys = _replace_zero_by_x_arrays(sub_arys)\n\n return sub_arys\n\ndef split(ary,indices_or_sections,axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D array\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size. 
Does not raise an exception if\n an equal division cannot be made.\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join arrays together.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]\n\n >>> x = np.arange(8.0)\n >>> np.split(x, [3, 5, 6, 10])\n [array([ 0., 1., 2.]),\n array([ 3., 4.]),\n array([ 5.]),\n array([ 6., 7.]),\n array([], dtype=float64)]\n\n \"\"\"\n try: len(indices_or_sections)\n except TypeError:\n sections = indices_or_sections\n N = ary.shape[axis]\n if N % sections:\n raise ValueError('array split does not result in an equal division')\n res = array_split(ary, indices_or_sections, axis)\n return res\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. `hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [ 12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [ 10., 11.],\n [ 14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [ 12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [ 11.],\n [ 15.]]),\n array([], dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n\n \"\"\"\n if len(_nx.shape(ary)) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if len(ary.shape) > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays vertically (row-wise).\n\n Please refer to the ``split`` documentation. 
``vsplit`` is equivalent\n to ``split`` with `axis=0` (default), the array is always split along the\n first axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]]),\n array([[ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])]\n >>> np.vsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]]),\n array([[ 12., 13., 14., 15.]]),\n array([], dtype=float64)]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[ 0., 1.],\n [ 2., 3.]]]),\n array([[[ 4., 5.],\n [ 6., 7.]]])]\n\n \"\"\"\n if len(_nx.shape(ary)) < 2:\n raise ValueError('vsplit only works on arrays of 2 or more dimensions')\n return split(ary, indices_or_sections, 0)\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. `dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [ 12., 13.]]]),\n array([[[ 2., 3.],\n [ 6., 7.]],\n [[ 10., 11.],\n [ 14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [ 12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[ 11.],\n [ 15.]]]),\n array([], dtype=float64)]\n\n \"\"\"\n if len(_nx.shape(ary)) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n\ndef get_array_prepare(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_prepare__) for i, x in enumerate(args)\n if hasattr(x, '__array_prepare__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef get_array_wrap(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. 
If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_wrap__) for i, x in enumerate(args)\n if hasattr(x, '__array_wrap__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef kron(a, b):\n \"\"\"\n Kronecker product of two arrays.\n\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n\n Parameters\n ----------\n a, b : array_like\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n outer : The outer product\n\n Notes\n -----\n The function assumes that the number of dimenensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,\n the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n\n where::\n\n kt = it * st + jt, t = 0,...,N\n\n In the common 2-D case (N=1), the block structure can be visualized::\n\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]\n\n\n Examples\n --------\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])\n\n >>> np.kron(np.eye(2), np.ones((2,2)))\n array([[ 1., 1., 0., 0.],\n [ 1., 1., 0., 0.],\n [ 0., 0., 1., 1.],\n [ 0., 0., 1., 1.]])\n\n >>> a = np.arange(100).reshape((2,5,2,5))\n >>> b = np.arange(24).reshape((2,3,4))\n >>> c = np.kron(a,b)\n >>> c.shape\n (2, 10, 6, 20)\n >>> I = (1,3,0,2)\n >>> J = (0,2,1)\n >>> J1 = (0,) + J # extend to ndim=4\n >>> S1 = (1,) + b.shape\n >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))\n >>> c[K] == a[I]*b[J]\n True\n\n \"\"\"\n b = asanyarray(b)\n a = array(a, copy=False, subok=True, ndmin=b.ndim)\n ndb, nda = b.ndim, a.ndim\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n nd = ndb\n if (ndb != nda):\n if (ndb > nda):\n as_ = (1,)*(ndb-nda) + as_\n else:\n bs = (1,)*(nda-ndb) + bs\n nd = nda\n result = outer(a, b).reshape(as_+bs)\n axis = nd-1\n for _ in range(nd):\n result = concatenate(result, axis=axis)\n wrapper = get_array_prepare(a, b)\n if wrapper is not None:\n result = wrapper(result)\n wrapper = get_array_wrap(a, b)\n if wrapper is not None:\n result = wrapper(result)\n return result\n\n\ndef tile(A, reps):\n \"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. 
If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Parameters\n ----------\n A : array_like\n The input array.\n reps : array_like\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n See Also\n --------\n repeat : Repeat elements of an array.\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0, 1, 2, 0, 1, 2])\n >>> np.tile(a, (2, 2))\n array([[0, 1, 2, 0, 1, 2],\n [0, 1, 2, 0, 1, 2]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0, 1, 2, 0, 1, 2]],\n [[0, 1, 2, 0, 1, 2]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n >>> np.tile(b, (2, 1))\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n \"\"\"\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n d = len(tup)\n c = _nx.array(A, copy=False, subok=True, ndmin=d)\n shape = list(c.shape)\n n = max(c.size, 1)\n if (d < c.ndim):\n tup = (1,)*(c.ndim-d) + tup\n for i, nrep in enumerate(tup):\n if nrep!=1:\n c = c.reshape(-1, n).repeat(nrep, 0)\n dim_in = shape[i]\n dim_out = dim_in*nrep\n shape[i] = dim_out\n n //= max(dim_in, 1)\n return c.reshape(shape)\n", "path": "numpy/lib/shape_base.py" } ]
[ { "content": "from __future__ import division, absolute_import, print_function\n\n__all__ = ['column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit',\n 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'get_array_wrap']\n\nimport warnings\n\nimport numpy.core.numeric as _nx\nfrom numpy.core.numeric import asarray, zeros, newaxis, outer, \\\n concatenate, isscalar, array, asanyarray\nfrom numpy.core.fromnumeric import product, reshape\nfrom numpy.core import hstack, vstack, atleast_3d\n\ndef apply_along_axis(func1d,axis,arr,*args):\n \"\"\"\n Apply a function to 1-D slices along the given axis.\n\n Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`\n is a 1-D slice of `arr` along `axis`.\n\n Parameters\n ----------\n func1d : function\n This function should accept 1-D arrays. It is applied to 1-D\n slices of `arr` along the specified axis.\n axis : integer\n Axis along which `arr` is sliced.\n arr : ndarray\n Input array.\n args : any\n Additional arguments to `func1d`.\n\n Returns\n -------\n apply_along_axis : ndarray\n The output array. The shape of `outarr` is identical to the shape of\n `arr`, except along the `axis` dimension, where the length of `outarr`\n is equal to the size of the return value of `func1d`. If `func1d`\n returns a scalar `outarr` will have one fewer dimensions than `arr`.\n\n See Also\n --------\n apply_over_axes : Apply a function repeatedly over multiple axes.\n\n Examples\n --------\n >>> def my_func(a):\n ... \\\"\\\"\\\"Average first and last element of a 1-D array\\\"\\\"\\\"\n ... return (a[0] + a[-1]) * 0.5\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(my_func, 0, b)\n array([ 4., 5., 6.])\n >>> np.apply_along_axis(my_func, 1, b)\n array([ 2., 5., 8.])\n\n For a function that doesn't return a scalar, the number of dimensions in\n `outarr` is the same as `arr`.\n\n >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])\n >>> np.apply_along_axis(sorted, 1, b)\n array([[1, 7, 8],\n [3, 4, 9],\n [2, 5, 6]])\n\n \"\"\"\n arr = asarray(arr)\n nd = arr.ndim\n if axis < 0:\n axis += nd\n if (axis >= nd):\n raise ValueError(\"axis must be less than arr.ndim; axis=%d, rank=%d.\"\n % (axis, nd))\n ind = [0]*(nd-1)\n i = zeros(nd, 'O')\n indlist = list(range(nd))\n indlist.remove(axis)\n i[axis] = slice(None, None)\n outshape = asarray(arr.shape).take(indlist)\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())],*args)\n # if res is a number, then we have a smaller output array\n if isscalar(res):\n outarr = zeros(outshape, asarray(res).dtype)\n outarr[tuple(ind)] = res\n Ntot = product(outshape)\n k = 1\n while k < Ntot:\n # increment the index\n ind[-1] += 1\n n = -1\n while (ind[n] >= outshape[n]) and (n > (1-nd)):\n ind[n-1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())],*args)\n outarr[tuple(ind)] = res\n k += 1\n return outarr\n else:\n Ntot = product(outshape)\n holdshape = outshape\n outshape = list(arr.shape)\n outshape[axis] = len(res)\n outarr = zeros(outshape, asarray(res).dtype)\n outarr[tuple(i.tolist())] = res\n k = 1\n while k < Ntot:\n # increment the index\n ind[-1] += 1\n n = -1\n while (ind[n] >= holdshape[n]) and (n > (1-nd)):\n ind[n-1] += 1\n ind[n] = 0\n n -= 1\n i.put(indlist, ind)\n res = func1d(arr[tuple(i.tolist())],*args)\n outarr[tuple(i.tolist())] = res\n k += 1\n return outarr\n\n\ndef apply_over_axes(func, a, axes):\n \"\"\"\n Apply a function repeatedly over multiple axes.\n\n `func` is 
called as `res = func(a, axis)`, where `axis` is the first\n element of `axes`. The result `res` of the function call must have\n either the same dimensions as `a` or one less dimension. If `res`\n has one less dimension than `a`, a dimension is inserted before\n `axis`. The call to `func` is then repeated for each axis in `axes`,\n with `res` as the first argument.\n\n Parameters\n ----------\n func : function\n This function must take two arguments, `func(a, axis)`.\n a : array_like\n Input array.\n axes : array_like\n Axes over which `func` is applied; the elements must be integers.\n\n Returns\n -------\n apply_over_axis : ndarray\n The output array. The number of dimensions is the same as `a`,\n but the shape can be different. This depends on whether `func`\n changes the shape of its output with respect to its input.\n\n See Also\n --------\n apply_along_axis :\n Apply a function to 1-D slices of an array along the given axis.\n\n Notes\n ------\n This function is equivalent to tuple axis arguments to reorderable ufuncs\n with keepdims=True. Tuple axis arguments to ufuncs have been availabe since\n version 1.7.0.\n\n Examples\n --------\n >>> a = np.arange(24).reshape(2,3,4)\n >>> a\n array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\n Sum over axes 0 and 2. The result has same number of dimensions\n as the original array:\n\n >>> np.apply_over_axes(np.sum, a, [0,2])\n array([[[ 60],\n [ 92],\n [124]]])\n\n Tuple axis arguments to ufuncs are equivalent:\n\n >>> np.sum(a, axis=(0,2), keepdims=True)\n array([[[ 60],\n [ 92],\n [124]]])\n\n \"\"\"\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0: axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError(\"function is not returning \"\n \"an array of the correct shape\")\n return val\n\ndef expand_dims(a, axis):\n \"\"\"\n Expand the shape of an array.\n\n Insert a new axis, corresponding to a given position in the array shape.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int\n Position (amongst axes) where new axis is to be inserted.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n\n See Also\n --------\n doc.indexing, atleast_1d, atleast_2d, atleast_3d\n\n Examples\n --------\n >>> x = np.array([1,2])\n >>> x.shape\n (2,)\n\n The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1, 2]])\n >>> y.shape\n (1, 2)\n\n >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis]\n >>> y\n array([[1],\n [2]])\n >>> y.shape\n (2, 1)\n\n Note that some examples may use ``None`` instead of ``np.newaxis``. These\n are the same objects:\n\n >>> np.newaxis is None\n True\n\n \"\"\"\n a = asarray(a)\n shape = a.shape\n if axis < 0:\n axis = axis + len(shape) + 1\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\nrow_stack = vstack\n\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. 
All of them must have the same first dimension.\n\n Returns\n -------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1, 2],\n [2, 3],\n [3, 4]])\n\n \"\"\"\n arrays = []\n for v in tup:\n arr = array(v, copy=False, subok=True)\n if arr.ndim < 2:\n arr = array(arr, copy=False, subok=True, ndmin=2).T\n arrays.append(arr)\n return _nx.concatenate(arrays, 1)\n\ndef dstack(tup):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n\n Takes a sequence of arrays and stack them along the third axis\n to make a single array. Rebuilds arrays divided by `dsplit`.\n This is a simple way to stack 2D arrays (images) into a single\n 3D array for processing.\n\n Parameters\n ----------\n tup : sequence of arrays\n Arrays to stack. All of them must have the same shape along all\n but the third axis.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n See Also\n --------\n vstack : Stack along first axis.\n hstack : Stack along second axis.\n concatenate : Join arrays.\n dsplit : Split array along third axis.\n\n Notes\n -----\n Equivalent to ``np.concatenate(tup, axis=2)``.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n\n \"\"\"\n return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)\n\ndef _replace_zero_by_x_arrays(sub_arys):\n for i in range(len(sub_arys)):\n if len(_nx.shape(sub_arys[i])) == 0:\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n return sub_arys\n\ndef array_split(ary,indices_or_sections,axis = 0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Please refer to the ``split`` documentation. 
The only difference\n between these functions is that ``array_split`` allows\n `indices_or_sections` to be an integer that does *not* equally\n divide the axis.\n\n See Also\n --------\n split : Split array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]\n\n \"\"\"\n try:\n Ntotal = ary.shape[axis]\n except AttributeError:\n Ntotal = len(ary)\n try: # handle scalar case.\n Nsections = len(indices_or_sections) + 1\n div_points = [0] + list(indices_or_sections) + [Ntotal]\n except TypeError: #indices_or_sections is a scalar, not an array.\n Nsections = int(indices_or_sections)\n if Nsections <= 0:\n raise ValueError('number sections must be larger than 0.')\n Neach_section, extras = divmod(Ntotal, Nsections)\n section_sizes = [0] + \\\n extras * [Neach_section+1] + \\\n (Nsections-extras) * [Neach_section]\n div_points = _nx.array(section_sizes).cumsum()\n\n sub_arys = []\n sary = _nx.swapaxes(ary, axis, 0)\n for i in range(Nsections):\n st = div_points[i]; end = div_points[i+1]\n sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))\n\n # This \"kludge\" was introduced here to replace arrays shaped (0, 10)\n # or similar with an array shaped (0,).\n # There seems no need for this, so give a FutureWarning to remove later.\n if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1:\n warnings.warn(\"in the future np.array_split will retain the shape of \"\n \"arrays with a zero size, instead of replacing them by \"\n \"`array([])`, which always has a shape of (0,).\",\n FutureWarning)\n sub_arys = _replace_zero_by_x_arrays(sub_arys)\n\n return sub_arys\n\ndef split(ary,indices_or_sections,axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D array\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size. 
Does not raise an exception if\n an equal division cannot be made.\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join arrays together.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]\n\n >>> x = np.arange(8.0)\n >>> np.split(x, [3, 5, 6, 10])\n [array([ 0., 1., 2.]),\n array([ 3., 4.]),\n array([ 5.]),\n array([ 6., 7.]),\n array([], dtype=float64)]\n\n \"\"\"\n try: len(indices_or_sections)\n except TypeError:\n sections = indices_or_sections\n N = ary.shape[axis]\n if N % sections:\n raise ValueError('array split does not result in an equal division')\n res = array_split(ary, indices_or_sections, axis)\n return res\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. `hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [ 12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [ 10., 11.],\n [ 14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [ 12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [ 11.],\n [ 15.]]),\n array([], dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n\n \"\"\"\n if len(_nx.shape(ary)) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if len(ary.shape) > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"\n Split an array into multiple sub-arrays vertically (row-wise).\n\n Please refer to the ``split`` documentation. 
``vsplit`` is equivalent\n to ``split`` with `axis=0` (default), the array is always split along the\n first axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]]),\n array([[ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])]\n >>> np.vsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]]),\n array([[ 12., 13., 14., 15.]]),\n array([], dtype=float64)]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[ 0., 1.],\n [ 2., 3.]]]),\n array([[[ 4., 5.],\n [ 6., 7.]]])]\n\n \"\"\"\n if len(_nx.shape(ary)) < 2:\n raise ValueError('vsplit only works on arrays of 2 or more dimensions')\n return split(ary, indices_or_sections, 0)\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. `dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [ 12., 13.]]]),\n array([[[ 2., 3.],\n [ 6., 7.]],\n [[ 10., 11.],\n [ 14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [ 12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[ 11.],\n [ 15.]]]),\n array([], dtype=float64)]\n\n \"\"\"\n if len(_nx.shape(ary)) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n\ndef get_array_prepare(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_prepare__) for i, x in enumerate(args)\n if hasattr(x, '__array_prepare__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef get_array_wrap(*args):\n \"\"\"Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. 
If no wrapper is found, return None\n \"\"\"\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_wrap__) for i, x in enumerate(args)\n if hasattr(x, '__array_wrap__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\ndef kron(a, b):\n \"\"\"\n Kronecker product of two arrays.\n\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n\n Parameters\n ----------\n a, b : array_like\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n outer : The outer product\n\n Notes\n -----\n The function assumes that the number of dimenensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,\n the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n\n where::\n\n kt = it * st + jt, t = 0,...,N\n\n In the common 2-D case (N=1), the block structure can be visualized::\n\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]\n\n\n Examples\n --------\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])\n\n >>> np.kron(np.eye(2), np.ones((2,2)))\n array([[ 1., 1., 0., 0.],\n [ 1., 1., 0., 0.],\n [ 0., 0., 1., 1.],\n [ 0., 0., 1., 1.]])\n\n >>> a = np.arange(100).reshape((2,5,2,5))\n >>> b = np.arange(24).reshape((2,3,4))\n >>> c = np.kron(a,b)\n >>> c.shape\n (2, 10, 6, 20)\n >>> I = (1,3,0,2)\n >>> J = (0,2,1)\n >>> J1 = (0,) + J # extend to ndim=4\n >>> S1 = (1,) + b.shape\n >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))\n >>> c[K] == a[I]*b[J]\n True\n\n \"\"\"\n b = asanyarray(b)\n a = array(a, copy=False, subok=True, ndmin=b.ndim)\n ndb, nda = b.ndim, a.ndim\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n nd = ndb\n if (ndb != nda):\n if (ndb > nda):\n as_ = (1,)*(ndb-nda) + as_\n else:\n bs = (1,)*(nda-ndb) + bs\n nd = nda\n result = outer(a, b).reshape(as_+bs)\n axis = nd-1\n for _ in range(nd):\n result = concatenate(result, axis=axis)\n wrapper = get_array_prepare(a, b)\n if wrapper is not None:\n result = wrapper(result)\n wrapper = get_array_wrap(a, b)\n if wrapper is not None:\n result = wrapper(result)\n return result\n\n\ndef tile(A, reps):\n \"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. 
If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Parameters\n ----------\n A : array_like\n The input array.\n reps : array_like\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n See Also\n --------\n repeat : Repeat elements of an array.\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0, 1, 2, 0, 1, 2])\n >>> np.tile(a, (2, 2))\n array([[0, 1, 2, 0, 1, 2],\n [0, 1, 2, 0, 1, 2]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0, 1, 2, 0, 1, 2]],\n [[0, 1, 2, 0, 1, 2]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n >>> np.tile(b, (2, 1))\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n \"\"\"\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n d = len(tup)\n c = _nx.array(A, copy=False, subok=True, ndmin=d)\n shape = list(c.shape)\n n = max(c.size, 1)\n if (d < c.ndim):\n tup = (1,)*(c.ndim-d) + tup\n for i, nrep in enumerate(tup):\n if nrep!=1:\n c = c.reshape(-1, n).repeat(nrep, 0)\n dim_in = shape[i]\n dim_out = dim_in*nrep\n shape[i] = dim_out\n n //= max(dim_in, 1)\n return c.reshape(shape)\n", "path": "numpy/lib/shape_base.py" } ]
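A minimal sketch (illustrative, not part of this record) of the behaviour the `split` implementation above encodes: the integer-sections path enforces an equal division and raises otherwise, while `array_split` tolerates a remainder.

```python
import numpy as np

x = np.arange(8.0)

# array_split accepts an unequal division: the leading sub-arrays get the
# extra elements.
print(np.array_split(x, 3))
# [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]

# split with an integer section count insists on an exact division, which is
# exactly what the `N % sections` check in the source above implements.
try:
    np.split(x, 3)
except ValueError as exc:
    print(exc)  # array split does not result in an equal division
```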
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 38b928d57605..43e98ae3d338 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -287,10 +287,6 @@ def column_stack(tup):
     --------
     hstack, vstack, concatenate
 
-    Notes
-    -----
-    This function is equivalent to ``np.vstack(tup).T``.
-
     Examples
     --------
     >>> a = np.array((1,2,3))
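The hunk above removes the Notes claim that `column_stack` is equivalent to ``np.vstack(tup).T``. A short sketch (illustrative, not taken from the record) of why that equivalence only holds for 1-D inputs:

```python
import numpy as np

# For 1-D inputs the two expressions agree.
u, v = np.array([1, 2, 3]), np.array([4, 5, 6])
assert (np.column_stack((u, v)) == np.vstack((u, v)).T).all()

# For 2-D inputs they differ: column_stack concatenates along the second axis...
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])
print(np.column_stack((a, b)))
# [[1 2 5 6]
#  [3 4 7 8]]

# ...while vstack(...).T stacks rows first and then transposes, giving a
# different result, which is why the Notes section was dropped.
print(np.vstack((a, b)).T)
# [[1 3 5 7]
#  [2 4 6 8]]
```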
certbot__certbot-5941
V2 order ready status not recognized, causes deserialization error

## I installed Certbot with (certbot-auto, OS package manager, pip, etc):

Cloned from git:

```
$> git rev-parse HEAD
6b29d159a2f221c3437770bdb43924ee6f953c4b
```

## I ran this command and it produced this output:

`certbot_test --server http://localhost:4001/directory certonly --standalone -d one.wtf --preferred-challenges http-01 `

Note: This is against a Boulder instance configured with the `OrderReadyStatus` feature flag enabled (See https://github.com/letsencrypt/boulder/pull/3644).

## Certbot's behavior differed from what I expected because:

Certbot POSTed `newOrder`. In response an order object with `"status": "ready"` was returned. This caused a `DeserializationError` indicating "Could not decode 'status' (u'ready'): Deserialization error: Status not recognized".

The "ready" status was added to the ACME specification in draft-10 before Let's Encrypt launched its production ACMEv2 endpoint. Boulder does not use this new status in staging/production yet but we will in the near future (~next month). Draft-10 says:

> Once all of the authorizations listed in the order object are in the "valid" state, the order transitions to the "ready" state. This state is used to indicate that an order is ready for finalization.

Previously the order would remain in "processing" when all of its authorizations were in the "valid" state.

## Here is a Certbot log showing the issue (if available):

```
http://localhost:4001 "POST /acme/new-order HTTP/1.1" 201 323
Received response:
HTTP 201
Boulder-Requester: 2141
Cache-Control: public, max-age=0, no-cache
Content-Type: application/json
Location: http://localhost:4001/acme/order/2141/932
Replay-Nonce: Aeop9czyFGXSMBH0TfD4MwI5klCloEnml8AFsRzBPDU
Date: Thu, 12 Apr 2018 17:06:51 GMT
Content-Length: 323

{
  "status": "ready",
  "expires": "2018-04-19T17:06:51.98458014Z",
  "identifiers": [
    {
      "type": "dns",
      "value": "one.wtf"
    }
  ],
  "authorizations": [
    "http://localhost:4001/acme/authz/qklYRnxxHtf8PAaR8IpgK2ex7uPqWYzWgPEQrPiqEKc"
  ],
  "finalize": "http://localhost:4001/acme/finalize/2141/932"
}
Storing nonce: Aeop9czyFGXSMBH0TfD4MwI5klCloEnml8AFsRzBPDU
Exiting abnormally:
Traceback (most recent call last):
  File "/home/daniel/Code/certbot/venv/bin/certbot", line 11, in <module>
    load_entry_point('certbot', 'console_scripts', 'certbot')()
  File "/home/daniel/Code/certbot/certbot/main.py", line 1266, in main
    return config.func(config, plugins)
  File "/home/daniel/Code/certbot/certbot/main.py", line 1157, in certonly
    lineage = _get_and_save_cert(le_client, config, domains, certname, lineage)
  File "/home/daniel/Code/certbot/certbot/main.py", line 113, in _get_and_save_cert
    renewal.renew_cert(config, domains, le_client, lineage)
  File "/home/daniel/Code/certbot/certbot/renewal.py", line 297, in renew_cert
    new_cert, new_chain, new_key, _ = le_client.obtain_certificate(domains)
  File "/home/daniel/Code/certbot/certbot/client.py", line 294, in obtain_certificate
    orderr = self._get_order_and_authorizations(csr.data, self.config.allow_subset_of_names)
  File "/home/daniel/Code/certbot/certbot/client.py", line 326, in _get_order_and_authorizations
    orderr = self.acme.new_order(csr_pem)
  File "/home/daniel/Code/certbot/acme/acme/client.py", line 779, in new_order
    return self.client.new_order(csr_pem)
  File "/home/daniel/Code/certbot/acme/acme/client.py", line 606, in new_order
    body = messages.Order.from_json(response.json())
  File "/home/daniel/Code/certbot/venv/local/lib/python2.7/site-packages/josepy/json_util.py", line 289, in from_json
    return cls(**cls.fields_from_json(jobj))
  File "/home/daniel/Code/certbot/venv/local/lib/python2.7/site-packages/josepy/json_util.py", line 284, in fields_from_json
    slot, value, error))
DeserializationError: Deserialization error: Could not decode 'status' (u'ready'): Deserialization error: Status not recognized
Please see the logfiles in /tmp/leitSN33/logs for more details.
```
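A minimal reproduction sketch of the failure described above, assuming an acme checkout without the fix shown further down (the names come from `acme/acme/messages.py` in this record):

```python
import josepy as jose
from acme import messages

try:
    # Before the fix, 'ready' is absent from Status.POSSIBLE_NAMES, so the
    # order body with "status": "ready" cannot be decoded.
    messages.Status.from_json("ready")
except jose.DeserializationError as exc:
    print(exc)  # roughly: Deserialization error: Status not recognized
```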
[ { "content": "\"\"\"ACME protocol messages.\"\"\"\nimport collections\nimport six\n\nimport josepy as jose\n\nfrom acme import challenges\nfrom acme import errors\nfrom acme import fields\nfrom acme import util\n\nOLD_ERROR_PREFIX = \"urn:acme:error:\"\nERROR_PREFIX = \"urn:ietf:params:acme:error:\"\n\nERROR_CODES = {\n 'badCSR': 'The CSR is unacceptable (e.g., due to a short key)',\n 'badNonce': 'The client sent an unacceptable anti-replay nonce',\n 'connection': ('The server could not connect to the client to verify the'\n ' domain'),\n 'dnssec': 'The server could not validate a DNSSEC signed domain',\n # deprecate invalidEmail\n 'invalidEmail': 'The provided email for a registration was invalid',\n 'invalidContact': 'The provided contact URI was invalid',\n 'malformed': 'The request message was malformed',\n 'rateLimited': 'There were too many requests of a given type',\n 'serverInternal': 'The server experienced an internal error',\n 'tls': 'The server experienced a TLS error during domain verification',\n 'unauthorized': 'The client lacks sufficient authorization',\n 'unknownHost': 'The server could not resolve a domain name',\n}\n\nERROR_TYPE_DESCRIPTIONS = dict(\n (ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items())\n\nERROR_TYPE_DESCRIPTIONS.update(dict( # add errors with old prefix, deprecate me\n (OLD_ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items()))\n\n\ndef is_acme_error(err):\n \"\"\"Check if argument is an ACME error.\"\"\"\n if isinstance(err, Error) and (err.typ is not None):\n return (ERROR_PREFIX in err.typ) or (OLD_ERROR_PREFIX in err.typ)\n else:\n return False\n\n\[email protected]_2_unicode_compatible\nclass Error(jose.JSONObjectWithFields, errors.Error):\n \"\"\"ACME error.\n\n https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00\n\n :ivar unicode typ:\n :ivar unicode title:\n :ivar unicode detail:\n\n \"\"\"\n typ = jose.Field('type', omitempty=True, default='about:blank')\n title = jose.Field('title', omitempty=True)\n detail = jose.Field('detail', omitempty=True)\n\n @classmethod\n def with_code(cls, code, **kwargs):\n \"\"\"Create an Error instance with an ACME Error code.\n\n :unicode code: An ACME error code, like 'dnssec'.\n :kwargs: kwargs to pass to Error.\n\n \"\"\"\n if code not in ERROR_CODES:\n raise ValueError(\"The supplied code: %s is not a known ACME error\"\n \" code\" % code)\n typ = ERROR_PREFIX + code\n return cls(typ=typ, **kwargs)\n\n @property\n def description(self):\n \"\"\"Hardcoded error description based on its type.\n\n :returns: Description if standard ACME error or ``None``.\n :rtype: unicode\n\n \"\"\"\n return ERROR_TYPE_DESCRIPTIONS.get(self.typ)\n\n @property\n def code(self):\n \"\"\"ACME error code.\n\n Basically self.typ without the ERROR_PREFIX.\n\n :returns: error code if standard ACME code or ``None``.\n :rtype: unicode\n\n \"\"\"\n code = str(self.typ).split(':')[-1]\n if code in ERROR_CODES:\n return code\n\n def __str__(self):\n return b' :: '.join(\n part.encode('ascii', 'backslashreplace') for part in\n (self.typ, self.description, self.detail, self.title)\n if part is not None).decode()\n\n\nclass _Constant(jose.JSONDeSerializable, collections.Hashable): # type: ignore\n \"\"\"ACME constant.\"\"\"\n __slots__ = ('name',)\n POSSIBLE_NAMES = NotImplemented\n\n def __init__(self, name):\n self.POSSIBLE_NAMES[name] = self\n self.name = name\n\n def to_partial_json(self):\n return self.name\n\n @classmethod\n def from_json(cls, value):\n if value not in cls.POSSIBLE_NAMES:\n raise 
jose.DeserializationError(\n '{0} not recognized'.format(cls.__name__))\n return cls.POSSIBLE_NAMES[value]\n\n def __repr__(self):\n return '{0}({1})'.format(self.__class__.__name__, self.name)\n\n def __eq__(self, other):\n return isinstance(other, type(self)) and other.name == self.name\n\n def __hash__(self):\n return hash((self.__class__, self.name))\n\n def __ne__(self, other):\n return not self == other\n\n\nclass Status(_Constant):\n \"\"\"ACME \"status\" field.\"\"\"\n POSSIBLE_NAMES = {} # type: dict\nSTATUS_UNKNOWN = Status('unknown')\nSTATUS_PENDING = Status('pending')\nSTATUS_PROCESSING = Status('processing')\nSTATUS_VALID = Status('valid')\nSTATUS_INVALID = Status('invalid')\nSTATUS_REVOKED = Status('revoked')\n\n\nclass IdentifierType(_Constant):\n \"\"\"ACME identifier type.\"\"\"\n POSSIBLE_NAMES = {} # type: dict\nIDENTIFIER_FQDN = IdentifierType('dns') # IdentifierDNS in Boulder\n\n\nclass Identifier(jose.JSONObjectWithFields):\n \"\"\"ACME identifier.\n\n :ivar IdentifierType typ:\n :ivar unicode value:\n\n \"\"\"\n typ = jose.Field('type', decoder=IdentifierType.from_json)\n value = jose.Field('value')\n\n\nclass Directory(jose.JSONDeSerializable):\n \"\"\"Directory.\"\"\"\n\n _REGISTERED_TYPES = {} # type: dict\n\n class Meta(jose.JSONObjectWithFields):\n \"\"\"Directory Meta.\"\"\"\n _terms_of_service = jose.Field('terms-of-service', omitempty=True)\n _terms_of_service_v2 = jose.Field('termsOfService', omitempty=True)\n website = jose.Field('website', omitempty=True)\n caa_identities = jose.Field('caaIdentities', omitempty=True)\n\n def __init__(self, **kwargs):\n kwargs = dict((self._internal_name(k), v) for k, v in kwargs.items())\n # pylint: disable=star-args\n super(Directory.Meta, self).__init__(**kwargs)\n\n @property\n def terms_of_service(self):\n \"\"\"URL for the CA TOS\"\"\"\n return self._terms_of_service or self._terms_of_service_v2\n\n def __iter__(self):\n # When iterating over fields, use the external name 'terms_of_service' instead of\n # the internal '_terms_of_service'.\n for name in super(Directory.Meta, self).__iter__():\n yield name[1:] if name == '_terms_of_service' else name\n\n def _internal_name(self, name):\n return '_' + name if name == 'terms_of_service' else name\n\n\n @classmethod\n def _canon_key(cls, key):\n return getattr(key, 'resource_type', key)\n\n @classmethod\n def register(cls, resource_body_cls):\n \"\"\"Register resource.\"\"\"\n resource_type = resource_body_cls.resource_type\n assert resource_type not in cls._REGISTERED_TYPES\n cls._REGISTERED_TYPES[resource_type] = resource_body_cls\n return resource_body_cls\n\n def __init__(self, jobj):\n canon_jobj = util.map_keys(jobj, self._canon_key)\n # TODO: check that everything is an absolute URL; acme-spec is\n # not clear on that\n self._jobj = canon_jobj\n\n def __getattr__(self, name):\n try:\n return self[name.replace('_', '-')]\n except KeyError as error:\n raise AttributeError(str(error) + ': ' + name)\n\n def __getitem__(self, name):\n try:\n return self._jobj[self._canon_key(name)]\n except KeyError:\n raise KeyError('Directory field not found')\n\n def to_partial_json(self):\n return self._jobj\n\n @classmethod\n def from_json(cls, jobj):\n jobj['meta'] = cls.Meta.from_json(jobj.pop('meta', {}))\n return cls(jobj)\n\n\nclass Resource(jose.JSONObjectWithFields):\n \"\"\"ACME Resource.\n\n :ivar acme.messages.ResourceBody body: Resource body.\n\n \"\"\"\n body = jose.Field('body')\n\n\nclass ResourceWithURI(Resource):\n \"\"\"ACME Resource with URI.\n\n :ivar unicode uri: 
Location of the resource.\n\n \"\"\"\n uri = jose.Field('uri') # no ChallengeResource.uri\n\n\nclass ResourceBody(jose.JSONObjectWithFields):\n \"\"\"ACME Resource Body.\"\"\"\n\n\nclass Registration(ResourceBody):\n \"\"\"Registration Resource Body.\n\n :ivar josepy.jwk.JWK key: Public key.\n :ivar tuple contact: Contact information following ACME spec,\n `tuple` of `unicode`.\n :ivar unicode agreement:\n\n \"\"\"\n # on new-reg key server ignores 'key' and populates it based on\n # JWS.signature.combined.jwk\n key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)\n contact = jose.Field('contact', omitempty=True, default=())\n agreement = jose.Field('agreement', omitempty=True)\n status = jose.Field('status', omitempty=True)\n terms_of_service_agreed = jose.Field('termsOfServiceAgreed', omitempty=True)\n\n phone_prefix = 'tel:'\n email_prefix = 'mailto:'\n\n @classmethod\n def from_data(cls, phone=None, email=None, **kwargs):\n \"\"\"Create registration resource from contact details.\"\"\"\n details = list(kwargs.pop('contact', ()))\n if phone is not None:\n details.append(cls.phone_prefix + phone)\n if email is not None:\n details.append(cls.email_prefix + email)\n kwargs['contact'] = tuple(details)\n return cls(**kwargs)\n\n def _filter_contact(self, prefix):\n return tuple(\n detail[len(prefix):] for detail in self.contact\n if detail.startswith(prefix))\n\n @property\n def phones(self):\n \"\"\"All phones found in the ``contact`` field.\"\"\"\n return self._filter_contact(self.phone_prefix)\n\n @property\n def emails(self):\n \"\"\"All emails found in the ``contact`` field.\"\"\"\n return self._filter_contact(self.email_prefix)\n\n\[email protected]\nclass NewRegistration(Registration):\n \"\"\"New registration.\"\"\"\n resource_type = 'new-reg'\n resource = fields.Resource(resource_type)\n\n\nclass UpdateRegistration(Registration):\n \"\"\"Update registration.\"\"\"\n resource_type = 'reg'\n resource = fields.Resource(resource_type)\n\n\nclass RegistrationResource(ResourceWithURI):\n \"\"\"Registration Resource.\n\n :ivar acme.messages.Registration body:\n :ivar unicode new_authzr_uri: Deprecated. Do not use.\n :ivar unicode terms_of_service: URL for the CA TOS.\n\n \"\"\"\n body = jose.Field('body', decoder=Registration.from_json)\n new_authzr_uri = jose.Field('new_authzr_uri', omitempty=True)\n terms_of_service = jose.Field('terms_of_service', omitempty=True)\n\n\nclass ChallengeBody(ResourceBody):\n \"\"\"Challenge Resource Body.\n\n .. todo::\n Confusingly, this has a similar name to `.challenges.Challenge`,\n as well as `.achallenges.AnnotatedChallenge`. Please use names\n such as ``challb`` to distinguish instances of this class from\n ``achall``.\n\n :ivar acme.challenges.Challenge: Wrapped challenge.\n Conveniently, all challenge fields are proxied, i.e. you can\n call ``challb.x`` to get ``challb.chall.x`` contents.\n :ivar acme.messages.Status status:\n :ivar datetime.datetime validated:\n :ivar messages.Error error:\n\n \"\"\"\n __slots__ = ('chall',)\n # ACMEv1 has a \"uri\" field in challenges. ACMEv2 has a \"url\" field. This\n # challenge object supports either one, but should be accessed through the\n # name \"uri\". 
In Client.answer_challenge, whichever one is set will be\n # used.\n _uri = jose.Field('uri', omitempty=True, default=None)\n _url = jose.Field('url', omitempty=True, default=None)\n status = jose.Field('status', decoder=Status.from_json,\n omitempty=True, default=STATUS_PENDING)\n validated = fields.RFC3339Field('validated', omitempty=True)\n error = jose.Field('error', decoder=Error.from_json,\n omitempty=True, default=None)\n\n def __init__(self, **kwargs):\n kwargs = dict((self._internal_name(k), v) for k, v in kwargs.items())\n # pylint: disable=star-args\n super(ChallengeBody, self).__init__(**kwargs)\n\n def encode(self, name):\n return super(ChallengeBody, self).encode(self._internal_name(name))\n\n def to_partial_json(self):\n jobj = super(ChallengeBody, self).to_partial_json()\n jobj.update(self.chall.to_partial_json())\n return jobj\n\n @classmethod\n def fields_from_json(cls, jobj):\n jobj_fields = super(ChallengeBody, cls).fields_from_json(jobj)\n jobj_fields['chall'] = challenges.Challenge.from_json(jobj)\n return jobj_fields\n\n @property\n def uri(self):\n \"\"\"The URL of this challenge.\"\"\"\n return self._url or self._uri\n\n def __getattr__(self, name):\n return getattr(self.chall, name)\n\n def __iter__(self):\n # When iterating over fields, use the external name 'uri' instead of\n # the internal '_uri'.\n for name in super(ChallengeBody, self).__iter__():\n yield name[1:] if name == '_uri' else name\n\n def _internal_name(self, name):\n return '_' + name if name == 'uri' else name\n\n\nclass ChallengeResource(Resource):\n \"\"\"Challenge Resource.\n\n :ivar acme.messages.ChallengeBody body:\n :ivar unicode authzr_uri: URI found in the 'up' ``Link`` header.\n\n \"\"\"\n body = jose.Field('body', decoder=ChallengeBody.from_json)\n authzr_uri = jose.Field('authzr_uri')\n\n @property\n def uri(self):\n \"\"\"The URL of the challenge body.\"\"\"\n # pylint: disable=function-redefined,no-member\n return self.body.uri\n\n\nclass Authorization(ResourceBody):\n \"\"\"Authorization Resource Body.\n\n :ivar acme.messages.Identifier identifier:\n :ivar list challenges: `list` of `.ChallengeBody`\n :ivar tuple combinations: Challenge combinations (`tuple` of `tuple`\n of `int`, as opposed to `list` of `list` from the spec).\n :ivar acme.messages.Status status:\n :ivar datetime.datetime expires:\n\n \"\"\"\n identifier = jose.Field('identifier', decoder=Identifier.from_json)\n challenges = jose.Field('challenges', omitempty=True)\n combinations = jose.Field('combinations', omitempty=True)\n\n status = jose.Field('status', omitempty=True, decoder=Status.from_json)\n # TODO: 'expires' is allowed for Authorization Resources in\n # general, but for Key Authorization '[t]he \"expires\" field MUST\n # be absent'... then acme-spec gives example with 'expires'\n # present... 
That's confusing!\n expires = fields.RFC3339Field('expires', omitempty=True)\n wildcard = jose.Field('wildcard', omitempty=True)\n\n @challenges.decoder\n def challenges(value): # pylint: disable=missing-docstring,no-self-argument\n return tuple(ChallengeBody.from_json(chall) for chall in value)\n\n @property\n def resolved_combinations(self):\n \"\"\"Combinations with challenges instead of indices.\"\"\"\n return tuple(tuple(self.challenges[idx] for idx in combo)\n for combo in self.combinations)\n\n\[email protected]\nclass NewAuthorization(Authorization):\n \"\"\"New authorization.\"\"\"\n resource_type = 'new-authz'\n resource = fields.Resource(resource_type)\n\n\nclass AuthorizationResource(ResourceWithURI):\n \"\"\"Authorization Resource.\n\n :ivar acme.messages.Authorization body:\n :ivar unicode new_cert_uri: Deprecated. Do not use.\n\n \"\"\"\n body = jose.Field('body', decoder=Authorization.from_json)\n new_cert_uri = jose.Field('new_cert_uri', omitempty=True)\n\n\[email protected]\nclass CertificateRequest(jose.JSONObjectWithFields):\n \"\"\"ACME new-cert request.\n\n :ivar josepy.util.ComparableX509 csr:\n `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`\n\n \"\"\"\n resource_type = 'new-cert'\n resource = fields.Resource(resource_type)\n csr = jose.Field('csr', decoder=jose.decode_csr, encoder=jose.encode_csr)\n\n\nclass CertificateResource(ResourceWithURI):\n \"\"\"Certificate Resource.\n\n :ivar josepy.util.ComparableX509 body:\n `OpenSSL.crypto.X509` wrapped in `.ComparableX509`\n :ivar unicode cert_chain_uri: URI found in the 'up' ``Link`` header\n :ivar tuple authzrs: `tuple` of `AuthorizationResource`.\n\n \"\"\"\n cert_chain_uri = jose.Field('cert_chain_uri')\n authzrs = jose.Field('authzrs')\n\n\[email protected]\nclass Revocation(jose.JSONObjectWithFields):\n \"\"\"Revocation message.\n\n :ivar .ComparableX509 certificate: `OpenSSL.crypto.X509` wrapped in\n `.ComparableX509`\n\n \"\"\"\n resource_type = 'revoke-cert'\n resource = fields.Resource(resource_type)\n certificate = jose.Field(\n 'certificate', decoder=jose.decode_cert, encoder=jose.encode_cert)\n reason = jose.Field('reason')\n\n\nclass Order(ResourceBody):\n \"\"\"Order Resource Body.\n\n :ivar list of .Identifier: List of identifiers for the certificate.\n :ivar acme.messages.Status status:\n :ivar list of str authorizations: URLs of authorizations.\n :ivar str certificate: URL to download certificate as a fullchain PEM.\n :ivar str finalize: URL to POST to to request issuance once all\n authorizations have \"valid\" status.\n :ivar datetime.datetime expires: When the order expires.\n :ivar .Error error: Any error that occurred during finalization, if applicable.\n \"\"\"\n identifiers = jose.Field('identifiers', omitempty=True)\n status = jose.Field('status', decoder=Status.from_json,\n omitempty=True, default=STATUS_PENDING)\n authorizations = jose.Field('authorizations', omitempty=True)\n certificate = jose.Field('certificate', omitempty=True)\n finalize = jose.Field('finalize', omitempty=True)\n expires = fields.RFC3339Field('expires', omitempty=True)\n error = jose.Field('error', omitempty=True, decoder=Error.from_json)\n\n @identifiers.decoder\n def identifiers(value): # pylint: disable=missing-docstring,no-self-argument\n return tuple(Identifier.from_json(identifier) for identifier in value)\n\nclass OrderResource(ResourceWithURI):\n \"\"\"Order Resource.\n\n :ivar acme.messages.Order body:\n :ivar str csr_pem: The CSR this Order will be finalized with.\n :ivar list of 
acme.messages.AuthorizationResource authorizations:\n Fully-fetched AuthorizationResource objects.\n :ivar str fullchain_pem: The fetched contents of the certificate URL\n produced once the order was finalized, if it's present.\n \"\"\"\n body = jose.Field('body', decoder=Order.from_json)\n csr_pem = jose.Field('csr_pem', omitempty=True)\n authorizations = jose.Field('authorizations')\n fullchain_pem = jose.Field('fullchain_pem', omitempty=True)\n\[email protected]\nclass NewOrder(Order):\n \"\"\"New order.\"\"\"\n resource_type = 'new-order'\n resource = fields.Resource(resource_type)\n", "path": "acme/acme/messages.py" } ]
[ { "content": "\"\"\"ACME protocol messages.\"\"\"\nimport collections\nimport six\n\nimport josepy as jose\n\nfrom acme import challenges\nfrom acme import errors\nfrom acme import fields\nfrom acme import util\n\nOLD_ERROR_PREFIX = \"urn:acme:error:\"\nERROR_PREFIX = \"urn:ietf:params:acme:error:\"\n\nERROR_CODES = {\n 'badCSR': 'The CSR is unacceptable (e.g., due to a short key)',\n 'badNonce': 'The client sent an unacceptable anti-replay nonce',\n 'connection': ('The server could not connect to the client to verify the'\n ' domain'),\n 'dnssec': 'The server could not validate a DNSSEC signed domain',\n # deprecate invalidEmail\n 'invalidEmail': 'The provided email for a registration was invalid',\n 'invalidContact': 'The provided contact URI was invalid',\n 'malformed': 'The request message was malformed',\n 'rateLimited': 'There were too many requests of a given type',\n 'serverInternal': 'The server experienced an internal error',\n 'tls': 'The server experienced a TLS error during domain verification',\n 'unauthorized': 'The client lacks sufficient authorization',\n 'unknownHost': 'The server could not resolve a domain name',\n}\n\nERROR_TYPE_DESCRIPTIONS = dict(\n (ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items())\n\nERROR_TYPE_DESCRIPTIONS.update(dict( # add errors with old prefix, deprecate me\n (OLD_ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items()))\n\n\ndef is_acme_error(err):\n \"\"\"Check if argument is an ACME error.\"\"\"\n if isinstance(err, Error) and (err.typ is not None):\n return (ERROR_PREFIX in err.typ) or (OLD_ERROR_PREFIX in err.typ)\n else:\n return False\n\n\[email protected]_2_unicode_compatible\nclass Error(jose.JSONObjectWithFields, errors.Error):\n \"\"\"ACME error.\n\n https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00\n\n :ivar unicode typ:\n :ivar unicode title:\n :ivar unicode detail:\n\n \"\"\"\n typ = jose.Field('type', omitempty=True, default='about:blank')\n title = jose.Field('title', omitempty=True)\n detail = jose.Field('detail', omitempty=True)\n\n @classmethod\n def with_code(cls, code, **kwargs):\n \"\"\"Create an Error instance with an ACME Error code.\n\n :unicode code: An ACME error code, like 'dnssec'.\n :kwargs: kwargs to pass to Error.\n\n \"\"\"\n if code not in ERROR_CODES:\n raise ValueError(\"The supplied code: %s is not a known ACME error\"\n \" code\" % code)\n typ = ERROR_PREFIX + code\n return cls(typ=typ, **kwargs)\n\n @property\n def description(self):\n \"\"\"Hardcoded error description based on its type.\n\n :returns: Description if standard ACME error or ``None``.\n :rtype: unicode\n\n \"\"\"\n return ERROR_TYPE_DESCRIPTIONS.get(self.typ)\n\n @property\n def code(self):\n \"\"\"ACME error code.\n\n Basically self.typ without the ERROR_PREFIX.\n\n :returns: error code if standard ACME code or ``None``.\n :rtype: unicode\n\n \"\"\"\n code = str(self.typ).split(':')[-1]\n if code in ERROR_CODES:\n return code\n\n def __str__(self):\n return b' :: '.join(\n part.encode('ascii', 'backslashreplace') for part in\n (self.typ, self.description, self.detail, self.title)\n if part is not None).decode()\n\n\nclass _Constant(jose.JSONDeSerializable, collections.Hashable): # type: ignore\n \"\"\"ACME constant.\"\"\"\n __slots__ = ('name',)\n POSSIBLE_NAMES = NotImplemented\n\n def __init__(self, name):\n self.POSSIBLE_NAMES[name] = self\n self.name = name\n\n def to_partial_json(self):\n return self.name\n\n @classmethod\n def from_json(cls, value):\n if value not in cls.POSSIBLE_NAMES:\n raise 
jose.DeserializationError(\n '{0} not recognized'.format(cls.__name__))\n return cls.POSSIBLE_NAMES[value]\n\n def __repr__(self):\n return '{0}({1})'.format(self.__class__.__name__, self.name)\n\n def __eq__(self, other):\n return isinstance(other, type(self)) and other.name == self.name\n\n def __hash__(self):\n return hash((self.__class__, self.name))\n\n def __ne__(self, other):\n return not self == other\n\n\nclass Status(_Constant):\n \"\"\"ACME \"status\" field.\"\"\"\n POSSIBLE_NAMES = {} # type: dict\nSTATUS_UNKNOWN = Status('unknown')\nSTATUS_PENDING = Status('pending')\nSTATUS_PROCESSING = Status('processing')\nSTATUS_VALID = Status('valid')\nSTATUS_INVALID = Status('invalid')\nSTATUS_REVOKED = Status('revoked')\nSTATUS_READY = Status('ready')\n\n\nclass IdentifierType(_Constant):\n \"\"\"ACME identifier type.\"\"\"\n POSSIBLE_NAMES = {} # type: dict\nIDENTIFIER_FQDN = IdentifierType('dns') # IdentifierDNS in Boulder\n\n\nclass Identifier(jose.JSONObjectWithFields):\n \"\"\"ACME identifier.\n\n :ivar IdentifierType typ:\n :ivar unicode value:\n\n \"\"\"\n typ = jose.Field('type', decoder=IdentifierType.from_json)\n value = jose.Field('value')\n\n\nclass Directory(jose.JSONDeSerializable):\n \"\"\"Directory.\"\"\"\n\n _REGISTERED_TYPES = {} # type: dict\n\n class Meta(jose.JSONObjectWithFields):\n \"\"\"Directory Meta.\"\"\"\n _terms_of_service = jose.Field('terms-of-service', omitempty=True)\n _terms_of_service_v2 = jose.Field('termsOfService', omitempty=True)\n website = jose.Field('website', omitempty=True)\n caa_identities = jose.Field('caaIdentities', omitempty=True)\n\n def __init__(self, **kwargs):\n kwargs = dict((self._internal_name(k), v) for k, v in kwargs.items())\n # pylint: disable=star-args\n super(Directory.Meta, self).__init__(**kwargs)\n\n @property\n def terms_of_service(self):\n \"\"\"URL for the CA TOS\"\"\"\n return self._terms_of_service or self._terms_of_service_v2\n\n def __iter__(self):\n # When iterating over fields, use the external name 'terms_of_service' instead of\n # the internal '_terms_of_service'.\n for name in super(Directory.Meta, self).__iter__():\n yield name[1:] if name == '_terms_of_service' else name\n\n def _internal_name(self, name):\n return '_' + name if name == 'terms_of_service' else name\n\n\n @classmethod\n def _canon_key(cls, key):\n return getattr(key, 'resource_type', key)\n\n @classmethod\n def register(cls, resource_body_cls):\n \"\"\"Register resource.\"\"\"\n resource_type = resource_body_cls.resource_type\n assert resource_type not in cls._REGISTERED_TYPES\n cls._REGISTERED_TYPES[resource_type] = resource_body_cls\n return resource_body_cls\n\n def __init__(self, jobj):\n canon_jobj = util.map_keys(jobj, self._canon_key)\n # TODO: check that everything is an absolute URL; acme-spec is\n # not clear on that\n self._jobj = canon_jobj\n\n def __getattr__(self, name):\n try:\n return self[name.replace('_', '-')]\n except KeyError as error:\n raise AttributeError(str(error) + ': ' + name)\n\n def __getitem__(self, name):\n try:\n return self._jobj[self._canon_key(name)]\n except KeyError:\n raise KeyError('Directory field not found')\n\n def to_partial_json(self):\n return self._jobj\n\n @classmethod\n def from_json(cls, jobj):\n jobj['meta'] = cls.Meta.from_json(jobj.pop('meta', {}))\n return cls(jobj)\n\n\nclass Resource(jose.JSONObjectWithFields):\n \"\"\"ACME Resource.\n\n :ivar acme.messages.ResourceBody body: Resource body.\n\n \"\"\"\n body = jose.Field('body')\n\n\nclass ResourceWithURI(Resource):\n \"\"\"ACME Resource 
with URI.\n\n :ivar unicode uri: Location of the resource.\n\n \"\"\"\n uri = jose.Field('uri') # no ChallengeResource.uri\n\n\nclass ResourceBody(jose.JSONObjectWithFields):\n \"\"\"ACME Resource Body.\"\"\"\n\n\nclass Registration(ResourceBody):\n \"\"\"Registration Resource Body.\n\n :ivar josepy.jwk.JWK key: Public key.\n :ivar tuple contact: Contact information following ACME spec,\n `tuple` of `unicode`.\n :ivar unicode agreement:\n\n \"\"\"\n # on new-reg key server ignores 'key' and populates it based on\n # JWS.signature.combined.jwk\n key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)\n contact = jose.Field('contact', omitempty=True, default=())\n agreement = jose.Field('agreement', omitempty=True)\n status = jose.Field('status', omitempty=True)\n terms_of_service_agreed = jose.Field('termsOfServiceAgreed', omitempty=True)\n\n phone_prefix = 'tel:'\n email_prefix = 'mailto:'\n\n @classmethod\n def from_data(cls, phone=None, email=None, **kwargs):\n \"\"\"Create registration resource from contact details.\"\"\"\n details = list(kwargs.pop('contact', ()))\n if phone is not None:\n details.append(cls.phone_prefix + phone)\n if email is not None:\n details.append(cls.email_prefix + email)\n kwargs['contact'] = tuple(details)\n return cls(**kwargs)\n\n def _filter_contact(self, prefix):\n return tuple(\n detail[len(prefix):] for detail in self.contact\n if detail.startswith(prefix))\n\n @property\n def phones(self):\n \"\"\"All phones found in the ``contact`` field.\"\"\"\n return self._filter_contact(self.phone_prefix)\n\n @property\n def emails(self):\n \"\"\"All emails found in the ``contact`` field.\"\"\"\n return self._filter_contact(self.email_prefix)\n\n\[email protected]\nclass NewRegistration(Registration):\n \"\"\"New registration.\"\"\"\n resource_type = 'new-reg'\n resource = fields.Resource(resource_type)\n\n\nclass UpdateRegistration(Registration):\n \"\"\"Update registration.\"\"\"\n resource_type = 'reg'\n resource = fields.Resource(resource_type)\n\n\nclass RegistrationResource(ResourceWithURI):\n \"\"\"Registration Resource.\n\n :ivar acme.messages.Registration body:\n :ivar unicode new_authzr_uri: Deprecated. Do not use.\n :ivar unicode terms_of_service: URL for the CA TOS.\n\n \"\"\"\n body = jose.Field('body', decoder=Registration.from_json)\n new_authzr_uri = jose.Field('new_authzr_uri', omitempty=True)\n terms_of_service = jose.Field('terms_of_service', omitempty=True)\n\n\nclass ChallengeBody(ResourceBody):\n \"\"\"Challenge Resource Body.\n\n .. todo::\n Confusingly, this has a similar name to `.challenges.Challenge`,\n as well as `.achallenges.AnnotatedChallenge`. Please use names\n such as ``challb`` to distinguish instances of this class from\n ``achall``.\n\n :ivar acme.challenges.Challenge: Wrapped challenge.\n Conveniently, all challenge fields are proxied, i.e. you can\n call ``challb.x`` to get ``challb.chall.x`` contents.\n :ivar acme.messages.Status status:\n :ivar datetime.datetime validated:\n :ivar messages.Error error:\n\n \"\"\"\n __slots__ = ('chall',)\n # ACMEv1 has a \"uri\" field in challenges. ACMEv2 has a \"url\" field. This\n # challenge object supports either one, but should be accessed through the\n # name \"uri\". 
In Client.answer_challenge, whichever one is set will be\n # used.\n _uri = jose.Field('uri', omitempty=True, default=None)\n _url = jose.Field('url', omitempty=True, default=None)\n status = jose.Field('status', decoder=Status.from_json,\n omitempty=True, default=STATUS_PENDING)\n validated = fields.RFC3339Field('validated', omitempty=True)\n error = jose.Field('error', decoder=Error.from_json,\n omitempty=True, default=None)\n\n def __init__(self, **kwargs):\n kwargs = dict((self._internal_name(k), v) for k, v in kwargs.items())\n # pylint: disable=star-args\n super(ChallengeBody, self).__init__(**kwargs)\n\n def encode(self, name):\n return super(ChallengeBody, self).encode(self._internal_name(name))\n\n def to_partial_json(self):\n jobj = super(ChallengeBody, self).to_partial_json()\n jobj.update(self.chall.to_partial_json())\n return jobj\n\n @classmethod\n def fields_from_json(cls, jobj):\n jobj_fields = super(ChallengeBody, cls).fields_from_json(jobj)\n jobj_fields['chall'] = challenges.Challenge.from_json(jobj)\n return jobj_fields\n\n @property\n def uri(self):\n \"\"\"The URL of this challenge.\"\"\"\n return self._url or self._uri\n\n def __getattr__(self, name):\n return getattr(self.chall, name)\n\n def __iter__(self):\n # When iterating over fields, use the external name 'uri' instead of\n # the internal '_uri'.\n for name in super(ChallengeBody, self).__iter__():\n yield name[1:] if name == '_uri' else name\n\n def _internal_name(self, name):\n return '_' + name if name == 'uri' else name\n\n\nclass ChallengeResource(Resource):\n \"\"\"Challenge Resource.\n\n :ivar acme.messages.ChallengeBody body:\n :ivar unicode authzr_uri: URI found in the 'up' ``Link`` header.\n\n \"\"\"\n body = jose.Field('body', decoder=ChallengeBody.from_json)\n authzr_uri = jose.Field('authzr_uri')\n\n @property\n def uri(self):\n \"\"\"The URL of the challenge body.\"\"\"\n # pylint: disable=function-redefined,no-member\n return self.body.uri\n\n\nclass Authorization(ResourceBody):\n \"\"\"Authorization Resource Body.\n\n :ivar acme.messages.Identifier identifier:\n :ivar list challenges: `list` of `.ChallengeBody`\n :ivar tuple combinations: Challenge combinations (`tuple` of `tuple`\n of `int`, as opposed to `list` of `list` from the spec).\n :ivar acme.messages.Status status:\n :ivar datetime.datetime expires:\n\n \"\"\"\n identifier = jose.Field('identifier', decoder=Identifier.from_json)\n challenges = jose.Field('challenges', omitempty=True)\n combinations = jose.Field('combinations', omitempty=True)\n\n status = jose.Field('status', omitempty=True, decoder=Status.from_json)\n # TODO: 'expires' is allowed for Authorization Resources in\n # general, but for Key Authorization '[t]he \"expires\" field MUST\n # be absent'... then acme-spec gives example with 'expires'\n # present... 
That's confusing!\n expires = fields.RFC3339Field('expires', omitempty=True)\n wildcard = jose.Field('wildcard', omitempty=True)\n\n @challenges.decoder\n def challenges(value): # pylint: disable=missing-docstring,no-self-argument\n return tuple(ChallengeBody.from_json(chall) for chall in value)\n\n @property\n def resolved_combinations(self):\n \"\"\"Combinations with challenges instead of indices.\"\"\"\n return tuple(tuple(self.challenges[idx] for idx in combo)\n for combo in self.combinations)\n\n\[email protected]\nclass NewAuthorization(Authorization):\n \"\"\"New authorization.\"\"\"\n resource_type = 'new-authz'\n resource = fields.Resource(resource_type)\n\n\nclass AuthorizationResource(ResourceWithURI):\n \"\"\"Authorization Resource.\n\n :ivar acme.messages.Authorization body:\n :ivar unicode new_cert_uri: Deprecated. Do not use.\n\n \"\"\"\n body = jose.Field('body', decoder=Authorization.from_json)\n new_cert_uri = jose.Field('new_cert_uri', omitempty=True)\n\n\[email protected]\nclass CertificateRequest(jose.JSONObjectWithFields):\n \"\"\"ACME new-cert request.\n\n :ivar josepy.util.ComparableX509 csr:\n `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`\n\n \"\"\"\n resource_type = 'new-cert'\n resource = fields.Resource(resource_type)\n csr = jose.Field('csr', decoder=jose.decode_csr, encoder=jose.encode_csr)\n\n\nclass CertificateResource(ResourceWithURI):\n \"\"\"Certificate Resource.\n\n :ivar josepy.util.ComparableX509 body:\n `OpenSSL.crypto.X509` wrapped in `.ComparableX509`\n :ivar unicode cert_chain_uri: URI found in the 'up' ``Link`` header\n :ivar tuple authzrs: `tuple` of `AuthorizationResource`.\n\n \"\"\"\n cert_chain_uri = jose.Field('cert_chain_uri')\n authzrs = jose.Field('authzrs')\n\n\[email protected]\nclass Revocation(jose.JSONObjectWithFields):\n \"\"\"Revocation message.\n\n :ivar .ComparableX509 certificate: `OpenSSL.crypto.X509` wrapped in\n `.ComparableX509`\n\n \"\"\"\n resource_type = 'revoke-cert'\n resource = fields.Resource(resource_type)\n certificate = jose.Field(\n 'certificate', decoder=jose.decode_cert, encoder=jose.encode_cert)\n reason = jose.Field('reason')\n\n\nclass Order(ResourceBody):\n \"\"\"Order Resource Body.\n\n :ivar list of .Identifier: List of identifiers for the certificate.\n :ivar acme.messages.Status status:\n :ivar list of str authorizations: URLs of authorizations.\n :ivar str certificate: URL to download certificate as a fullchain PEM.\n :ivar str finalize: URL to POST to to request issuance once all\n authorizations have \"valid\" status.\n :ivar datetime.datetime expires: When the order expires.\n :ivar .Error error: Any error that occurred during finalization, if applicable.\n \"\"\"\n identifiers = jose.Field('identifiers', omitempty=True)\n status = jose.Field('status', decoder=Status.from_json,\n omitempty=True, default=STATUS_PENDING)\n authorizations = jose.Field('authorizations', omitempty=True)\n certificate = jose.Field('certificate', omitempty=True)\n finalize = jose.Field('finalize', omitempty=True)\n expires = fields.RFC3339Field('expires', omitempty=True)\n error = jose.Field('error', omitempty=True, decoder=Error.from_json)\n\n @identifiers.decoder\n def identifiers(value): # pylint: disable=missing-docstring,no-self-argument\n return tuple(Identifier.from_json(identifier) for identifier in value)\n\nclass OrderResource(ResourceWithURI):\n \"\"\"Order Resource.\n\n :ivar acme.messages.Order body:\n :ivar str csr_pem: The CSR this Order will be finalized with.\n :ivar list of 
acme.messages.AuthorizationResource authorizations:\n Fully-fetched AuthorizationResource objects.\n :ivar str fullchain_pem: The fetched contents of the certificate URL\n produced once the order was finalized, if it's present.\n \"\"\"\n body = jose.Field('body', decoder=Order.from_json)\n csr_pem = jose.Field('csr_pem', omitempty=True)\n authorizations = jose.Field('authorizations')\n fullchain_pem = jose.Field('fullchain_pem', omitempty=True)\n\[email protected]\nclass NewOrder(Order):\n \"\"\"New order.\"\"\"\n resource_type = 'new-order'\n resource = fields.Resource(resource_type)\n", "path": "acme/acme/messages.py" } ]
diff --git a/acme/acme/messages.py b/acme/acme/messages.py
index a69b3bbc4d0..03dbc325579 100644
--- a/acme/acme/messages.py
+++ b/acme/acme/messages.py
@@ -145,6 +145,7 @@ class Status(_Constant):
 STATUS_VALID = Status('valid')
 STATUS_INVALID = Status('invalid')
 STATUS_REVOKED = Status('revoked')
+STATUS_READY = Status('ready')
 
 
 class IdentifierType(_Constant):
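The one-line fix works because `_Constant.__init__` registers every constructed instance in the class-level `POSSIBLE_NAMES` dict that `from_json` consults. A stand-alone sketch of that registration pattern (simplified, not the real acme classes):

```python
class Constant:
    POSSIBLE_NAMES = {}  # shadowed per subclass in the real code

    def __init__(self, name):
        # Constructing an instance is what makes the name deserializable.
        self.POSSIBLE_NAMES[name] = self
        self.name = name

    @classmethod
    def from_json(cls, value):
        if value not in cls.POSSIBLE_NAMES:
            raise ValueError(f"{cls.__name__} not recognized")
        return cls.POSSIBLE_NAMES[value]


class Status(Constant):
    POSSIBLE_NAMES = {}


STATUS_VALID = Status("valid")
# Without the next line, Status.from_json("ready") raises, which is the bug.
STATUS_READY = Status("ready")

assert Status.from_json("ready") is STATUS_READY
```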
python-pillow__Pillow-6834
Endianness is inconsistent

Image class methods:

| Method        | Parameter      | Endianness  |
|---------------|----------------|-------------|
| blend         | alpha          | native      |
| new           | color          | native      |
| frombytes     | data           | mode        |
| frombuffer    | data           | mode        |
| getcolors     |                | native      |
| getdata all   |                | mode        |
| getdata band  |                | native      |
| getextrema    |                | native      |
| getpalette    |                | mode        |
| getpixel      |                | native      |
| point         |                | unsupported |
| putdata       | data           | native      |
| putpalette    | data           | mode        |
| putpixel      | value          | native      |
| remap_palette | source_palette | mode        |
| tobytes       |                | mode        |

Methods that use one of the above methods:

`eval` calls `point`
`fromarray` calls `frombuffer`
`rotate` calls `transform`
`transform` calls `new`

Related: #2228
[ { "content": "#\n# The Python Imaging Library\n# Pillow fork\n#\n# Python implementation of the PixelAccess Object\n#\n# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.\n# Copyright (c) 1995-2009 by Fredrik Lundh.\n# Copyright (c) 2013 Eric Soroos\n#\n# See the README file for information on usage and redistribution\n#\n\n# Notes:\n#\n# * Implements the pixel access object following Access.c\n# * Taking only the tuple form, which is used from python.\n# * Fill.c uses the integer form, but it's still going to use the old\n# Access.c implementation.\n#\n\nimport logging\nimport sys\n\ntry:\n from cffi import FFI\n\n defs = \"\"\"\n struct Pixel_RGBA {\n unsigned char r,g,b,a;\n };\n struct Pixel_I16 {\n unsigned char l,r;\n };\n \"\"\"\n ffi = FFI()\n ffi.cdef(defs)\nexcept ImportError as ex:\n # Allow error import for doc purposes, but error out when accessing\n # anything in core.\n from ._util import DeferredError\n\n FFI = ffi = DeferredError(ex)\n\nlogger = logging.getLogger(__name__)\n\n\nclass PyAccess:\n def __init__(self, img, readonly=False):\n vals = dict(img.im.unsafe_ptrs)\n self.readonly = readonly\n self.image8 = ffi.cast(\"unsigned char **\", vals[\"image8\"])\n self.image32 = ffi.cast(\"int **\", vals[\"image32\"])\n self.image = ffi.cast(\"unsigned char **\", vals[\"image\"])\n self.xsize, self.ysize = img.im.size\n self._img = img\n\n # Keep pointer to im object to prevent dereferencing.\n self._im = img.im\n if self._im.mode in (\"P\", \"PA\"):\n self._palette = img.palette\n\n # Debugging is polluting test traces, only useful here\n # when hacking on PyAccess\n # logger.debug(\"%s\", vals)\n self._post_init()\n\n def _post_init(self):\n pass\n\n def __setitem__(self, xy, color):\n \"\"\"\n Modifies the pixel at x,y. The color is given as a single\n numerical value for single band images, and a tuple for\n multi-band images\n\n :param xy: The pixel coordinate, given as (x, y). See\n :ref:`coordinate-system`.\n :param color: The pixel value.\n \"\"\"\n if self.readonly:\n msg = \"Attempt to putpixel a read only image\"\n raise ValueError(msg)\n (x, y) = xy\n if x < 0:\n x = self.xsize + x\n if y < 0:\n y = self.ysize + y\n (x, y) = self.check_xy((x, y))\n\n if (\n self._im.mode in (\"P\", \"PA\")\n and isinstance(color, (list, tuple))\n and len(color) in [3, 4]\n ):\n # RGB or RGBA value for a P or PA image\n if self._im.mode == \"PA\":\n alpha = color[3] if len(color) == 4 else 255\n color = color[:3]\n color = self._palette.getcolor(color, self._img)\n if self._im.mode == \"PA\":\n color = (color, alpha)\n\n return self.set_pixel(x, y, color)\n\n def __getitem__(self, xy):\n \"\"\"\n Returns the pixel at x,y. The pixel is returned as a single\n value for single band images or a tuple for multiple band\n images\n\n :param xy: The pixel coordinate, given as (x, y). 
See\n :ref:`coordinate-system`.\n :returns: a pixel value for single band images, a tuple of\n pixel values for multiband images.\n \"\"\"\n (x, y) = xy\n if x < 0:\n x = self.xsize + x\n if y < 0:\n y = self.ysize + y\n (x, y) = self.check_xy((x, y))\n return self.get_pixel(x, y)\n\n putpixel = __setitem__\n getpixel = __getitem__\n\n def check_xy(self, xy):\n (x, y) = xy\n if not (0 <= x < self.xsize and 0 <= y < self.ysize):\n msg = \"pixel location out of range\"\n raise ValueError(msg)\n return xy\n\n\nclass _PyAccess32_2(PyAccess):\n \"\"\"PA, LA, stored in first and last bytes of a 32 bit word\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_RGBA **\", self.image32)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.r, pixel.a\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n # tuple\n pixel.r = min(color[0], 255)\n pixel.a = min(color[1], 255)\n\n\nclass _PyAccess32_3(PyAccess):\n \"\"\"RGB and friends, stored in the first three bytes of a 32 bit word\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_RGBA **\", self.image32)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.r, pixel.g, pixel.b\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n # tuple\n pixel.r = min(color[0], 255)\n pixel.g = min(color[1], 255)\n pixel.b = min(color[2], 255)\n pixel.a = 255\n\n\nclass _PyAccess32_4(PyAccess):\n \"\"\"RGBA etc, all 4 bytes of a 32 bit word\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_RGBA **\", self.image32)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.r, pixel.g, pixel.b, pixel.a\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n # tuple\n pixel.r = min(color[0], 255)\n pixel.g = min(color[1], 255)\n pixel.b = min(color[2], 255)\n pixel.a = min(color[3], 255)\n\n\nclass _PyAccess8(PyAccess):\n \"\"\"1, L, P, 8 bit images stored as uint8\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = self.image8\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n try:\n # integer\n self.pixels[y][x] = min(color, 255)\n except TypeError:\n # tuple\n self.pixels[y][x] = min(color[0], 255)\n\n\nclass _PyAccessI16_N(PyAccess):\n \"\"\"I;16 access, native bitendian without conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"unsigned short **\", self.image)\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n try:\n # integer\n self.pixels[y][x] = min(color, 65535)\n except TypeError:\n # tuple\n self.pixels[y][x] = min(color[0], 65535)\n\n\nclass _PyAccessI16_L(PyAccess):\n \"\"\"I;16L access, with conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_I16 **\", self.image)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.l + pixel.r * 256\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n try:\n color = min(color, 65535)\n except TypeError:\n color = min(color[0], 65535)\n\n pixel.l = color & 0xFF # noqa: E741\n pixel.r = color >> 8\n\n\nclass _PyAccessI16_B(PyAccess):\n \"\"\"I;16B access, with conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_I16 **\", self.image)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.l * 256 + pixel.r\n\n def set_pixel(self, 
x, y, color):\n pixel = self.pixels[y][x]\n try:\n color = min(color, 65535)\n except Exception:\n color = min(color[0], 65535)\n\n pixel.l = color >> 8 # noqa: E741\n pixel.r = color & 0xFF\n\n\nclass _PyAccessI32_N(PyAccess):\n \"\"\"Signed Int32 access, native endian\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = self.image32\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n self.pixels[y][x] = color\n\n\nclass _PyAccessI32_Swap(PyAccess):\n \"\"\"I;32L/B access, with byteswapping conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = self.image32\n\n def reverse(self, i):\n orig = ffi.new(\"int *\", i)\n chars = ffi.cast(\"unsigned char *\", orig)\n chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], chars[1], chars[0]\n return ffi.cast(\"int *\", chars)[0]\n\n def get_pixel(self, x, y):\n return self.reverse(self.pixels[y][x])\n\n def set_pixel(self, x, y, color):\n self.pixels[y][x] = self.reverse(color)\n\n\nclass _PyAccessF(PyAccess):\n \"\"\"32 bit float access\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"float **\", self.image32)\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n try:\n # not a tuple\n self.pixels[y][x] = color\n except TypeError:\n # tuple\n self.pixels[y][x] = color[0]\n\n\nmode_map = {\n \"1\": _PyAccess8,\n \"L\": _PyAccess8,\n \"P\": _PyAccess8,\n \"LA\": _PyAccess32_2,\n \"La\": _PyAccess32_2,\n \"PA\": _PyAccess32_2,\n \"RGB\": _PyAccess32_3,\n \"LAB\": _PyAccess32_3,\n \"HSV\": _PyAccess32_3,\n \"YCbCr\": _PyAccess32_3,\n \"RGBA\": _PyAccess32_4,\n \"RGBa\": _PyAccess32_4,\n \"RGBX\": _PyAccess32_4,\n \"CMYK\": _PyAccess32_4,\n \"F\": _PyAccessF,\n \"I\": _PyAccessI32_N,\n}\n\nif sys.byteorder == \"little\":\n mode_map[\"I;16\"] = _PyAccessI16_N\n mode_map[\"I;16L\"] = _PyAccessI16_N\n mode_map[\"I;16B\"] = _PyAccessI16_B\n\n mode_map[\"I;32L\"] = _PyAccessI32_N\n mode_map[\"I;32B\"] = _PyAccessI32_Swap\nelse:\n mode_map[\"I;16\"] = _PyAccessI16_L\n mode_map[\"I;16L\"] = _PyAccessI16_L\n mode_map[\"I;16B\"] = _PyAccessI16_N\n\n mode_map[\"I;32L\"] = _PyAccessI32_Swap\n mode_map[\"I;32B\"] = _PyAccessI32_N\n\n\ndef new(img, readonly=False):\n access_type = mode_map.get(img.mode, None)\n if not access_type:\n logger.debug(\"PyAccess Not Implemented: %s\", img.mode)\n return None\n return access_type(img, readonly)\n", "path": "src/PIL/PyAccess.py" } ]
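The `_PyAccessI32_Swap.reverse` helper above swaps the four bytes of a 32-bit value through a cffi cast. An equivalent stand-alone sketch using only the standard library (`reverse32` is an illustrative name, not a Pillow function):

```python
import struct


def reverse32(i):
    # Same effect as _PyAccessI32_Swap.reverse: re-read the four bytes of a
    # signed 32-bit integer with the opposite byte order.
    return struct.unpack("<i", struct.pack(">i", i))[0]


assert reverse32(0x01020304) == 0x04030201
```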
[ { "content": "#\n# The Python Imaging Library\n# Pillow fork\n#\n# Python implementation of the PixelAccess Object\n#\n# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.\n# Copyright (c) 1995-2009 by Fredrik Lundh.\n# Copyright (c) 2013 Eric Soroos\n#\n# See the README file for information on usage and redistribution\n#\n\n# Notes:\n#\n# * Implements the pixel access object following Access.c\n# * Taking only the tuple form, which is used from python.\n# * Fill.c uses the integer form, but it's still going to use the old\n# Access.c implementation.\n#\n\nimport logging\nimport sys\n\ntry:\n from cffi import FFI\n\n defs = \"\"\"\n struct Pixel_RGBA {\n unsigned char r,g,b,a;\n };\n struct Pixel_I16 {\n unsigned char l,r;\n };\n \"\"\"\n ffi = FFI()\n ffi.cdef(defs)\nexcept ImportError as ex:\n # Allow error import for doc purposes, but error out when accessing\n # anything in core.\n from ._util import DeferredError\n\n FFI = ffi = DeferredError(ex)\n\nlogger = logging.getLogger(__name__)\n\n\nclass PyAccess:\n def __init__(self, img, readonly=False):\n vals = dict(img.im.unsafe_ptrs)\n self.readonly = readonly\n self.image8 = ffi.cast(\"unsigned char **\", vals[\"image8\"])\n self.image32 = ffi.cast(\"int **\", vals[\"image32\"])\n self.image = ffi.cast(\"unsigned char **\", vals[\"image\"])\n self.xsize, self.ysize = img.im.size\n self._img = img\n\n # Keep pointer to im object to prevent dereferencing.\n self._im = img.im\n if self._im.mode in (\"P\", \"PA\"):\n self._palette = img.palette\n\n # Debugging is polluting test traces, only useful here\n # when hacking on PyAccess\n # logger.debug(\"%s\", vals)\n self._post_init()\n\n def _post_init(self):\n pass\n\n def __setitem__(self, xy, color):\n \"\"\"\n Modifies the pixel at x,y. The color is given as a single\n numerical value for single band images, and a tuple for\n multi-band images\n\n :param xy: The pixel coordinate, given as (x, y). See\n :ref:`coordinate-system`.\n :param color: The pixel value.\n \"\"\"\n if self.readonly:\n msg = \"Attempt to putpixel a read only image\"\n raise ValueError(msg)\n (x, y) = xy\n if x < 0:\n x = self.xsize + x\n if y < 0:\n y = self.ysize + y\n (x, y) = self.check_xy((x, y))\n\n if (\n self._im.mode in (\"P\", \"PA\")\n and isinstance(color, (list, tuple))\n and len(color) in [3, 4]\n ):\n # RGB or RGBA value for a P or PA image\n if self._im.mode == \"PA\":\n alpha = color[3] if len(color) == 4 else 255\n color = color[:3]\n color = self._palette.getcolor(color, self._img)\n if self._im.mode == \"PA\":\n color = (color, alpha)\n\n return self.set_pixel(x, y, color)\n\n def __getitem__(self, xy):\n \"\"\"\n Returns the pixel at x,y. The pixel is returned as a single\n value for single band images or a tuple for multiple band\n images\n\n :param xy: The pixel coordinate, given as (x, y). 
See\n :ref:`coordinate-system`.\n :returns: a pixel value for single band images, a tuple of\n pixel values for multiband images.\n \"\"\"\n (x, y) = xy\n if x < 0:\n x = self.xsize + x\n if y < 0:\n y = self.ysize + y\n (x, y) = self.check_xy((x, y))\n return self.get_pixel(x, y)\n\n putpixel = __setitem__\n getpixel = __getitem__\n\n def check_xy(self, xy):\n (x, y) = xy\n if not (0 <= x < self.xsize and 0 <= y < self.ysize):\n msg = \"pixel location out of range\"\n raise ValueError(msg)\n return xy\n\n\nclass _PyAccess32_2(PyAccess):\n \"\"\"PA, LA, stored in first and last bytes of a 32 bit word\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_RGBA **\", self.image32)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.r, pixel.a\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n # tuple\n pixel.r = min(color[0], 255)\n pixel.a = min(color[1], 255)\n\n\nclass _PyAccess32_3(PyAccess):\n \"\"\"RGB and friends, stored in the first three bytes of a 32 bit word\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_RGBA **\", self.image32)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.r, pixel.g, pixel.b\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n # tuple\n pixel.r = min(color[0], 255)\n pixel.g = min(color[1], 255)\n pixel.b = min(color[2], 255)\n pixel.a = 255\n\n\nclass _PyAccess32_4(PyAccess):\n \"\"\"RGBA etc, all 4 bytes of a 32 bit word\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_RGBA **\", self.image32)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.r, pixel.g, pixel.b, pixel.a\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n # tuple\n pixel.r = min(color[0], 255)\n pixel.g = min(color[1], 255)\n pixel.b = min(color[2], 255)\n pixel.a = min(color[3], 255)\n\n\nclass _PyAccess8(PyAccess):\n \"\"\"1, L, P, 8 bit images stored as uint8\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = self.image8\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n try:\n # integer\n self.pixels[y][x] = min(color, 255)\n except TypeError:\n # tuple\n self.pixels[y][x] = min(color[0], 255)\n\n\nclass _PyAccessI16_N(PyAccess):\n \"\"\"I;16 access, native bitendian without conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"unsigned short **\", self.image)\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n try:\n # integer\n self.pixels[y][x] = min(color, 65535)\n except TypeError:\n # tuple\n self.pixels[y][x] = min(color[0], 65535)\n\n\nclass _PyAccessI16_L(PyAccess):\n \"\"\"I;16L access, with conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_I16 **\", self.image)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.l + pixel.r * 256\n\n def set_pixel(self, x, y, color):\n pixel = self.pixels[y][x]\n try:\n color = min(color, 65535)\n except TypeError:\n color = min(color[0], 65535)\n\n pixel.l = color & 0xFF # noqa: E741\n pixel.r = color >> 8\n\n\nclass _PyAccessI16_B(PyAccess):\n \"\"\"I;16B access, with conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"struct Pixel_I16 **\", self.image)\n\n def get_pixel(self, x, y):\n pixel = self.pixels[y][x]\n return pixel.l * 256 + pixel.r\n\n def set_pixel(self, 
x, y, color):\n pixel = self.pixels[y][x]\n try:\n color = min(color, 65535)\n except Exception:\n color = min(color[0], 65535)\n\n pixel.l = color >> 8 # noqa: E741\n pixel.r = color & 0xFF\n\n\nclass _PyAccessI32_N(PyAccess):\n \"\"\"Signed Int32 access, native endian\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = self.image32\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n self.pixels[y][x] = color\n\n\nclass _PyAccessI32_Swap(PyAccess):\n \"\"\"I;32L/B access, with byteswapping conversion\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = self.image32\n\n def reverse(self, i):\n orig = ffi.new(\"int *\", i)\n chars = ffi.cast(\"unsigned char *\", orig)\n chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], chars[1], chars[0]\n return ffi.cast(\"int *\", chars)[0]\n\n def get_pixel(self, x, y):\n return self.reverse(self.pixels[y][x])\n\n def set_pixel(self, x, y, color):\n self.pixels[y][x] = self.reverse(color)\n\n\nclass _PyAccessF(PyAccess):\n \"\"\"32 bit float access\"\"\"\n\n def _post_init(self, *args, **kwargs):\n self.pixels = ffi.cast(\"float **\", self.image32)\n\n def get_pixel(self, x, y):\n return self.pixels[y][x]\n\n def set_pixel(self, x, y, color):\n try:\n # not a tuple\n self.pixels[y][x] = color\n except TypeError:\n # tuple\n self.pixels[y][x] = color[0]\n\n\nmode_map = {\n \"1\": _PyAccess8,\n \"L\": _PyAccess8,\n \"P\": _PyAccess8,\n \"I;16N\": _PyAccessI16_N,\n \"LA\": _PyAccess32_2,\n \"La\": _PyAccess32_2,\n \"PA\": _PyAccess32_2,\n \"RGB\": _PyAccess32_3,\n \"LAB\": _PyAccess32_3,\n \"HSV\": _PyAccess32_3,\n \"YCbCr\": _PyAccess32_3,\n \"RGBA\": _PyAccess32_4,\n \"RGBa\": _PyAccess32_4,\n \"RGBX\": _PyAccess32_4,\n \"CMYK\": _PyAccess32_4,\n \"F\": _PyAccessF,\n \"I\": _PyAccessI32_N,\n}\n\nif sys.byteorder == \"little\":\n mode_map[\"I;16\"] = _PyAccessI16_N\n mode_map[\"I;16L\"] = _PyAccessI16_N\n mode_map[\"I;16B\"] = _PyAccessI16_B\n\n mode_map[\"I;32L\"] = _PyAccessI32_N\n mode_map[\"I;32B\"] = _PyAccessI32_Swap\nelse:\n mode_map[\"I;16\"] = _PyAccessI16_L\n mode_map[\"I;16L\"] = _PyAccessI16_L\n mode_map[\"I;16B\"] = _PyAccessI16_N\n\n mode_map[\"I;32L\"] = _PyAccessI32_Swap\n mode_map[\"I;32B\"] = _PyAccessI32_N\n\n\ndef new(img, readonly=False):\n access_type = mode_map.get(img.mode, None)\n if not access_type:\n logger.debug(\"PyAccess Not Implemented: %s\", img.mode)\n return None\n return access_type(img, readonly)\n", "path": "src/PIL/PyAccess.py" } ]
diff --git a/Tests/test_image_access.py b/Tests/test_image_access.py index 4079d935800..027af5d56fa 100644 --- a/Tests/test_image_access.py +++ b/Tests/test_image_access.py @@ -275,15 +275,10 @@ def test_get_vs_c(self): # self._test_get_access(hopper('PA')) # PA -- how do I make a PA image? self._test_get_access(hopper("F")) - im = Image.new("I;16", (10, 10), 40000) - self._test_get_access(im) - im = Image.new("I;16L", (10, 10), 40000) - self._test_get_access(im) - im = Image.new("I;16B", (10, 10), 40000) - self._test_get_access(im) - - im = Image.new("I", (10, 10), 40000) - self._test_get_access(im) + for mode in ("I;16", "I;16L", "I;16B", "I;16N", "I"): + im = Image.new(mode, (10, 10), 40000) + self._test_get_access(im) + # These don't actually appear to be modes that I can actually make, # as unpack sets them directly into the I mode. # im = Image.new('I;32L', (10, 10), -2**10) @@ -322,15 +317,10 @@ def test_set_vs_c(self): # self._test_set_access(i, (128, 128)) #PA -- undone how to make self._test_set_access(hopper("F"), 1024.0) - im = Image.new("I;16", (10, 10), 40000) - self._test_set_access(im, 45000) - im = Image.new("I;16L", (10, 10), 40000) - self._test_set_access(im, 45000) - im = Image.new("I;16B", (10, 10), 40000) - self._test_set_access(im, 45000) + for mode in ("I;16", "I;16L", "I;16B", "I;16N", "I"): + im = Image.new(mode, (10, 10), 40000) + self._test_set_access(im, 45000) - im = Image.new("I", (10, 10), 40000) - self._test_set_access(im, 45000) # im = Image.new('I;32L', (10, 10), -(2**10)) # self._test_set_access(im, -(2**13)+1) # im = Image.new('I;32B', (10, 10), 2**10) diff --git a/Tests/test_lib_pack.py b/Tests/test_lib_pack.py index 979806cae99..de3e7d1569b 100644 --- a/Tests/test_lib_pack.py +++ b/Tests/test_lib_pack.py @@ -207,6 +207,9 @@ def test_I(self): 0x01000083, ) + def test_I16(self): + self.assert_pack("I;16N", "I;16N", 2, 0x0201, 0x0403, 0x0605) + def test_F_float(self): self.assert_pack("F", "F;32F", 4, 1.539989614439558e-36, 4.063216068939723e-34) @@ -761,10 +764,12 @@ def test_I16(self): self.assert_unpack("I;16", "I;16N", 2, 0x0201, 0x0403, 0x0605) self.assert_unpack("I;16B", "I;16N", 2, 0x0201, 0x0403, 0x0605) self.assert_unpack("I;16L", "I;16N", 2, 0x0201, 0x0403, 0x0605) + self.assert_unpack("I;16N", "I;16N", 2, 0x0201, 0x0403, 0x0605) else: self.assert_unpack("I;16", "I;16N", 2, 0x0102, 0x0304, 0x0506) self.assert_unpack("I;16B", "I;16N", 2, 0x0102, 0x0304, 0x0506) self.assert_unpack("I;16L", "I;16N", 2, 0x0102, 0x0304, 0x0506) + self.assert_unpack("I;16N", "I;16N", 2, 0x0102, 0x0304, 0x0506) def test_CMYK16(self): self.assert_unpack("CMYK", "CMYK;16L", 8, (2, 4, 6, 8), (10, 12, 14, 16)) diff --git a/Tests/test_mode_i16.py b/Tests/test_mode_i16.py index dcdee3d416d..1786dba3847 100644 --- a/Tests/test_mode_i16.py +++ b/Tests/test_mode_i16.py @@ -88,10 +88,7 @@ def tobytes(mode): def test_convert(): im = original.copy() - verify(im.convert("I;16")) - verify(im.convert("I;16").convert("L")) - verify(im.convert("I;16").convert("I")) - - verify(im.convert("I;16B")) - verify(im.convert("I;16B").convert("L")) - verify(im.convert("I;16B").convert("I")) + for mode in ("I;16", "I;16B", "I;16N"): + verify(im.convert(mode)) + verify(im.convert(mode).convert("L")) + verify(im.convert(mode).convert("I")) diff --git a/docs/releasenotes/9.5.0.rst b/docs/releasenotes/9.5.0.rst index bd6e586932a..13c99313a22 100644 --- a/docs/releasenotes/9.5.0.rst +++ b/docs/releasenotes/9.5.0.rst @@ -63,3 +63,10 @@ Added support for saving PDFs in RGBA mode 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Using the JPXDecode filter, PDFs can now be saved in RGBA mode. + + +Improved I;16N support +^^^^^^^^^^^^^^^^^^^^^^ + +Support has been added for I;16N access, packing and unpacking. Conversion to +and from L mode has also been added. diff --git a/src/PIL/PyAccess.py b/src/PIL/PyAccess.py index e9cb34ceda1..39747b4f311 100644 --- a/src/PIL/PyAccess.py +++ b/src/PIL/PyAccess.py @@ -320,6 +320,7 @@ def set_pixel(self, x, y, color): "1": _PyAccess8, "L": _PyAccess8, "P": _PyAccess8, + "I;16N": _PyAccessI16_N, "LA": _PyAccess32_2, "La": _PyAccess32_2, "PA": _PyAccess32_2, diff --git a/src/libImaging/Access.c b/src/libImaging/Access.c index 83860c38a7e..f00939da0b3 100644 --- a/src/libImaging/Access.c +++ b/src/libImaging/Access.c @@ -13,7 +13,7 @@ /* use make_hash.py from the pillow-scripts repository to calculate these values */ #define ACCESS_TABLE_SIZE 27 -#define ACCESS_TABLE_HASH 3078 +#define ACCESS_TABLE_HASH 33051 static struct ImagingAccessInstance access_table[ACCESS_TABLE_SIZE]; @@ -92,6 +92,12 @@ get_pixel_16B(Imaging im, int x, int y, void *color) { #endif } +static void +get_pixel_16(Imaging im, int x, int y, void *color) { + UINT8 *in = (UINT8 *)&im->image[y][x + x]; + memcpy(color, in, sizeof(UINT16)); +} + static void get_pixel_32(Imaging im, int x, int y, void *color) { memcpy(color, &im->image32[y][x], sizeof(INT32)); @@ -186,6 +192,7 @@ ImagingAccessInit() { ADD("I;16", get_pixel_16L, put_pixel_16L); ADD("I;16L", get_pixel_16L, put_pixel_16L); ADD("I;16B", get_pixel_16B, put_pixel_16B); + ADD("I;16N", get_pixel_16, put_pixel_16L); ADD("I;32L", get_pixel_32L, put_pixel_32L); ADD("I;32B", get_pixel_32B, put_pixel_32B); ADD("F", get_pixel_32, put_pixel_32); diff --git a/src/libImaging/Convert.c b/src/libImaging/Convert.c index b03bd02af2b..7fe24a63939 100644 --- a/src/libImaging/Convert.c +++ b/src/libImaging/Convert.c @@ -990,6 +990,13 @@ static struct { {"I;16L", "L", I16L_L}, {"L", "I;16B", L_I16B}, {"I;16B", "L", I16B_L}, +#ifdef WORDS_BIGENDIAN + {"L", "I;16N", L_I16B}, + {"I;16N", "L", I16B_L}, +#else + {"L", "I;16N", L_I16L}, + {"I;16N", "L", I16L_L}, +#endif {"I;16", "F", I16L_F}, {"I;16L", "F", I16L_F}, diff --git a/src/libImaging/Pack.c b/src/libImaging/Pack.c index 01760e742be..14c8f1461aa 100644 --- a/src/libImaging/Pack.c +++ b/src/libImaging/Pack.c @@ -664,6 +664,7 @@ static struct { #endif {"I;16B", "I;16B", 16, copy2}, {"I;16L", "I;16L", 16, copy2}, + {"I;16N", "I;16N", 16, copy2}, {"I;16", "I;16N", 16, packI16N_I16}, // LibTiff native->image endian. {"I;16L", "I;16N", 16, packI16N_I16}, {"I;16B", "I;16N", 16, packI16N_I16B}, diff --git a/src/libImaging/Unpack.c b/src/libImaging/Unpack.c index e426ed74fce..7eeadf944ea 100644 --- a/src/libImaging/Unpack.c +++ b/src/libImaging/Unpack.c @@ -1762,6 +1762,7 @@ static struct { {"I;16", "I;16", 16, copy2}, {"I;16B", "I;16B", 16, copy2}, {"I;16L", "I;16L", 16, copy2}, + {"I;16N", "I;16N", 16, copy2}, {"I;16", "I;16N", 16, unpackI16N_I16}, // LibTiff native->image endian. {"I;16L", "I;16N", 16, unpackI16N_I16}, // LibTiff native->image endian.
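For reference, a minimal sketch of what the I;16N support described in this record enables at the Python level. It mirrors what the updated tests in the diff above exercise (pixel get/set and conversion to and from L mode) and assumes a Pillow build that already includes this change:

```python
from PIL import Image

# Create a native-endian 16-bit image, as the updated tests do.
im = Image.new("I;16N", (10, 10), 40000)

# Pixel access works through the I;16N accessor added to mode_map.
px = im.load()
px[0, 0] = 45000
print(px[0, 0])  # 45000

# Conversion to and from L mode is covered by the new Convert.c entries.
gray = im.convert("L")
back = gray.convert("I;16N")
```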
craiga__will-of-the-prophets-26
Clean up login form
[ { "content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# 
https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py" } ]
[ { "content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'widget_tweaks',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# 
django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py" } ]
diff --git a/Pipfile b/Pipfile index 9480b879..06aa4b45 100644 --- a/Pipfile +++ b/Pipfile @@ -11,6 +11,7 @@ pillow = "*" "psycopg2-binary" = "*" gunicorn = "*" raven = "*" +django-widget-tweaks = "*" [dev-packages] pycodestyle = "*" diff --git a/Pipfile.lock b/Pipfile.lock index e478903a..c8554e78 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "8657ca5d7cc006bba82b0f66005a8da9a167bb5cbafa1f6e9eeab9b88dd15868" + "sha256": "5f583f1faeb177c9474ddbc3be05cf923e6b0c86fb780d6ade8989509c18061c" }, "pipfile-spec": 6, "requires": { @@ -46,6 +46,14 @@ "index": "pypi", "version": "==0.7" }, + "django-widget-tweaks": { + "hashes": [ + "sha256:a31c8a2b88af98dba6471db4722a416d1c643c87efecf9a7f17f983a2a553632", + "sha256:f9961162c8ed272162e22e5877d29c7780476970441dce605118ef66da685e71" + ], + "index": "pypi", + "version": "==1.4.2" + }, "gunicorn": { "hashes": [ "sha256:7ef2b828b335ed58e3b64ffa84caceb0a7dd7c5ca12f217241350dec36a1d5dc", diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py index 7421ded6..4e000b87 100644 --- a/will_of_the_prophets/settings/__init__.py +++ b/will_of_the_prophets/settings/__init__.py @@ -42,6 +42,7 @@ 'django.contrib.messages', 'django.contrib.staticfiles', 'sass_processor', + 'widget_tweaks', 'bootstrap', 'will_of_the_prophets', ] diff --git a/will_of_the_prophets/templates/registration/login.html b/will_of_the_prophets/templates/registration/login.html index c8f2ff5c..235d8138 100644 --- a/will_of_the_prophets/templates/registration/login.html +++ b/will_of_the_prophets/templates/registration/login.html @@ -1,38 +1,58 @@ {% extends 'base.html' %} +{% load widget_tweaks %} {% block content %} - {% if form.errors %} - {% for field in form %} - {% for error in field.errors %} +<div class="row pt-3"> + <div class="col-12"> + <p class="h3 text-orange">The Greatest Generation Presents</p> + <h1 class="text-blue">Game of Buttholes:<br>The Will of the Prophets</h1> + + {% if form.errors %} + {% for field in form %} + {% for error in field.errors %} + <div class="alert alert-danger"> + <strong>{{ error|escape }}</strong> + </div> + {% endfor %} + {% endfor %} + {% for error in form.non_field_errors %} <div class="alert alert-danger"> <strong>{{ error|escape }}</strong> </div> {% endfor %} - {% endfor %} - {% for error in form.non_field_errors %} + {% endif %} + + {% if next and user.is_authenticated %} <div class="alert alert-danger"> - <strong>{{ error|escape }}</strong> + <strong>Your account doesn't have access to this page. To proceed, + please login with an account that has access.</strong> </div> - {% endfor %} - {% endif %} - - {% if next %} - {% if user.is_authenticated %} - <p>Your account doesn't have access to this page. 
To proceed, - please login with an account that has access.</p> - {% else %} - <p>Please login to see this page.</p> {% endif %} - {% endif %} - <form method="post" action="{% url 'login' %}"> - {% csrf_token %} - <div>{{ form.username.label_tag }} {{ form.username }}</div> - <div>{{ form.password.label_tag }} {{ form.password }}</div> - <input type="submit" value="login" /> - <input type="hidden" name="next" value="{{ next }}" /> - </form> + <form method="post" action="{% url 'login' %}"> + {% csrf_token %} + <input type="hidden" name="next" value="{{ next }}" /> + <div class="form-row"> + <div class="form-group col-sm-6"> + {{ form.username.label_tag }} + {{ form.username|add_class:'form-control'|attr:'placeholder:Username' }} + </div> + <div class="form-group col-sm-6"> + {{ form.password.label_tag }} + {{ form.password|add_class:'form-control'|attr:'placeholder:Username' }} + </div> + </div> + <div class="form-row"> + <div class="form-group col-12"> + <button type="submit" class="btn btn-primary">Log In</button> + </div> + </div> + </form> + + <p><a href="{% url 'password_reset' %}">Lost password?</a></p> + + </div> +</div> - <p><a href="{% url 'password_reset' %}">Lost password?</a></p> {% endblock %}
django-wiki__django-wiki-400
Django 1.7 migrations are incomplete, fail after makemigrations My project uses Django 1.7.5 and Python 3.4. I installed `django-wiki` from the current `master` branch, added the necessary settings, and ran `manage.py migrate`, and everything worked. But my project's model definitions are in constant flux right now, so I re-create my initial migrations fairly often. The most recent time I did this, it generated a second migration for `django-wiki`: ``` $ manage.py makemigrations Migrations for 'wiki': 0002_auto_20150308_0558.py: - Remove field article from articleplugin - Remove field articleplugin_ptr from reusableplugin - Remove field articles from reusableplugin - Delete model ReusablePlugin - Remove field articleplugin_ptr from revisionplugin - Remove field current_revision from revisionplugin - Remove field plugin from revisionpluginrevision - Delete model RevisionPlugin - Remove field previous_revision from revisionpluginrevision - Remove field user from revisionpluginrevision - Delete model RevisionPluginRevision - Remove field article_revision from simpleplugin - Remove field articleplugin_ptr from simpleplugin - Delete model ArticlePlugin - Delete model SimplePlugin ``` And this migration failed to run, with the following error: ``` Running migrations: Applying wiki.0002_auto_20150308_0558...Traceback (most recent call last): ... django.db.migrations.state.InvalidBasesError: Cannot resolve bases for [<ModelState: 'wiki.SimplePlugin'>] This can happen if you are inheriting models from an app with migrations (e.g. contrib.auth) in an app with no migrations; see https://docs.djangoproject.com/en/1.7/topics/migrations/#dependencies for more ``` Are those models really intended to be concrete? Or should they be abstract mixins instead?
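To illustrate the concrete-versus-abstract distinction the reporter is asking about, here is a generic Django sketch with hypothetical class names; it assumes the models live in an installed app of a configured project and is not the project's actual fix (the merged change below instead re-exports the `pluginbase` models from `wiki.models` so `makemigrations` sees them):

```python
from django.db import models


class ConcretePluginBase(models.Model):
    # Concrete base: gets its own table (multi-table inheritance), so every
    # subclass needs this model resolvable in migration state.
    article = models.ForeignKey("wiki.Article", on_delete=models.CASCADE)


class AbstractPluginBase(models.Model):
    # Abstract base: contributes fields to subclasses but creates no table,
    # so subclasses in other apps do not depend on it in migrations.
    article = models.ForeignKey("wiki.Article", on_delete=models.CASCADE)

    class Meta:
        abstract = True
```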
[ { "content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n# -*- coding: utf-8 -*-\n\nfrom django import VERSION\nfrom django.conf import settings as django_settings\nfrom django.core.exceptions import ImproperlyConfigured\nimport warnings\nfrom six import string_types\n\n# TODO: Don't use wildcards\nfrom .article import *\nfrom .urlpath import *\n\n# TODO: Should the below stuff be executed a more logical place?\n# Follow Django's default_settings.py / settings.py pattern and put these\n# in d_s.py? That might be confusing, though.\n\n######################\n# Configuration stuff\n######################\n\nif not 'mptt' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured('django-wiki: needs mptt in INSTALLED_APPS')\n\nif not 'sekizai' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured('django-wiki: needs sekizai in INSTALLED_APPS')\n\n# if not 'django_nyt' in django_settings.INSTALLED_APPS:\n# raise ImproperlyConfigured('django-wiki: needs django_nyt in INSTALLED_APPS')\n\nif not 'django.contrib.humanize' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.contrib.humanize in INSTALLED_APPS')\n\nif not 'django.contrib.contenttypes' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.contrib.contenttypes in INSTALLED_APPS')\n\nif not 'django.contrib.auth.context_processors.auth' in django_settings.TEMPLATE_CONTEXT_PROCESSORS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.contrib.auth.context_processors.auth in TEMPLATE_CONTEXT_PROCESSORS')\n\nif not 'django.core.context_processors.request' in django_settings.TEMPLATE_CONTEXT_PROCESSORS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.core.context_processors.request in TEMPLATE_CONTEXT_PROCESSORS')\n\nif 'django_notify' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n 'django-wiki: You need to change from django_notify to django_nyt in INSTALLED_APPS and your urlconfig.')\n\n######################\n# Warnings\n######################\n\n\nif VERSION < (1, 7):\n if not 'south' in django_settings.INSTALLED_APPS:\n warnings.warn(\n \"django-wiki: No south in your INSTALLED_APPS. This is highly discouraged.\")\n\n\nfrom django.core import urlresolvers\n\noriginal_django_reverse = urlresolvers.reverse\n\n\ndef reverse(*args, **kwargs):\n \"\"\"Now this is a crazy and silly hack, but it is basically here to\n enforce that an empty path always takes precedence over an article_id\n such that the root article doesn't get resolved to /ID/ but /.\n\n Another crazy hack that this supports is transforming every wiki url\n by a function. If _transform_url is set on this function, it will\n return the result of calling reverse._transform_url(reversed_url)\n for every url in the wiki namespace.\n \"\"\"\n if isinstance(args[0], string_types) and args[0].startswith('wiki:'):\n url_kwargs = kwargs.get('kwargs', {})\n path = url_kwargs.get('path', False)\n # If a path is supplied then discard the article_id\n if path is not False:\n url_kwargs.pop('article_id', None)\n url_kwargs['path'] = path\n kwargs['kwargs'] = url_kwargs\n\n url = original_django_reverse(*args, **kwargs)\n if hasattr(reverse, '_transform_url'):\n url = reverse._transform_url(url)\n else:\n url = original_django_reverse(*args, **kwargs)\n\n return url\n\n# Now we redefine reverse method\nurlresolvers.reverse = reverse\n", "path": "wiki/models/__init__.py" } ]
[ { "content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n# -*- coding: utf-8 -*-\n\nfrom django import VERSION\nfrom django.conf import settings as django_settings\nfrom django.core.exceptions import ImproperlyConfigured\nimport warnings\nfrom six import string_types\n\n# TODO: Don't use wildcards\nfrom .article import *\nfrom .pluginbase import *\nfrom .urlpath import *\n\n# TODO: Should the below stuff be executed a more logical place?\n# Follow Django's default_settings.py / settings.py pattern and put these\n# in d_s.py? That might be confusing, though.\n\n######################\n# Configuration stuff\n######################\n\nif not 'mptt' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured('django-wiki: needs mptt in INSTALLED_APPS')\n\nif not 'sekizai' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured('django-wiki: needs sekizai in INSTALLED_APPS')\n\n# if not 'django_nyt' in django_settings.INSTALLED_APPS:\n# raise ImproperlyConfigured('django-wiki: needs django_nyt in INSTALLED_APPS')\n\nif not 'django.contrib.humanize' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.contrib.humanize in INSTALLED_APPS')\n\nif not 'django.contrib.contenttypes' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.contrib.contenttypes in INSTALLED_APPS')\n\nif not 'django.contrib.auth.context_processors.auth' in django_settings.TEMPLATE_CONTEXT_PROCESSORS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.contrib.auth.context_processors.auth in TEMPLATE_CONTEXT_PROCESSORS')\n\nif not 'django.core.context_processors.request' in django_settings.TEMPLATE_CONTEXT_PROCESSORS:\n raise ImproperlyConfigured(\n 'django-wiki: needs django.core.context_processors.request in TEMPLATE_CONTEXT_PROCESSORS')\n\nif 'django_notify' in django_settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n 'django-wiki: You need to change from django_notify to django_nyt in INSTALLED_APPS and your urlconfig.')\n\n######################\n# Warnings\n######################\n\n\nif VERSION < (1, 7):\n if not 'south' in django_settings.INSTALLED_APPS:\n warnings.warn(\n \"django-wiki: No south in your INSTALLED_APPS. This is highly discouraged.\")\n\n\nfrom django.core import urlresolvers\n\noriginal_django_reverse = urlresolvers.reverse\n\n\ndef reverse(*args, **kwargs):\n \"\"\"Now this is a crazy and silly hack, but it is basically here to\n enforce that an empty path always takes precedence over an article_id\n such that the root article doesn't get resolved to /ID/ but /.\n\n Another crazy hack that this supports is transforming every wiki url\n by a function. If _transform_url is set on this function, it will\n return the result of calling reverse._transform_url(reversed_url)\n for every url in the wiki namespace.\n \"\"\"\n if isinstance(args[0], string_types) and args[0].startswith('wiki:'):\n url_kwargs = kwargs.get('kwargs', {})\n path = url_kwargs.get('path', False)\n # If a path is supplied then discard the article_id\n if path is not False:\n url_kwargs.pop('article_id', None)\n url_kwargs['path'] = path\n kwargs['kwargs'] = url_kwargs\n\n url = original_django_reverse(*args, **kwargs)\n if hasattr(reverse, '_transform_url'):\n url = reverse._transform_url(url)\n else:\n url = original_django_reverse(*args, **kwargs)\n\n return url\n\n# Now we redefine reverse method\nurlresolvers.reverse = reverse\n", "path": "wiki/models/__init__.py" } ]
diff --git a/wiki/models/__init__.py b/wiki/models/__init__.py index 266bef60b..e8c307490 100644 --- a/wiki/models/__init__.py +++ b/wiki/models/__init__.py @@ -10,6 +10,7 @@ # TODO: Don't use wildcards from .article import * +from .pluginbase import * from .urlpath import * # TODO: Should the below stuff be executed a more logical place?
scikit-hep__pyhf-336
bumpversion missing from setup.py[develop] # Description As titled, `bumpversion` is not in the list of develop dependencies. # Expected Behavior Installing `pyhf` installs `bumpversion`. # Actual Behavior It does not install `bumpversion`. # Steps to Reproduce `pip install pyhf[develop]` # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
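For context, a minimal, generic sketch of how a setuptools extra declares optional dependencies; the package name and dependency strings here are illustrative only, pyhf's actual lists are shown in the files below:

```python
from setuptools import setup

setup(
    name="example-package",          # illustrative name, not pyhf
    install_requires=["numpy"],      # always installed
    extras_require={
        # Only pulled in by `pip install example-package[develop]`
        "develop": ["pytest", "bumpversion"],
    },
)
```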
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 2b0ae3e678..c26875d832 100644 --- a/setup.py +++ b/setup.py @@ -35,6 +35,7 @@ 'uproot>=3.0.0', 'papermill', 'graphviz', + 'bumpversion', 'sphinx', 'sphinxcontrib-bibtex', 'sphinxcontrib-napoleon',
lutris__lutris-2472
Don't show Steam Linux Runtime when importing games. Link to the tool on steamdb: https://steamdb.info/app/1070560/
[ { "content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_game import ServiceGame\n\nNAME = \"Steam\"\nICON = \"steam\"\nONLINE = False\n\n\nclass SteamGame(ServiceGame):\n \"\"\"ServiceGame for Steam games\"\"\"\n\n store = \"steam\"\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n ]\n\n @classmethod\n def new_from_steam_game(cls, appmanifest, game_id=None):\n \"\"\"Return a Steam game instance from an AppManifest\"\"\"\n steam_game = SteamGame()\n steam_game.appid = str(appmanifest.steamid)\n steam_game.game_id = game_id\n steam_game.name = appmanifest.name\n steam_game.slug = appmanifest.slug\n steam_game.runner = appmanifest.get_runner_name()\n return steam_game\n\n @classmethod\n def new_from_lutris_id(cls, game_id):\n steam_game = SteamGame()\n steam_game.game_id = game_id\n return steam_game\n\n @property\n def config_id(self):\n return make_game_config_id(self.slug)\n\n @classmethod\n def is_importable(cls, appmanifest):\n \"\"\"Return whether a Steam game should be imported\"\"\"\n if not appmanifest.is_installed():\n return False\n if appmanifest.steamid in cls.excluded_appids:\n return False\n if re.match(r\"^Proton \\d*\", appmanifest.name):\n return False\n return True\n\n def install(self, updated_info=None):\n \"\"\"Add an installed game to the library\n\n Params:\n updated_info (dict): Optional dictonary containing existing data not to overwrite\n \"\"\"\n if updated_info:\n name = updated_info[\"name\"]\n slug = updated_info[\"slug\"]\n else:\n name = self.name\n slug = self.slug\n self.game_id = pga.add_or_update(\n id=self.game_id,\n name=name,\n runner=self.runner,\n slug=slug,\n steamid=int(self.appid),\n installed=1,\n configpath=self.config_id,\n installer_slug=self.installer_slug,\n )\n self.create_config()\n return self.game_id\n\n def create_config(self):\n \"\"\"Create the game configuration for a Steam game\"\"\"\n game_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=self.config_id\n )\n game_config.raw_game_config.update({\"appid\": self.appid})\n game_config.save()\n\n\nclass SteamSyncer:\n platform = \"linux\"\n\n def __init__(self):\n self._lutris_games = None\n self._lutris_steamids = None\n\n @property\n def runner(self):\n return \"steam\" if self.platform == \"linux\" else \"winesteam\"\n\n @property\n def lutris_games(self):\n if not self._lutris_games:\n self._lutris_games = pga.get_games_where(\n steamid__isnull=False, steamid__not=\"\"\n )\n return self._lutris_games\n\n @property\n def lutris_steamids(self):\n if not self._lutris_steamids:\n self._lutris_steamids = {str(game[\"steamid\"]) for game in self.lutris_games}\n return self._lutris_steamids\n\n def load(self, force_reload=False):\n \"\"\"Return importable Steam games\"\"\"\n games = []\n steamapps_paths = get_steamapps_paths()\n for steamapps_path in steamapps_paths[self.platform]:\n for appmanifest_file in get_appmanifests(steamapps_path):\n app_manifest = AppManifest(\n os.path.join(steamapps_path, appmanifest_file)\n )\n if SteamGame.is_importable(app_manifest):\n games.append(SteamGame.new_from_steam_game(app_manifest))\n return games\n\n def get_pga_game(self, game):\n \"\"\"Return a PGA game if one is found\"\"\"\n for pga_game in self.lutris_games:\n if (\n 
str(pga_game[\"steamid\"]) == game.appid\n and (pga_game[\"runner\"] == self.runner or not pga_game[\"runner\"])\n and not pga_game[\"installed\"]\n ):\n return pga_game\n\n def sync(self, games, full=False):\n \"\"\"Syncs Steam games to Lutris\"\"\"\n available_ids = set() # Set of Steam appids seen while browsing AppManifests\n added_games = []\n for game in games:\n steamid = game.appid\n available_ids.add(steamid)\n pga_game = self.get_pga_game(game)\n\n if pga_game:\n if (\n steamid in self.lutris_steamids\n and pga_game[\"installed\"] != 1\n and pga_game[\"installed\"]\n ):\n added_games.append(game.install())\n\n if steamid not in self.lutris_steamids:\n added_games.append(game.install())\n else:\n if pga_game:\n added_games.append(game.install(pga_game))\n\n if not full:\n return added_games, games\n\n removed_games = []\n unavailable_ids = self.lutris_steamids.difference(available_ids)\n for steamid in unavailable_ids:\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == steamid\n and pga_game[\"installed\"]\n and pga_game[\"runner\"] == self.runner\n ):\n game = SteamGame.new_from_lutris_id(pga_game[\"id\"])\n game.uninstall()\n removed_games.append(pga_game[\"id\"])\n return (added_games, removed_games)\n\n\nSYNCER = SteamSyncer\n", "path": "lutris/services/steam.py" } ]
[ { "content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_game import ServiceGame\n\nNAME = \"Steam\"\nICON = \"steam\"\nONLINE = False\n\n\nclass SteamGame(ServiceGame):\n \"\"\"ServiceGame for Steam games\"\"\"\n\n store = \"steam\"\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n \"1070560\", # Steam Linux Runtime\n ]\n\n @classmethod\n def new_from_steam_game(cls, appmanifest, game_id=None):\n \"\"\"Return a Steam game instance from an AppManifest\"\"\"\n steam_game = SteamGame()\n steam_game.appid = str(appmanifest.steamid)\n steam_game.game_id = game_id\n steam_game.name = appmanifest.name\n steam_game.slug = appmanifest.slug\n steam_game.runner = appmanifest.get_runner_name()\n return steam_game\n\n @classmethod\n def new_from_lutris_id(cls, game_id):\n steam_game = SteamGame()\n steam_game.game_id = game_id\n return steam_game\n\n @property\n def config_id(self):\n return make_game_config_id(self.slug)\n\n @classmethod\n def is_importable(cls, appmanifest):\n \"\"\"Return whether a Steam game should be imported\"\"\"\n if not appmanifest.is_installed():\n return False\n if appmanifest.steamid in cls.excluded_appids:\n return False\n if re.match(r\"^Proton \\d*\", appmanifest.name):\n return False\n return True\n\n def install(self, updated_info=None):\n \"\"\"Add an installed game to the library\n\n Params:\n updated_info (dict): Optional dictonary containing existing data not to overwrite\n \"\"\"\n if updated_info:\n name = updated_info[\"name\"]\n slug = updated_info[\"slug\"]\n else:\n name = self.name\n slug = self.slug\n self.game_id = pga.add_or_update(\n id=self.game_id,\n name=name,\n runner=self.runner,\n slug=slug,\n steamid=int(self.appid),\n installed=1,\n configpath=self.config_id,\n installer_slug=self.installer_slug,\n )\n self.create_config()\n return self.game_id\n\n def create_config(self):\n \"\"\"Create the game configuration for a Steam game\"\"\"\n game_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=self.config_id\n )\n game_config.raw_game_config.update({\"appid\": self.appid})\n game_config.save()\n\n\nclass SteamSyncer:\n platform = \"linux\"\n\n def __init__(self):\n self._lutris_games = None\n self._lutris_steamids = None\n\n @property\n def runner(self):\n return \"steam\" if self.platform == \"linux\" else \"winesteam\"\n\n @property\n def lutris_games(self):\n if not self._lutris_games:\n self._lutris_games = pga.get_games_where(\n steamid__isnull=False, steamid__not=\"\"\n )\n return self._lutris_games\n\n @property\n def lutris_steamids(self):\n if not self._lutris_steamids:\n self._lutris_steamids = {str(game[\"steamid\"]) for game in self.lutris_games}\n return self._lutris_steamids\n\n def load(self, force_reload=False):\n \"\"\"Return importable Steam games\"\"\"\n games = []\n steamapps_paths = get_steamapps_paths()\n for steamapps_path in steamapps_paths[self.platform]:\n for appmanifest_file in get_appmanifests(steamapps_path):\n app_manifest = AppManifest(\n os.path.join(steamapps_path, appmanifest_file)\n )\n if SteamGame.is_importable(app_manifest):\n games.append(SteamGame.new_from_steam_game(app_manifest))\n return games\n\n def get_pga_game(self, game):\n \"\"\"Return a PGA game if one is found\"\"\"\n for pga_game 
in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == game.appid\n and (pga_game[\"runner\"] == self.runner or not pga_game[\"runner\"])\n and not pga_game[\"installed\"]\n ):\n return pga_game\n\n def sync(self, games, full=False):\n \"\"\"Syncs Steam games to Lutris\"\"\"\n available_ids = set() # Set of Steam appids seen while browsing AppManifests\n added_games = []\n for game in games:\n steamid = game.appid\n available_ids.add(steamid)\n pga_game = self.get_pga_game(game)\n\n if pga_game:\n if (\n steamid in self.lutris_steamids\n and pga_game[\"installed\"] != 1\n and pga_game[\"installed\"]\n ):\n added_games.append(game.install())\n\n if steamid not in self.lutris_steamids:\n added_games.append(game.install())\n else:\n if pga_game:\n added_games.append(game.install(pga_game))\n\n if not full:\n return added_games, games\n\n removed_games = []\n unavailable_ids = self.lutris_steamids.difference(available_ids)\n for steamid in unavailable_ids:\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == steamid\n and pga_game[\"installed\"]\n and pga_game[\"runner\"] == self.runner\n ):\n game = SteamGame.new_from_lutris_id(pga_game[\"id\"])\n game.uninstall()\n removed_games.append(pga_game[\"id\"])\n return (added_games, removed_games)\n\n\nSYNCER = SteamSyncer\n", "path": "lutris/services/steam.py" } ]
diff --git a/lutris/services/steam.py b/lutris/services/steam.py index 53aba8daf8..2e672a4cd6 100644 --- a/lutris/services/steam.py +++ b/lutris/services/steam.py @@ -20,6 +20,7 @@ class SteamGame(ServiceGame): installer_slug = "steam" excluded_appids = [ "228980", # Steamworks Common Redistributables + "1070560", # Steam Linux Runtime ] @classmethod
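As a standalone illustration of the filter this patch extends, the snippet below mirrors the logic of `SteamGame.is_importable` shown above rather than importing Lutris itself; the function name and game titles are only for demonstration:

```python
import re

# Mirrors SteamGame.excluded_appids after the change above.
EXCLUDED_APPIDS = {"228980", "1070560"}  # Steamworks redistributables, Steam Linux Runtime


def is_importable(appid, name, installed=True):
    """Standalone mirror of the appid/name filter in the service code above."""
    if not installed:
        return False
    if appid in EXCLUDED_APPIDS:
        return False
    if re.match(r"^Proton \d*", name):
        return False
    return True


print(is_importable("1070560", "Steam Linux Runtime"))  # False: now excluded
print(is_importable("220", "Half-Life 2"))              # True
```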
optuna__optuna-1088
[RFC] CI for examples/visualization It would be better if we could validate in CI that all the visualization examples run without failures. https://github.com/computationalmodelling/nbval/ might be useful for this.
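A minimal sketch of the kind of check nbval enables, assuming `pytest` and `nbval` are installed; the `--nbval-lax` flag is the same one the eventual CI change (shown in the diff below) uses, and the notebook path is one of the files that change touches:

```python
import pytest

# Execute the notebook and fail if any cell raises; --nbval-lax skips
# strict output comparison, which suits plot-producing notebooks.
pytest.main(["--nbval-lax", "examples/visualization/plot_study.ipynb"])
```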
[ { "content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.3.2\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\",],\n \"codecov\": [\"codecov\", \"pytest-cov\",],\n \"doctest\": [\"cma\", \"pandas\", \"plotly>=4.0.0\", \"scikit-learn>=0.19.0\", \"scikit-optimize\",],\n \"document\": [\"sphinx\", \"sphinx_rtd_theme\",],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"allennlp\", \"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"dask[dataframe]\",\n \"dask-ml\",\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fanova\",\n \"lightgbm\",\n \"mock\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"pytorch-ignite\",\n \"scikit-learn>=0.19.0\",\n \"scikit-optimize\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution([\"pfnopt\"])\nif pfnopt_pkg is not None:\n msg = (\n \"We detected that PFNOpt is installed in your environment.\\n\"\n \"PFNOpt has been renamed Optuna. Please uninstall the old\\n\"\n \"PFNOpt in advance (e.g. 
by executing `$ pip uninstall pfnopt`).\"\n )\n print(msg)\n exit(1)\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/rdb/alembic.ini\",\n \"storages/rdb/alembic/*.*\",\n \"storages/rdb/alembic/versions/*.*\",\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\"console_scripts\": [\"optuna = optuna.cli:main\"]},\n)\n", "path": "setup.py" } ]
[ { "content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\",],\n \"codecov\": [\"codecov\", \"pytest-cov\",],\n \"doctest\": [\"cma\", \"pandas\", \"plotly>=4.0.0\", \"scikit-learn>=0.19.0\", \"scikit-optimize\",],\n \"document\": [\"sphinx\", \"sphinx_rtd_theme\",],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"nbval\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"allennlp\", \"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"dask[dataframe]\",\n \"dask-ml\",\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fanova\",\n \"lightgbm\",\n \"mock\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"pytorch-ignite\",\n \"scikit-learn>=0.19.0\",\n \"scikit-optimize\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution([\"pfnopt\"])\nif pfnopt_pkg is not None:\n msg = (\n \"We detected that PFNOpt is installed in your environment.\\n\"\n \"PFNOpt has been renamed Optuna. Please uninstall the old\\n\"\n \"PFNOpt in advance (e.g. 
by executing `$ pip uninstall pfnopt`).\"\n )\n print(msg)\n exit(1)\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/rdb/alembic.ini\",\n \"storages/rdb/alembic/*.*\",\n \"storages/rdb/alembic/versions/*.*\",\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\"console_scripts\": [\"optuna = optuna.cli:main\"]},\n)\n", "path": "setup.py" } ]
diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 389a100b8e..faecf271c6 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -53,6 +53,15 @@ jobs: done env: OMP_NUM_THREADS: 1 + - name: Run examples + run: | + for file in `find examples -name '*.ipynb'` + do + echo $file + pytest --nbval-lax $file > /dev/null + done + env: + OMP_NUM_THREADS: 1 - name: Run multi-node examples run: | STORAGE_URL=sqlite:///example.db diff --git a/examples/visualization/plot_study.ipynb b/examples/visualization/plot_study.ipynb index e60829cf3a..07bdc6f71d 100644 --- a/examples/visualization/plot_study.ipynb +++ b/examples/visualization/plot_study.ipynb @@ -136,7 +136,7 @@ "def objective(trial):\n", " \n", " clf = MLPClassifier(\n", - " hidden_layer_sizes=tuple([trial.suggest_int(f'n_units_l{i}', 32, 64) for i in range(3)]),\n", + " hidden_layer_sizes=tuple([trial.suggest_int('n_units_l{}'.format(i), 32, 64) for i in range(3)]),\n", " learning_rate_init=trial.suggest_loguniform('lr_init', 1e-5, 1e-1),\n", " )\n", "\n", diff --git a/setup.py b/setup.py index aacb468d71..7f4fdcf78f 100644 --- a/setup.py +++ b/setup.py @@ -61,6 +61,7 @@ def get_extras_require() -> Dict[str, List[str]]: "mlflow", "mpi4py", "mxnet", + "nbval", "pytorch-ignite", "scikit-image", "scikit-learn",
learningequality__kolibri-8048
Context not transferred from Crowdin ### Observed behavior Initially noted by @radinamatic This [Vue SFC](https://github.com/learningequality/kolibri/blob/release-v0.14.x/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue#L68) did not have the [context for its translation](https://crowdin.com/translate/kolibri/3798/en-es?filter=basic&value=0#275962) transferred into the code. This issue should be fixed in the context of 0.15. This work may ultimately be blocked by some necessary work in https://github.com/learningequality/kolibri/issues/7709 ### Expected behavior `yarn transfercontext` should transfer all context from Crowdin into the components associated with the translation strings. ### User-facing consequences Internal facing only - but context will not be available in the code, which can result in losing context when uploading the next round of translations. ### Context Kolibri 0.15
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nFor usage instructions, see:\n https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport argparse\nimport base64\nimport io\nimport json\nimport logging\nimport mimetypes\nimport os\nimport re\nimport sys\nimport tempfile\n\nimport noto_source\nimport utils\nfrom fontTools import merge\nfrom fontTools import subset\n\nlogging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)\nlogging.getLogger(\"fontTools\").setLevel(logging.WARNING)\nlogging.StreamHandler(sys.stdout)\n\n\n\"\"\"\nConstants\n\"\"\"\n\nOUTPUT_PATH = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n os.pardir,\n os.pardir,\n \"kolibri\",\n \"core\",\n \"static\",\n \"assets\",\n \"fonts\",\n )\n)\n\n\n# Sets the source date epoch to 1/1/21 to prevent temporary files from\n# getting different headers on each run, leading to non-glyph-related changes to\n# their base64 encoding\n# ref: https://github.com/fonttools/fonttools/issues/1135\nos.environ[\"SOURCE_DATE_EPOCH\"] = \"1609459200000\"\nFONT_TOOLS_OPTIONS = subset.Options()\nFONT_TOOLS_OPTIONS.flavor = \"woff\" # most widely supported format\nFONT_TOOLS_OPTIONS.ignore_missing_unicodes = True # important for subsetting\n\n# basic latin glyphs\nNOTO_SANS_LATIN = \"NotoSans\"\n\n# font family name conventions\nSCOPE_FULL = \"noto-full\"\nSCOPE_SUBSET = \"noto-subset\"\nSCOPE_COMMON = \"noto-common\"\n\n\"\"\"\nShared helpers\n\"\"\"\n\n\n_FONT_FACE = \"\"\"\n@font-face {{\n font-family: '{family}';\n src: url('{url}') format('woff');\n font-style: normal;\n font-weight: {weight};\n unicode-range: {unicodes};\n font-display: swap;\n}}\n\"\"\"\n\n\ndef _gen_font_face(family, url, is_bold, unicodes):\n weight = \"bold\" if is_bold else \"normal\"\n return _FONT_FACE.format(family=family, url=url, weight=weight, unicodes=unicodes)\n\n\ndef _scoped(scope, name):\n return \"{}.{}\".format(scope, name)\n\n\[email protected]\ndef _woff_font_path(name, is_bold):\n file_name = \"{name}.{weight}.woff\".format(\n name=name, weight=\"700\" if is_bold else \"400\"\n )\n return os.path.join(OUTPUT_PATH, file_name)\n\n\ndef _load_font(path):\n guess = mimetypes.guess_type(path)\n if guess[0] not in [\n \"font/ttc\",\n \"font/ttf\",\n \"font/otf\",\n \"font/woff\",\n \"application/font-sfnt\",\n \"application/font-woff\",\n ]:\n logging.error(\"Not a font file: {}\".format(path))\n logging.error(\"Guessed mimetype: '{}'\".format(guess[0]))\n logging.error(\"If this is a text file: do you have Git LFS installed?\")\n sys.exit(1)\n try:\n return subset.load_font(path, FONT_TOOLS_OPTIONS, dontLoadGlyphNames=True)\n except FileNotFoundError as e: # noqa F821\n logging.error(\"Could not load font: {}\".format(str(e)))\n logging.error(\"You may need to run: `make i18n-download-source-fonts`\")\n sys.exit(1)\n\n\[email protected]\ndef _font_priorities(default_font):\n \"\"\"\n Given a default font, return a list of all possible font names roughly in the order\n that we ought to look for glyphs in. 
Many fonts contain overlapping sets of glyphs.\n\n Without doing this: we risk loading a bunch of random font files just because they\n happen to contain one of the glyphs, and we also risk loading the 'wrong' version\n of the glyphs if they happen to differ.\n \"\"\"\n\n # start with the default\n font_names = [default_font]\n\n # look in the latin set next\n if default_font is not NOTO_SANS_LATIN:\n font_names.append(NOTO_SANS_LATIN)\n\n # then look at the rest of the supported languages' default fonts\n for lang_info in utils.available_languages():\n name = lang_info[utils.KEY_DEFAULT_FONT]\n if name not in font_names:\n font_names.append(name)\n\n # finally look at the remaining langauges\n font_names.extend([fn for fn in noto_source.FONT_MANIFEST if fn not in font_names])\n return font_names\n\n\[email protected]\ndef _font_glyphs(font_path):\n \"\"\"\n extract set of all glyphs from a font\n \"\"\"\n glyphs = set()\n for table in _load_font(font_path)[\"cmap\"].tables:\n glyphs |= set(table.cmap.keys())\n return glyphs\n\n\ndef _clean_up(scope):\n \"\"\"\n Delete all files in OUTPUT_PATH that match the scope\n \"\"\"\n css_pattern = r\"{}.*?\\.css\".format(scope)\n woff_pattern = r\"{}.*?\\.woff\".format(scope)\n for name in os.listdir(OUTPUT_PATH):\n if re.match(css_pattern, name) or re.match(woff_pattern, name):\n os.unlink(os.path.join(OUTPUT_PATH, name))\n\n\n\"\"\"\nCSS helpers\n\"\"\"\n\n\nCSS_HEADER = \"\"\"\n/*\n * This is an auto-generated file, so any manual edits will be overridden.\n *\n * To regenerate, see instructions here:\n * https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html\n *\n * This file was generated by build_tools/i18n/fonts.py\n */\n\"\"\"\n\n\ndef _list_to_ranges(input_list):\n \"\"\"\n Iterator of ranges of contiguous numbers from a list of integers.\n Ranges returned are [x, y) – in other words, y is non-inclusive.\n (from: http://code.activestate.com/recipes/496682/)\n \"\"\"\n new_list = list(input_list)\n new_list.sort()\n start = new_list[0]\n currentrange = [start, start + 1]\n for item in new_list[1:]:\n if currentrange[1] == item:\n currentrange[1] += 1 # contiguous\n else:\n yield tuple(currentrange) # new range start\n currentrange = [item, item + 1]\n yield tuple(currentrange) # last range\n\n\ndef _fmt_code(code):\n return \"{:x}\".format(code).upper()\n\n\ndef _fmt_range(glyphs):\n \"\"\"\n Generates a font-face-compatible 'unicode range' attribute for a given set of glyphs\n \"\"\"\n fmt_ranges = []\n for r in _list_to_ranges(sorted(glyphs)):\n if r[0] == r[1] - 1:\n fmt_ranges.append(\"U+{}\".format(_fmt_code(r[0])))\n else:\n fmt_ranges.append(\"U+{}-{}\".format(_fmt_code(r[0]), _fmt_code(r[1] - 1)))\n return \",\".join(fmt_ranges)\n\n\n\"\"\"\nFull Fonts\n\"\"\"\n\n\ndef _full_font_face(font_family, font_name, is_bold, omit_glyphs=set()):\n \"\"\"\n generate the CSS reference for a single full font\n \"\"\"\n file_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)\n file_name = os.path.basename(file_path)\n glyphs = _font_glyphs(file_path) - omit_glyphs\n if not glyphs:\n return \"\"\n return _gen_font_face(\n font_family, file_name, is_bold=is_bold, unicodes=_fmt_range(glyphs)\n )\n\n\ndef _gen_full_css_modern(lang_info):\n \"\"\"\n Generates listing for all full fonts, segmented by unicode ranges and weights\n \"\"\"\n\n # skip previously accounted for glyphs so there is no overlap between font-faces\n previous_glyphs = set()\n\n # all available fonts\n font_faces = []\n for font_name in 
_font_priorities(lang_info[utils.KEY_DEFAULT_FONT]):\n font_faces.append(\n _full_font_face(\n SCOPE_FULL, font_name, is_bold=False, omit_glyphs=previous_glyphs\n )\n )\n font_faces.append(\n _full_font_face(\n SCOPE_FULL, font_name, is_bold=True, omit_glyphs=previous_glyphs\n )\n )\n\n # Assumes all four variants have the same glyphs, from the content Regular font\n previous_glyphs |= _font_glyphs(\n _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)\n )\n\n output_name = os.path.join(\n OUTPUT_PATH,\n \"{}.modern.css\".format(_scoped(SCOPE_FULL, lang_info[utils.KEY_INTL_CODE])),\n )\n logging.info(\"Writing {}\".format(output_name))\n with open(output_name, \"w\") as f:\n f.write(CSS_HEADER)\n f.write(\"\".join(font_faces))\n\n\ndef _gen_full_css_basic(lang_info):\n output_name = os.path.join(\n OUTPUT_PATH,\n \"{}.basic.css\".format(_scoped(SCOPE_FULL, lang_info[utils.KEY_INTL_CODE])),\n )\n logging.info(\"Writing {}\".format(output_name))\n with open(output_name, \"w\") as f:\n f.write(CSS_HEADER)\n default_font = lang_info[utils.KEY_DEFAULT_FONT]\n f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=False))\n f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=True))\n\n\ndef _write_full_font(font_name, is_bold):\n font = _load_font(noto_source.get_path(font_name, is_bold=is_bold))\n output_name = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)\n logging.info(\"Writing {}\".format(output_name))\n font.save(output_name)\n\n\ndef command_gen_full_fonts():\n logging.info(\"generating full fonts...\")\n\n _clean_up(SCOPE_FULL)\n\n for font_name in noto_source.FONT_MANIFEST:\n _write_full_font(font_name, is_bold=False)\n _write_full_font(font_name, is_bold=True)\n\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang_info in languages:\n _gen_full_css_modern(lang_info)\n _gen_full_css_basic(lang_info)\n\n logging.info(\"finished generating full fonts\")\n\n\n\"\"\"\nSubset fonts\n\"\"\"\n\n\ndef _chunks(string, n=72):\n \"\"\"\n Yield successive n-sized chunks from string\n \"\"\"\n for i in range(0, len(string), n):\n yield string[i : i + n]\n\n\ndef _write_inline_font(file_object, font_path, font_family, is_bold):\n \"\"\"\n Inlines a font as base64 encoding within a CSS file\n \"\"\"\n with io.open(font_path, mode=\"rb\") as f:\n data = f.read()\n data_uri = \"data:application/x-font-woff;charset=utf-8;base64,\\\\\\n{}\".format(\n \"\\\\\\n\".join(_chunks(base64.b64encode(data).decode()))\n )\n glyphs = _font_glyphs(font_path)\n if not glyphs:\n return\n file_object.write(\n _gen_font_face(\n family=font_family,\n url=data_uri,\n is_bold=is_bold,\n unicodes=_fmt_range(glyphs),\n )\n )\n\n\ndef _generate_inline_font_css(name, font_family):\n \"\"\"\n Generate CSS and clean up inlined woff files\n \"\"\"\n\n font_path_reg = _woff_font_path(name, is_bold=False)\n font_path_bold = _woff_font_path(name, is_bold=True)\n\n output_name = os.path.join(OUTPUT_PATH, \"{}.css\".format(name))\n logging.info(\"Writing {}\".format(output_name))\n with open(output_name, \"w\") as f:\n f.write(CSS_HEADER)\n _write_inline_font(f, font_path_reg, font_family, is_bold=False)\n _write_inline_font(f, font_path_bold, font_family, is_bold=True)\n\n os.unlink(font_path_reg)\n os.unlink(font_path_bold)\n\n\ndef _get_subset_font(source_file_path, text):\n \"\"\"\n Given a source file and some text, returns a new, in-memory fontTools Font object\n that has only the glyphs specified in the set.\n\n Note that passing actual text 
instead of a glyph set to the subsetter allows it to\n generate appropriate ligatures and other features important for correct rendering.\n \"\"\"\n if not os.path.exists(source_file_path):\n logging.error(\"'{}' not found\".format(source_file_path))\n\n font = _load_font(source_file_path)\n subsetter = subset.Subsetter(options=FONT_TOOLS_OPTIONS)\n subsetter.populate(text=text)\n subsetter.subset(font)\n return font\n\n\ndef _get_lang_strings(locale_dir):\n \"\"\"\n Text used in a particular language\n \"\"\"\n\n strings = []\n\n for file_name in os.listdir(locale_dir):\n if not file_name.endswith(\".json\"):\n continue\n\n file_path = os.path.join(locale_dir, file_name)\n with io.open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n lang_strings = json.load(f).values()\n\n for s in lang_strings:\n s = re.sub(r\"\\W\", \" \", s) # clean whitespace\n strings.append(s)\n strings.append(s.upper())\n\n return strings\n\n\[email protected]\ndef _get_common_strings():\n \"\"\"\n Text useful for all languages: displaying the language switcher, Kolibri version\n numbers, symbols, and other un-translated text\n \"\"\"\n\n # Special characters that are used directly in untranslated template strings.\n # Search the codebase with this regex to find new ones: [^\\x00-\\x7F©–—…‘’“”•→›]\n strings = [\n chr(0x0), # null\n \"©\",\n \"–\", # en dash\n \"—\", # em dash\n \"…\",\n \"‘\",\n \"’\",\n \"“\",\n \"”\",\n \"•\",\n \"●\",\n \"→\",\n \"›\",\n ]\n\n # all the basic printable ascii characters\n strings.extend([chr(c) for c in range(32, 127)])\n\n # text from language names, both lower- and upper-case\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang in languages:\n strings.append(lang[utils.KEY_LANG_NAME])\n strings.append(lang[utils.KEY_LANG_NAME].upper())\n strings.append(lang[utils.KEY_ENG_NAME])\n strings.append(lang[utils.KEY_ENG_NAME].upper())\n\n return strings\n\n\ndef _merge_fonts(fonts, output_file_path):\n \"\"\"\n Given a list of fontTools font objects, merge them and export to output_file_path.\n\n Implemenatation note: it would have been nice to pass the fonts directly to the\n merger, but the current fontTools implementation of Merger takes a list of file names\n \"\"\"\n tmp = tempfile.gettempdir()\n f_names = []\n for i, f in enumerate(fonts):\n tmp_font_path = os.path.join(tmp, \"{}.woff\".format(i))\n f_names.append(tmp_font_path)\n f.save(tmp_font_path)\n merger = merge.Merger(options=FONT_TOOLS_OPTIONS)\n merged_font = merger.merge(f_names)\n merged_font.save(output_file_path)\n logging.info(\"created {}\".format(output_file_path))\n\n\ndef _cannot_merge(font):\n # all fonts must have equal units per em for merging, and 1000 is most common\n return font[\"head\"].unitsPerEm != 1000\n\n\ndef _subset_and_merge_fonts(text, default_font, subset_reg_path, subset_bold_path):\n \"\"\"\n Given text, generate both a bold and a regular font that can render it.\n \"\"\"\n reg_subsets = []\n bold_subsets = []\n skipped = []\n\n # track which glyphs are left\n remaining_glyphs = set([ord(c) for c in text])\n\n for font_name in _font_priorities(default_font):\n full_reg_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)\n full_bold_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=True)\n reg_subset = _get_subset_font(full_reg_path, text)\n bold_subset = _get_subset_font(full_bold_path, text)\n\n if _cannot_merge(reg_subset) or _cannot_merge(bold_subset):\n skipped.append(font_name)\n continue\n\n 
reg_subsets.append(reg_subset)\n bold_subsets.append(bold_subset)\n\n remaining_glyphs -= _font_glyphs(full_reg_path)\n if not remaining_glyphs:\n break\n\n _merge_fonts(reg_subsets, os.path.join(OUTPUT_PATH, subset_reg_path))\n _merge_fonts(bold_subsets, os.path.join(OUTPUT_PATH, subset_bold_path))\n\n\ndef command_gen_subset_fonts():\n \"\"\"\n Creates custom fonts that attempt to contain all the glyphs and other font features\n that are used in user-facing text for the translation in each language.\n\n We make a separate subset font for common strings, which generally overaps somewhat\n with the individual language subsets. This slightly increases how much the client\n needs to download on first request, but reduces Kolibri's distribution size by a\n couple megabytes.\n \"\"\"\n logging.info(\"generating subset fonts...\")\n\n _clean_up(SCOPE_COMMON)\n _clean_up(SCOPE_SUBSET)\n\n _subset_and_merge_fonts(\n text=\" \".join(_get_common_strings()),\n default_font=NOTO_SANS_LATIN,\n subset_reg_path=_woff_font_path(SCOPE_COMMON, is_bold=False),\n subset_bold_path=_woff_font_path(SCOPE_COMMON, is_bold=True),\n )\n\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang_info in languages:\n logging.info(\"gen subset for {}\".format(lang_info[utils.KEY_ENG_NAME]))\n strings = []\n strings.extend(_get_lang_strings(utils.local_locale_path(lang_info)))\n strings.extend(_get_lang_strings(utils.local_perseus_locale_path(lang_info)))\n\n name = lang_info[utils.KEY_INTL_CODE]\n _subset_and_merge_fonts(\n text=\" \".join(strings),\n default_font=lang_info[utils.KEY_DEFAULT_FONT],\n subset_reg_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=False),\n subset_bold_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=True),\n )\n\n # generate common subset file\n _generate_inline_font_css(name=SCOPE_COMMON, font_family=SCOPE_COMMON)\n\n # generate language-specific subset font files\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang in languages:\n _generate_inline_font_css(\n name=_scoped(SCOPE_SUBSET, lang[utils.KEY_INTL_CODE]),\n font_family=SCOPE_SUBSET,\n )\n\n logging.info(\"subsets created\")\n\n\n\"\"\"\nAdd source fonts\n\"\"\"\n\n\ndef command_update_font_manifest(ref):\n noto_source.update_manifest(ref)\n\n\ndef command_download_source_fonts():\n noto_source.fetch_fonts()\n\n\n\"\"\"\nMain\n\"\"\"\n\n\ndef main():\n \"\"\"\n Generates files to support both 'basic' and a 'modern' browsers.\n\n Both browsers get the common and language-specific application subset fonts inline\n to load quickly and prevent a flash of unstyled text, at least for all application\n text. Full font files are linked and will load asynchronously.\n\n # Modern behavior\n\n Newer browsers have full support for the unicode-range attribute of font-face\n definitions, which allow the browser to download fonts as-needed based on the text\n observed. This allows us to make _all_ font alphabets available, and ensures that\n content will be rendered using the best font possible for all content, regardless\n of selected app language.\n\n # Basic behavior\n\n Older browsers do not fully support the unicode-range attribute, and will eagerly\n download all referenced fonts regardless of whether or not they are needed. This\n would have an unacceptable performance impact. 
As an alternative, we provide\n references to the full fonts for the user's currently-selected language, under the\n assumption that most of the content they use will be in that language.\n\n Content viewed in other languages using the basic variant should still usually\n display, albeit using system fonts.\n \"\"\"\n\n description = \"\\n\\nProcess fonts.\\nSyntax: [command] [branch]\\n\\n\"\n parser = argparse.ArgumentParser(description=description)\n subparsers = parser.add_subparsers(dest=\"command\")\n\n subparsers.add_parser(\n \"update-font-manifest\",\n help=\"Update manifest from https://github.com/googlei18n/noto-fonts/\",\n ).add_argument(\n \"--ref\",\n help=\"Github reference, e.g. commit or tag. Defaults to head of master.\",\n type=str,\n )\n\n subparsers.add_parser(\n \"download-source-fonts\",\n help=\"Download sources from https://github.com/googlei18n/noto-fonts/\",\n )\n\n subparsers.add_parser(\n \"generate-subset-fonts\", help=\"Generate subset fonts based on app text\"\n )\n\n subparsers.add_parser(\"generate-full-fonts\", help=\"Generate full fonts\")\n\n args = parser.parse_args()\n\n if args.command == \"update-font-manifest\":\n command_update_font_manifest(args.ref)\n elif args.command == \"download-source-fonts\":\n command_download_source_fonts()\n elif args.command == \"generate-subset-fonts\":\n command_gen_subset_fonts()\n elif args.command == \"generate-full-fonts\":\n command_gen_full_fonts()\n else:\n logging.warning(\"Unknown command\\n\")\n parser.print_help(sys.stderr)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "packages/kolibri-tools/lib/i18n/fonts.py" } ]
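The `fonts.py` listing above builds CSS `unicode-range` attributes by collapsing a font's code points into contiguous runs (`_list_to_ranges`) and formatting them as `U+XXXX` or `U+XXXX-YYYY` tokens (`_fmt_range`). Here is a self-contained sketch of that formatting step, re-derived for illustration rather than copied from the file:

```python
from typing import Iterable, Iterator, List, Tuple


def to_ranges(codepoints: Iterable[int]) -> Iterator[Tuple[int, int]]:
    """Yield [start, end) runs of contiguous code points, as _list_to_ranges does."""
    points = sorted(set(codepoints))
    if not points:
        return
    start = prev = points[0]
    for cp in points[1:]:
        if cp != prev + 1:
            yield (start, prev + 1)
            start = cp
        prev = cp
    yield (start, prev + 1)


def unicode_range(codepoints: Iterable[int]) -> str:
    """Format code points as a CSS unicode-range value, like _fmt_range."""
    parts: List[str] = []
    for start, end in to_ranges(codepoints):
        if end - start == 1:
            parts.append("U+{:X}".format(start))
        else:
            parts.append("U+{:X}-{:X}".format(start, end - 1))
    return ",".join(parts)


# Basic Latin capitals plus one lone bullet character:
print(unicode_range(list(range(0x41, 0x5B)) + [0x2022]))  # -> U+41-5A,U+2022
```

Emitting ranges instead of individual code points keeps the generated `@font-face` rules small even for fonts covering thousands of glyphs.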
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nFor usage instructions, see:\n https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport argparse\nimport base64\nimport io\nimport json\nimport logging\nimport mimetypes\nimport os\nimport re\nimport sys\nimport tempfile\n\nimport noto_source\nimport utils\nfrom fontTools import merge\nfrom fontTools import subset\n\nlogging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)\nlogging.getLogger(\"fontTools\").setLevel(logging.WARNING)\nlogging.StreamHandler(sys.stdout)\n\n\n\"\"\"\nConstants\n\"\"\"\n\nOUTPUT_PATH = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n os.pardir,\n os.pardir,\n os.pardir,\n os.pardir,\n \"kolibri\",\n \"core\",\n \"static\",\n \"assets\",\n \"fonts\",\n )\n)\n\n\n# Sets the source date epoch to 1/1/21 to prevent temporary files from\n# getting different headers on each run, leading to non-glyph-related changes to\n# their base64 encoding\n# ref: https://github.com/fonttools/fonttools/issues/1135\nos.environ[\"SOURCE_DATE_EPOCH\"] = \"1609459200000\"\nFONT_TOOLS_OPTIONS = subset.Options()\nFONT_TOOLS_OPTIONS.flavor = \"woff\" # most widely supported format\nFONT_TOOLS_OPTIONS.ignore_missing_unicodes = True # important for subsetting\n\n# basic latin glyphs\nNOTO_SANS_LATIN = \"NotoSans\"\n\n# font family name conventions\nSCOPE_FULL = \"noto-full\"\nSCOPE_SUBSET = \"noto-subset\"\nSCOPE_COMMON = \"noto-common\"\n\n\"\"\"\nShared helpers\n\"\"\"\n\n\n_FONT_FACE = \"\"\"\n@font-face {{\n font-family: '{family}';\n src: url('{url}') format('woff');\n font-style: normal;\n font-weight: {weight};\n unicode-range: {unicodes};\n font-display: swap;\n}}\n\"\"\"\n\n\ndef _gen_font_face(family, url, is_bold, unicodes):\n weight = \"bold\" if is_bold else \"normal\"\n return _FONT_FACE.format(family=family, url=url, weight=weight, unicodes=unicodes)\n\n\ndef _scoped(scope, name):\n return \"{}.{}\".format(scope, name)\n\n\[email protected]\ndef _woff_font_path(name, is_bold):\n file_name = \"{name}.{weight}.woff\".format(\n name=name, weight=\"700\" if is_bold else \"400\"\n )\n return os.path.join(OUTPUT_PATH, file_name)\n\n\ndef _load_font(path):\n guess = mimetypes.guess_type(path)\n if guess[0] not in [\n \"font/ttc\",\n \"font/ttf\",\n \"font/otf\",\n \"font/woff\",\n \"application/font-sfnt\",\n \"application/font-woff\",\n ]:\n logging.error(\"Not a font file: {}\".format(path))\n logging.error(\"Guessed mimetype: '{}'\".format(guess[0]))\n logging.error(\"If this is a text file: do you have Git LFS installed?\")\n sys.exit(1)\n try:\n return subset.load_font(path, FONT_TOOLS_OPTIONS, dontLoadGlyphNames=True)\n except FileNotFoundError as e: # noqa F821\n logging.error(\"Could not load font: {}\".format(str(e)))\n logging.error(\"You may need to run: `make i18n-download-source-fonts`\")\n sys.exit(1)\n\n\[email protected]\ndef _font_priorities(default_font):\n \"\"\"\n Given a default font, return a list of all possible font names roughly in the order\n that we ought to look for glyphs in. 
Many fonts contain overlapping sets of glyphs.\n\n Without doing this: we risk loading a bunch of random font files just because they\n happen to contain one of the glyphs, and we also risk loading the 'wrong' version\n of the glyphs if they happen to differ.\n \"\"\"\n\n # start with the default\n font_names = [default_font]\n\n # look in the latin set next\n if default_font is not NOTO_SANS_LATIN:\n font_names.append(NOTO_SANS_LATIN)\n\n # then look at the rest of the supported languages' default fonts\n for lang_info in utils.available_languages():\n name = lang_info[utils.KEY_DEFAULT_FONT]\n if name not in font_names:\n font_names.append(name)\n\n # finally look at the remaining langauges\n font_names.extend([fn for fn in noto_source.FONT_MANIFEST if fn not in font_names])\n return font_names\n\n\[email protected]\ndef _font_glyphs(font_path):\n \"\"\"\n extract set of all glyphs from a font\n \"\"\"\n glyphs = set()\n for table in _load_font(font_path)[\"cmap\"].tables:\n glyphs |= set(table.cmap.keys())\n return glyphs\n\n\ndef _clean_up(scope):\n \"\"\"\n Delete all files in OUTPUT_PATH that match the scope\n \"\"\"\n css_pattern = r\"{}.*?\\.css\".format(scope)\n woff_pattern = r\"{}.*?\\.woff\".format(scope)\n for name in os.listdir(OUTPUT_PATH):\n if re.match(css_pattern, name) or re.match(woff_pattern, name):\n os.unlink(os.path.join(OUTPUT_PATH, name))\n\n\n\"\"\"\nCSS helpers\n\"\"\"\n\n\nCSS_HEADER = \"\"\"\n/*\n * This is an auto-generated file, so any manual edits will be overridden.\n *\n * To regenerate, see instructions here:\n * https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html\n *\n * This file was generated by build_tools/i18n/fonts.py\n */\n\"\"\"\n\n\ndef _list_to_ranges(input_list):\n \"\"\"\n Iterator of ranges of contiguous numbers from a list of integers.\n Ranges returned are [x, y) – in other words, y is non-inclusive.\n (from: http://code.activestate.com/recipes/496682/)\n \"\"\"\n new_list = list(input_list)\n new_list.sort()\n start = new_list[0]\n currentrange = [start, start + 1]\n for item in new_list[1:]:\n if currentrange[1] == item:\n currentrange[1] += 1 # contiguous\n else:\n yield tuple(currentrange) # new range start\n currentrange = [item, item + 1]\n yield tuple(currentrange) # last range\n\n\ndef _fmt_code(code):\n return \"{:x}\".format(code).upper()\n\n\ndef _fmt_range(glyphs):\n \"\"\"\n Generates a font-face-compatible 'unicode range' attribute for a given set of glyphs\n \"\"\"\n fmt_ranges = []\n for r in _list_to_ranges(sorted(glyphs)):\n if r[0] == r[1] - 1:\n fmt_ranges.append(\"U+{}\".format(_fmt_code(r[0])))\n else:\n fmt_ranges.append(\"U+{}-{}\".format(_fmt_code(r[0]), _fmt_code(r[1] - 1)))\n return \",\".join(fmt_ranges)\n\n\n\"\"\"\nFull Fonts\n\"\"\"\n\n\ndef _full_font_face(font_family, font_name, is_bold, omit_glyphs=set()):\n \"\"\"\n generate the CSS reference for a single full font\n \"\"\"\n file_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)\n file_name = os.path.basename(file_path)\n glyphs = _font_glyphs(file_path) - omit_glyphs\n if not glyphs:\n return \"\"\n return _gen_font_face(\n font_family, file_name, is_bold=is_bold, unicodes=_fmt_range(glyphs)\n )\n\n\ndef _gen_full_css_modern(lang_info):\n \"\"\"\n Generates listing for all full fonts, segmented by unicode ranges and weights\n \"\"\"\n\n # skip previously accounted for glyphs so there is no overlap between font-faces\n previous_glyphs = set()\n\n # all available fonts\n font_faces = []\n for font_name in 
_font_priorities(lang_info[utils.KEY_DEFAULT_FONT]):\n font_faces.append(\n _full_font_face(\n SCOPE_FULL, font_name, is_bold=False, omit_glyphs=previous_glyphs\n )\n )\n font_faces.append(\n _full_font_face(\n SCOPE_FULL, font_name, is_bold=True, omit_glyphs=previous_glyphs\n )\n )\n\n # Assumes all four variants have the same glyphs, from the content Regular font\n previous_glyphs |= _font_glyphs(\n _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)\n )\n\n output_name = os.path.join(\n OUTPUT_PATH,\n \"{}.modern.css\".format(_scoped(SCOPE_FULL, lang_info[utils.KEY_INTL_CODE])),\n )\n logging.info(\"Writing {}\".format(output_name))\n with open(output_name, \"w\") as f:\n f.write(CSS_HEADER)\n f.write(\"\".join(font_faces))\n\n\ndef _gen_full_css_basic(lang_info):\n output_name = os.path.join(\n OUTPUT_PATH,\n \"{}.basic.css\".format(_scoped(SCOPE_FULL, lang_info[utils.KEY_INTL_CODE])),\n )\n logging.info(\"Writing {}\".format(output_name))\n with open(output_name, \"w\") as f:\n f.write(CSS_HEADER)\n default_font = lang_info[utils.KEY_DEFAULT_FONT]\n f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=False))\n f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=True))\n\n\ndef _write_full_font(font_name, is_bold):\n font = _load_font(noto_source.get_path(font_name, is_bold=is_bold))\n output_name = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)\n logging.info(\"Writing {}\".format(output_name))\n font.save(output_name)\n\n\ndef command_gen_full_fonts():\n logging.info(\"generating full fonts...\")\n\n _clean_up(SCOPE_FULL)\n\n for font_name in noto_source.FONT_MANIFEST:\n _write_full_font(font_name, is_bold=False)\n _write_full_font(font_name, is_bold=True)\n\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang_info in languages:\n _gen_full_css_modern(lang_info)\n _gen_full_css_basic(lang_info)\n\n logging.info(\"finished generating full fonts\")\n\n\n\"\"\"\nSubset fonts\n\"\"\"\n\n\ndef _chunks(string, n=72):\n \"\"\"\n Yield successive n-sized chunks from string\n \"\"\"\n for i in range(0, len(string), n):\n yield string[i : i + n]\n\n\ndef _write_inline_font(file_object, font_path, font_family, is_bold):\n \"\"\"\n Inlines a font as base64 encoding within a CSS file\n \"\"\"\n with io.open(font_path, mode=\"rb\") as f:\n data = f.read()\n data_uri = \"data:application/x-font-woff;charset=utf-8;base64,\\\\\\n{}\".format(\n \"\\\\\\n\".join(_chunks(base64.b64encode(data).decode()))\n )\n glyphs = _font_glyphs(font_path)\n if not glyphs:\n return\n file_object.write(\n _gen_font_face(\n family=font_family,\n url=data_uri,\n is_bold=is_bold,\n unicodes=_fmt_range(glyphs),\n )\n )\n\n\ndef _generate_inline_font_css(name, font_family):\n \"\"\"\n Generate CSS and clean up inlined woff files\n \"\"\"\n\n font_path_reg = _woff_font_path(name, is_bold=False)\n font_path_bold = _woff_font_path(name, is_bold=True)\n\n output_name = os.path.join(OUTPUT_PATH, \"{}.css\".format(name))\n logging.info(\"Writing {}\".format(output_name))\n with open(output_name, \"w\") as f:\n f.write(CSS_HEADER)\n _write_inline_font(f, font_path_reg, font_family, is_bold=False)\n _write_inline_font(f, font_path_bold, font_family, is_bold=True)\n\n os.unlink(font_path_reg)\n os.unlink(font_path_bold)\n\n\ndef _get_subset_font(source_file_path, text):\n \"\"\"\n Given a source file and some text, returns a new, in-memory fontTools Font object\n that has only the glyphs specified in the set.\n\n Note that passing actual text 
instead of a glyph set to the subsetter allows it to\n generate appropriate ligatures and other features important for correct rendering.\n \"\"\"\n if not os.path.exists(source_file_path):\n logging.error(\"'{}' not found\".format(source_file_path))\n\n font = _load_font(source_file_path)\n subsetter = subset.Subsetter(options=FONT_TOOLS_OPTIONS)\n subsetter.populate(text=text)\n subsetter.subset(font)\n return font\n\n\ndef _get_lang_strings(locale_dir):\n \"\"\"\n Text used in a particular language\n \"\"\"\n\n strings = []\n\n for file_name in os.listdir(locale_dir):\n if not file_name.endswith(\".json\"):\n continue\n\n file_path = os.path.join(locale_dir, file_name)\n with io.open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n lang_strings = json.load(f).values()\n\n for s in lang_strings:\n s = re.sub(r\"\\W\", \" \", s) # clean whitespace\n strings.append(s)\n strings.append(s.upper())\n\n return strings\n\n\[email protected]\ndef _get_common_strings():\n \"\"\"\n Text useful for all languages: displaying the language switcher, Kolibri version\n numbers, symbols, and other un-translated text\n \"\"\"\n\n # Special characters that are used directly in untranslated template strings.\n # Search the codebase with this regex to find new ones: [^\\x00-\\x7F©–—…‘’“”•→›]\n strings = [\n chr(0x0), # null\n \"©\",\n \"–\", # en dash\n \"—\", # em dash\n \"…\",\n \"‘\",\n \"’\",\n \"“\",\n \"”\",\n \"•\",\n \"●\",\n \"→\",\n \"›\",\n ]\n\n # all the basic printable ascii characters\n strings.extend([chr(c) for c in range(32, 127)])\n\n # text from language names, both lower- and upper-case\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang in languages:\n strings.append(lang[utils.KEY_LANG_NAME])\n strings.append(lang[utils.KEY_LANG_NAME].upper())\n strings.append(lang[utils.KEY_ENG_NAME])\n strings.append(lang[utils.KEY_ENG_NAME].upper())\n\n return strings\n\n\ndef _merge_fonts(fonts, output_file_path):\n \"\"\"\n Given a list of fontTools font objects, merge them and export to output_file_path.\n\n Implemenatation note: it would have been nice to pass the fonts directly to the\n merger, but the current fontTools implementation of Merger takes a list of file names\n \"\"\"\n tmp = tempfile.gettempdir()\n f_names = []\n for i, f in enumerate(fonts):\n tmp_font_path = os.path.join(tmp, \"{}.woff\".format(i))\n f_names.append(tmp_font_path)\n f.save(tmp_font_path)\n merger = merge.Merger(options=FONT_TOOLS_OPTIONS)\n merged_font = merger.merge(f_names)\n merged_font.save(output_file_path)\n logging.info(\"created {}\".format(output_file_path))\n\n\ndef _cannot_merge(font):\n # all fonts must have equal units per em for merging, and 1000 is most common\n return font[\"head\"].unitsPerEm != 1000\n\n\ndef _subset_and_merge_fonts(text, default_font, subset_reg_path, subset_bold_path):\n \"\"\"\n Given text, generate both a bold and a regular font that can render it.\n \"\"\"\n reg_subsets = []\n bold_subsets = []\n skipped = []\n\n # track which glyphs are left\n remaining_glyphs = set([ord(c) for c in text])\n\n for font_name in _font_priorities(default_font):\n full_reg_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)\n full_bold_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=True)\n reg_subset = _get_subset_font(full_reg_path, text)\n bold_subset = _get_subset_font(full_bold_path, text)\n\n if _cannot_merge(reg_subset) or _cannot_merge(bold_subset):\n skipped.append(font_name)\n continue\n\n 
reg_subsets.append(reg_subset)\n bold_subsets.append(bold_subset)\n\n remaining_glyphs -= _font_glyphs(full_reg_path)\n if not remaining_glyphs:\n break\n\n _merge_fonts(reg_subsets, os.path.join(OUTPUT_PATH, subset_reg_path))\n _merge_fonts(bold_subsets, os.path.join(OUTPUT_PATH, subset_bold_path))\n\n\ndef command_gen_subset_fonts():\n \"\"\"\n Creates custom fonts that attempt to contain all the glyphs and other font features\n that are used in user-facing text for the translation in each language.\n\n We make a separate subset font for common strings, which generally overaps somewhat\n with the individual language subsets. This slightly increases how much the client\n needs to download on first request, but reduces Kolibri's distribution size by a\n couple megabytes.\n \"\"\"\n logging.info(\"generating subset fonts...\")\n\n _clean_up(SCOPE_COMMON)\n _clean_up(SCOPE_SUBSET)\n\n _subset_and_merge_fonts(\n text=\" \".join(_get_common_strings()),\n default_font=NOTO_SANS_LATIN,\n subset_reg_path=_woff_font_path(SCOPE_COMMON, is_bold=False),\n subset_bold_path=_woff_font_path(SCOPE_COMMON, is_bold=True),\n )\n\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang_info in languages:\n logging.info(\"gen subset for {}\".format(lang_info[utils.KEY_ENG_NAME]))\n strings = []\n strings.extend(_get_lang_strings(utils.local_locale_path(lang_info)))\n strings.extend(_get_lang_strings(utils.local_perseus_locale_path(lang_info)))\n\n name = lang_info[utils.KEY_INTL_CODE]\n _subset_and_merge_fonts(\n text=\" \".join(strings),\n default_font=lang_info[utils.KEY_DEFAULT_FONT],\n subset_reg_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=False),\n subset_bold_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=True),\n )\n\n # generate common subset file\n _generate_inline_font_css(name=SCOPE_COMMON, font_family=SCOPE_COMMON)\n\n # generate language-specific subset font files\n languages = utils.available_languages(include_in_context=True, include_english=True)\n for lang in languages:\n _generate_inline_font_css(\n name=_scoped(SCOPE_SUBSET, lang[utils.KEY_INTL_CODE]),\n font_family=SCOPE_SUBSET,\n )\n\n logging.info(\"subsets created\")\n\n\n\"\"\"\nAdd source fonts\n\"\"\"\n\n\ndef command_update_font_manifest(ref):\n noto_source.update_manifest(ref)\n\n\ndef command_download_source_fonts():\n noto_source.fetch_fonts()\n\n\n\"\"\"\nMain\n\"\"\"\n\n\ndef main():\n \"\"\"\n Generates files to support both 'basic' and a 'modern' browsers.\n\n Both browsers get the common and language-specific application subset fonts inline\n to load quickly and prevent a flash of unstyled text, at least for all application\n text. Full font files are linked and will load asynchronously.\n\n # Modern behavior\n\n Newer browsers have full support for the unicode-range attribute of font-face\n definitions, which allow the browser to download fonts as-needed based on the text\n observed. This allows us to make _all_ font alphabets available, and ensures that\n content will be rendered using the best font possible for all content, regardless\n of selected app language.\n\n # Basic behavior\n\n Older browsers do not fully support the unicode-range attribute, and will eagerly\n download all referenced fonts regardless of whether or not they are needed. This\n would have an unacceptable performance impact. 
As an alternative, we provide\n references to the full fonts for the user's currently-selected language, under the\n assumption that most of the content they use will be in that language.\n\n Content viewed in other languages using the basic variant should still usually\n display, albeit using system fonts.\n \"\"\"\n\n description = \"\\n\\nProcess fonts.\\nSyntax: [command] [branch]\\n\\n\"\n parser = argparse.ArgumentParser(description=description)\n subparsers = parser.add_subparsers(dest=\"command\")\n\n subparsers.add_parser(\n \"update-font-manifest\",\n help=\"Update manifest from https://github.com/googlei18n/noto-fonts/\",\n ).add_argument(\n \"--ref\",\n help=\"Github reference, e.g. commit or tag. Defaults to head of master.\",\n type=str,\n )\n\n subparsers.add_parser(\n \"download-source-fonts\",\n help=\"Download sources from https://github.com/googlei18n/noto-fonts/\",\n )\n\n subparsers.add_parser(\n \"generate-subset-fonts\", help=\"Generate subset fonts based on app text\"\n )\n\n subparsers.add_parser(\"generate-full-fonts\", help=\"Generate full fonts\")\n\n args = parser.parse_args()\n\n if args.command == \"update-font-manifest\":\n command_update_font_manifest(args.ref)\n elif args.command == \"download-source-fonts\":\n command_download_source_fonts()\n elif args.command == \"generate-subset-fonts\":\n command_gen_subset_fonts()\n elif args.command == \"generate-full-fonts\":\n command_gen_full_fonts()\n else:\n logging.warning(\"Unknown command\\n\")\n parser.print_help(sys.stderr)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "packages/kolibri-tools/lib/i18n/fonts.py" } ]
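Functionally, the only difference between the two `fonts.py` listings is the `OUTPUT_PATH` computation: the updated version adds two more `os.pardir` components because the script now lives four directories below the repository root (`packages/kolibri-tools/lib/i18n/`) instead of two. A small worked example of how those components resolve (the `/repo` prefix is just an illustrative POSIX path):

```python
import os

# fonts.py sits at <repo>/packages/kolibri-tools/lib/i18n/fonts.py, so four
# os.pardir components are needed to climb back to <repo> before descending
# into kolibri/core/static/assets/fonts.
here = "/repo/packages/kolibri-tools/lib/i18n"
output_path = os.path.abspath(
    os.path.join(
        here,
        os.pardir,  # .../kolibri-tools/lib
        os.pardir,  # .../packages/kolibri-tools
        os.pardir,  # .../packages
        os.pardir,  # /repo
        "kolibri",
        "core",
        "static",
        "assets",
        "fonts",
    )
)
print(output_path)  # /repo/kolibri/core/static/assets/fonts
```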
diff --git a/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue b/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue index 2cd7cd7d47e..44b9381e6c1 100644 --- a/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue +++ b/kolibri/plugins/coach/assets/src/views/common/MasteryModel.vue @@ -71,7 +71,11 @@ streak: 'Get {count, number, integer} questions in a row correct', mOfN: 'Get {M, number, integer} of the last {N, number, integer} questions correct', doAll: 'Get every question correct', - unknown: 'Unknown mastery model', + unknown: { + message: 'Unknown mastery model', + context: + "Mastery model refers to the 'number of correct answers that need to be given by learners' for an exercise to be considered \"mastered\". This particular one (unknown) tries to cover for cases when the mastery is not clearly defined as 'answered X of Y questions'.", + }, }, }; diff --git a/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue b/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue index b239555dc89..5972639099b 100644 --- a/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue +++ b/kolibri/plugins/coach/assets/src/views/common/TruncatedItemList.vue @@ -32,7 +32,10 @@ $trs: { twoItems: '{item1}, {item2}', threeItems: '{item1}, {item2}, {item3}', - manyItems: '{item1}, {item2}, and {count, number, integer} others', + manyItems: { + message: '{item1}, {item2}, and {count, number, integer} others', + context: "'item' will be replaced by the name of the coach(es) in the list of classes.", + }, }, }; diff --git a/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js b/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js index bd9bcb7b516..b14adad4019 100644 --- a/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js +++ b/kolibri/plugins/coach/assets/src/views/common/status/statusStrings.js @@ -13,10 +13,16 @@ Strings variations below are defined based on the following construction: export const learnerProgressTranslators = { completed: createTranslator('LearnersCompleted', { label: '{count, plural, one {Completed by learner} other {Completed by learners}}', - labelShort: '{count, plural, other {Completed}}', + labelShort: { + message: '{count, plural, other {Completed}}', + context: 'Refers to learners:\n1 (learner) completed\n4 (learners) completed', + }, count: '{count, plural, other {Completed by}} {count, number, integer} {count, plural, one {learner} other {learners}}', - countShort: '{count, number, integer} {count, plural, other {completed}}', + countShort: { + message: '{count, number, integer} {count, plural, other {completed}}', + context: 'Refers to number of learners that completed a activity\n', + }, allOfMoreThanTwo: 'Completed by all {total, number, integer} {total, plural, one {learner} other {learners}}', allOfMoreThanTwoShort: 'Completed by all {total, number, integer}', diff --git a/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue b/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue index e9bf1a5f201..217acabb303 100644 --- a/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue +++ b/kolibri/plugins/facility/assets/src/views/CsvInfoModal.vue @@ -191,7 +191,11 @@ 'Refers to values in a column of the CSV (comma separated values) file used to import and export users.', }, listClassesAssignedL2: 'List of class names, separated by commas', - listClassesAssignedL3: 'If an existing class does not match by name, it will be created', + 
listClassesAssignedL3: { + message: 'If an existing class does not match by name, it will be created', + context: + 'Explanation that when a CSV file is used to import users and classes they are assigned to, and the CSV file contains a class name that is not already present in a facility, a new class with the name listed in the CSV file will be created. ', + }, columnNameHeader: 'Column', columnIDHeader: 'Identifier', columnInfoHeader: 'Information', diff --git a/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue b/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue index e48e5a99977..e3986089cd7 100644 --- a/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue +++ b/kolibri/plugins/learn/assets/src/views/ContentUnavailablePage.vue @@ -44,8 +44,8 @@ adminLink: 'As an administrator, you can import channels', learnerText: 'Please ask your coach or administrator for assistance', documentTitle: { - message: 'Resource unavailable', - context: '\nSimilar to a 404 not-found error for resources', + message: 'Content Unavailable', + context: '\nThis string should actually say "Resource unavailable"', }, }, }; diff --git a/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue b/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue index dbec5362b6c..294cd1fdf9b 100644 --- a/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue +++ b/kolibri/plugins/learn/assets/src/views/classes/ClassAssignmentsPage.vue @@ -66,7 +66,11 @@ }, }, $trs: { - documentTitle: 'Class assignments', + documentTitle: { + message: 'Class assignments', + context: + 'Page/tab title displayed for the Learn page when the learner is enrolled in a class. This is where the learners can see the list of lessons and quizzes coaches have opened and made available for them.', + }, }, }; diff --git a/kolibri/plugins/user/assets/src/views/AuthBase.vue b/kolibri/plugins/user/assets/src/views/AuthBase.vue index 46ba48648b7..d66d9a39ed7 100644 --- a/kolibri/plugins/user/assets/src/views/AuthBase.vue +++ b/kolibri/plugins/user/assets/src/views/AuthBase.vue @@ -254,7 +254,8 @@ whatsThis: "What's this?", restrictedAccess: { message: 'Access to Kolibri has been restricted for external devices', - context: 'Error message description', + context: + 'This warning is displayed when somebody in the same network tries to connect to Kolibri running as a standalone app on a tablet or a smartphone. It indicates that Kolibri is accessible only to the person(s) physically using that tablet or a phone, and no other devices in the network can access and use Kolibri.', }, restrictedAccessDescription: { message: diff --git a/packages/kolibri-tools/lib/i18n/SyncContext.js b/packages/kolibri-tools/lib/i18n/SyncContext.js index 12d53b38ab6..898b3c380e2 100644 --- a/packages/kolibri-tools/lib/i18n/SyncContext.js +++ b/packages/kolibri-tools/lib/i18n/SyncContext.js @@ -1,5 +1,6 @@ // Import packages const fs = require('fs'); +const os = require('os'); const path = require('path'); const glob = require('glob'); const recast = require('recast'); @@ -17,15 +18,23 @@ const CONTEXT_LINE = require('./ExtractStrings').CONTEXT_LINE; const reScriptOpen = /^[ ]*<script[^>]*>/; const reScriptClose = /^[ ]*<\/script>/; +let CROWDIN_PROJECT = get(os.env, 'CROWDIN_PROJECT', null); + +if (!CROWDIN_PROJECT) { + logging.info( + 'No env var set for CROWDIN_PROJECT. Will default to `kolibri`. 
If you are working with Kolibri Studio, please set this environment variable to `contentcuration` - the name of the root folder for the Django app.' + ); + CROWDIN_PROJECT = 'kolibri'; +} + // Glob path patterns // All JS files not in node_modules -const JS_GLOB = path.resolve('./kolibri') + '/**/*.js'; +const JS_GLOB = path.resolve(CROWDIN_PROJECT) + '/**/*.js'; // All Vue files not in node_modules -const VUE_GLOB = path.resolve('./kolibri') + '/**/*.vue'; -// We only need one set of languages - since we have the ACH -// which is a Crowdin placeholder language, we'll go there to -// get the Context. -const CSV_PATH = path.resolve('./kolibri/locale/CSV_FILES/ach/'); +const VUE_GLOB = path.resolve(CROWDIN_PROJECT) + '/**/*.vue'; +// We must select a language which will be fully translated - so we use fr-fr. +// Fully translated langauges are the only ones with full context in the CSV +const CSV_PATH = path.resolve(`./${CROWDIN_PROJECT}/locale/CSV_FILES/fr/`); // -------------------- // // Processing Functions // diff --git a/packages/kolibri-tools/lib/i18n/fonts.py b/packages/kolibri-tools/lib/i18n/fonts.py index 17ef640865d..3e4960d24e0 100644 --- a/packages/kolibri-tools/lib/i18n/fonts.py +++ b/packages/kolibri-tools/lib/i18n/fonts.py @@ -35,6 +35,8 @@ os.path.dirname(__file__), os.pardir, os.pardir, + os.pardir, + os.pardir, "kolibri", "core", "static", diff --git a/packages/kolibri-tools/lib/i18n/intl_code_gen.js b/packages/kolibri-tools/lib/i18n/intl_code_gen.js index e21c311f937..d2a4bb2fd8f 100644 --- a/packages/kolibri-tools/lib/i18n/intl_code_gen.js +++ b/packages/kolibri-tools/lib/i18n/intl_code_gen.js @@ -2,7 +2,7 @@ const path = require('path'); const fs = require('fs'); const { lint } = require('kolibri-tools/lib/lint'); -const languageInfo = require('../../kolibri/locale/language_info.json'); +const languageInfo = require('./language_info.json'); const commonHeader = ` /* @@ -41,11 +41,7 @@ const vueIntlFooter = ` const vueIntlModule = commonHeader + vueIntlHeader + languageInfo.map(generateVueIntlItems).join('') + vueIntlFooter; -const vueIntlModulePath = path.resolve( - __dirname, - '../../kolibri/core/assets/src/utils/vue-intl-locale-data.js' -); - +const vueIntlModulePath = path.resolve(__dirname, 'vue-intl-locale-data.js'); const intlHeader = `module.exports = function(locale) { switch (locale) {`; @@ -123,11 +119,7 @@ const intlFooter = ` const intlModule = commonHeader + intlHeader + languageInfo.map(generateIntlItems).join('') + intlFooter; -const intlModulePath = path.resolve( - __dirname, - '../../kolibri/core/assets/src/utils/intl-locale-data.js' -); - +const intlModulePath = path.resolve(__dirname, 'intl-locale-data.js'); fs.writeFileSync(vueIntlModulePath, vueIntlModule, { encoding: 'utf-8' }); fs.writeFileSync(intlModulePath, intlModule, { encoding: 'utf-8' }); diff --git a/vue-intl-locale-data.js b/vue-intl-locale-data.js new file mode 100644 index 00000000000..8f212e22cc5 --- /dev/null +++ b/vue-intl-locale-data.js @@ -0,0 +1,37 @@ +/* + * This is an auto-generated file, any manual edits will be overridden. 
+ * + * To regenerate, see instructions here: + * https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html + * + * This file was generated by frontend_build/src/intl_code_gen.js + */ +module.exports = function() { + const data = []; + data.push(require('vue-intl/locale-data/ar.js')); + data.push(require('vue-intl/locale-data/bg.js')); + data.push(require('vue-intl/locale-data/bn.js')); + data.push(require('vue-intl/locale-data/de.js')); + data.push(require('vue-intl/locale-data/en.js')); + data.push(require('vue-intl/locale-data/es.js')); + data.push(require('vue-intl/locale-data/es.js')); + data.push(require('vue-intl/locale-data/fa.js')); + data.push(require('vue-intl/locale-data/fr.js')); + data.push(require('vue-intl/locale-data/ff.js')); + data.push(require('vue-intl/locale-data/gu.js')); + data.push(require('vue-intl/locale-data/hi.js')); + data.push(require('vue-intl/locale-data/it.js')); + data.push(require('vue-intl/locale-data/km.js')); + data.push(require('vue-intl/locale-data/ko.js')); + data.push(require('vue-intl/locale-data/mr.js')); + data.push(require('vue-intl/locale-data/my.js')); + data.push(require('vue-intl/locale-data/nyn.js')); + data.push(require('vue-intl/locale-data/pt.js')); + data.push(require('vue-intl/locale-data/sw.js')); + data.push(require('vue-intl/locale-data/te.js')); + data.push(require('vue-intl/locale-data/ur.js')); + data.push(require('vue-intl/locale-data/vi.js')); + data.push(require('vue-intl/locale-data/yo.js')); + data.push(require('vue-intl/locale-data/zh.js')); + return data; +};
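Aside from the context strings themselves, the `SyncContext.js` hunk above makes the project root configurable through a `CROWDIN_PROJECT` environment variable (defaulting to `kolibri`) and switches the context CSVs from the `ach` placeholder language to `fr`. The same fallback pattern is sketched in Python below purely for illustration; the names come from the hunk, but this is not the code Kolibri runs.

```python
import os

# Illustrative re-statement of the CROWDIN_PROJECT fallback seen in the diff.
project = os.environ.get("CROWDIN_PROJECT")
if not project:
    print("No CROWDIN_PROJECT set; defaulting to 'kolibri'.")
    project = "kolibri"

js_glob = os.path.join(os.path.abspath(project), "**", "*.js")
vue_glob = os.path.join(os.path.abspath(project), "**", "*.vue")
csv_path = os.path.abspath(os.path.join(project, "locale", "CSV_FILES", "fr"))
print(js_glob, vue_glob, csv_path)
```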
twisted__twisted-11966
twisted.internet.cfreactor not importable on Python 3.8

**Describe the incorrect behavior you saw**

Both mypy and a plain import fail:

```
src/twisted/internet/cfreactor.py:474:24: error: X | Y syntax for unions requires Python 3.10 [syntax]
```

```
>>> import twisted.internet.cfreactor
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/graingert/projects/twisted/src/twisted/internet/cfreactor.py", line 92, in <module>
    class CFReactor(PosixReactorBase):
  File "/home/graingert/projects/twisted/src/twisted/internet/cfreactor.py", line 474, in CFReactor
    _currentSimulator: object | None = None
TypeError: unsupported operand type(s) for |: 'type' and 'NoneType'
```

**Describe how to cause this behavior**

Import `twisted.internet.cfreactor` on Python 3.8.

**Describe the correct behavior you'd like to see**

No import error.

**Testing environment**

N/A
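The annotation in question is a class-level variable annotation with a default value, so it is evaluated when the class body runs; the PEP 604 `object | None` spelling therefore raises on Python 3.8/3.9 at import time, exactly as the traceback shows. A hedged sketch of a 3.8-compatible spelling (illustrative only, using a hypothetical class name; not necessarily the change Twisted actually shipped):

```python
from typing import Optional


class CFReactorSketch:  # hypothetical stand-in for the real CFReactor
    # typing.Optional[object] evaluates fine on Python 3.7+, unlike
    # ``object | None``, which needs Python 3.10 when evaluated eagerly.
    _currentSimulator: Optional[object] = None
```

Adding `from __future__ import annotations` at the top of the module is another option: it turns annotations into strings so the `|` form is never evaluated at import time, although anything that later calls `typing.get_type_hints()` on the class would still trip over that spelling on 3.8.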
[ { "content": "# -*- test-case-name: twisted.internet.test.test_core -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nA reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the\nCoreFoundation main loop used by macOS.\n\nThis is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>}\napplications.\n\"\"\"\n\n__all__ = [\"install\", \"CFReactor\"]\n\nimport sys\n\nfrom zope.interface import implementer\n\nfrom CFNetwork import ( # type: ignore[import]\n CFSocketCreateRunLoopSource,\n CFSocketCreateWithNative,\n CFSocketDisableCallBacks,\n CFSocketEnableCallBacks,\n CFSocketInvalidate,\n CFSocketSetSocketFlags,\n kCFSocketAutomaticallyReenableReadCallBack,\n kCFSocketAutomaticallyReenableWriteCallBack,\n kCFSocketConnectCallBack,\n kCFSocketReadCallBack,\n kCFSocketWriteCallBack,\n)\nfrom CoreFoundation import ( # type: ignore[import]\n CFAbsoluteTimeGetCurrent,\n CFRunLoopAddSource,\n CFRunLoopAddTimer,\n CFRunLoopGetCurrent,\n CFRunLoopRemoveSource,\n CFRunLoopRun,\n CFRunLoopStop,\n CFRunLoopTimerCreate,\n CFRunLoopTimerInvalidate,\n kCFAllocatorDefault,\n kCFRunLoopCommonModes,\n)\n\nfrom twisted.internet.interfaces import IReactorFDSet\nfrom twisted.internet.posixbase import _NO_FILEDESC, PosixReactorBase\nfrom twisted.python import log\n\n# We know that we're going to run on macOS so we can just pick the\n# POSIX-appropriate waker. This also avoids having a dynamic base class and\n# so lets more things get type checked.\nfrom ._signals import _UnixWaker\n\n_READ = 0\n_WRITE = 1\n_preserveSOError = 1 << 6\n\n\nclass _WakerPlus(_UnixWaker):\n \"\"\"\n The normal Twisted waker will simply wake up the main loop, which causes an\n iteration to run, which in turn causes L{ReactorBase.runUntilCurrent}\n to get invoked.\n\n L{CFReactor} has a slightly different model of iteration, though: rather\n than have each iteration process the thread queue, then timed calls, then\n file descriptors, each callback is run as it is dispatched by the CFRunLoop\n observer which triggered it.\n\n So this waker needs to not only unblock the loop, but also make sure the\n work gets done; so, it reschedules the invocation of C{runUntilCurrent} to\n be immediate (0 seconds from now) even if there is no timed call work to\n do.\n \"\"\"\n\n def __init__(self, reactor):\n super().__init__()\n self.reactor = reactor\n\n def doRead(self):\n \"\"\"\n Wake up the loop and force C{runUntilCurrent} to run immediately in the\n next timed iteration.\n \"\"\"\n result = super().doRead()\n self.reactor._scheduleSimulate(True)\n return result\n\n\n@implementer(IReactorFDSet)\nclass CFReactor(PosixReactorBase):\n \"\"\"\n The CoreFoundation reactor.\n\n You probably want to use this via the L{install} API.\n\n @ivar _fdmap: a dictionary, mapping an integer (a file descriptor) to a\n 4-tuple of:\n\n - source: a C{CFRunLoopSource}; the source associated with this\n socket.\n - socket: a C{CFSocket} wrapping the file descriptor.\n - descriptor: an L{IReadDescriptor} and/or L{IWriteDescriptor}\n provider.\n - read-write: a 2-C{list} of booleans: respectively, whether this\n descriptor is currently registered for reading or registered for\n writing.\n\n @ivar _idmap: a dictionary, mapping the id() of an L{IReadDescriptor} or\n L{IWriteDescriptor} to a C{fd} in L{_fdmap}. 
Implemented in this\n manner so that we don't have to rely (even more) on the hashability of\n L{IReadDescriptor} providers, and we know that they won't be collected\n since these are kept in sync with C{_fdmap}. Necessary because the\n .fileno() of a file descriptor may change at will, so we need to be\n able to look up what its file descriptor I{used} to be, so that we can\n look it up in C{_fdmap}\n\n @ivar _cfrunloop: the C{CFRunLoop} pyobjc object wrapped\n by this reactor.\n\n @ivar _inCFLoop: Is C{CFRunLoopRun} currently running?\n\n @type _inCFLoop: L{bool}\n\n @ivar _currentSimulator: if a CFTimer is currently scheduled with the CF\n run loop to run Twisted callLater calls, this is a reference to it.\n Otherwise, it is L{None}\n \"\"\"\n\n def __init__(self, runLoop=None, runner=None):\n self._fdmap = {}\n self._idmap = {}\n if runner is None:\n runner = CFRunLoopRun\n self._runner = runner\n\n if runLoop is None:\n runLoop = CFRunLoopGetCurrent()\n self._cfrunloop = runLoop\n PosixReactorBase.__init__(self)\n\n def _wakerFactory(self) -> _WakerPlus:\n return _WakerPlus(self)\n\n def _socketCallback(\n self, cfSocket, callbackType, ignoredAddress, ignoredData, context\n ):\n \"\"\"\n The socket callback issued by CFRunLoop. This will issue C{doRead} or\n C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}\n registered with the file descriptor that we are being notified of.\n\n @param cfSocket: The C{CFSocket} which has got some activity.\n\n @param callbackType: The type of activity that we are being notified\n of. Either C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}.\n\n @param ignoredAddress: Unused, because this is not used for either of\n the callback types we register for.\n\n @param ignoredData: Unused, because this is not used for either of the\n callback types we register for.\n\n @param context: The data associated with this callback by\n C{CFSocketCreateWithNative} (in C{CFReactor._watchFD}). A 2-tuple\n of C{(int, CFRunLoopSource)}.\n \"\"\"\n (fd, smugglesrc) = context\n if fd not in self._fdmap:\n # Spurious notifications seem to be generated sometimes if you\n # CFSocketDisableCallBacks in the middle of an event. I don't know\n # about this FD, any more, so let's get rid of it.\n CFRunLoopRemoveSource(self._cfrunloop, smugglesrc, kCFRunLoopCommonModes)\n return\n\n src, skt, readWriteDescriptor, rw = self._fdmap[fd]\n\n def _drdw():\n why = None\n isRead = False\n\n try:\n if readWriteDescriptor.fileno() == -1:\n why = _NO_FILEDESC\n else:\n isRead = callbackType == kCFSocketReadCallBack\n # CFSocket seems to deliver duplicate read/write\n # notifications sometimes, especially a duplicate\n # writability notification when first registering the\n # socket. This bears further investigation, since I may\n # have been mis-interpreting the behavior I was seeing.\n # (Running the full Twisted test suite, while thorough, is\n # not always entirely clear.) Until this has been more\n # thoroughly investigated , we consult our own\n # reading/writing state flags to determine whether we\n # should actually attempt a doRead/doWrite first. 
-glyph\n if isRead:\n if rw[_READ]:\n why = readWriteDescriptor.doRead()\n else:\n if rw[_WRITE]:\n why = readWriteDescriptor.doWrite()\n except BaseException:\n why = sys.exc_info()[1]\n log.err()\n if why:\n self._disconnectSelectable(readWriteDescriptor, why, isRead)\n\n log.callWithLogger(readWriteDescriptor, _drdw)\n\n def _watchFD(self, fd, descr, flag):\n \"\"\"\n Register a file descriptor with the C{CFRunLoop}, or modify its state\n so that it's listening for both notifications (read and write) rather\n than just one; used to implement C{addReader} and C{addWriter}.\n\n @param fd: The file descriptor.\n\n @type fd: L{int}\n\n @param descr: the L{IReadDescriptor} or L{IWriteDescriptor}\n\n @param flag: the flag to register for callbacks on, either\n C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}\n \"\"\"\n if fd == -1:\n raise RuntimeError(\"Invalid file descriptor.\")\n if fd in self._fdmap:\n src, cfs, gotdescr, rw = self._fdmap[fd]\n # do I need to verify that it's the same descr?\n else:\n ctx = []\n ctx.append(fd)\n cfs = CFSocketCreateWithNative(\n kCFAllocatorDefault,\n fd,\n kCFSocketReadCallBack\n | kCFSocketWriteCallBack\n | kCFSocketConnectCallBack,\n self._socketCallback,\n ctx,\n )\n CFSocketSetSocketFlags(\n cfs,\n kCFSocketAutomaticallyReenableReadCallBack\n | kCFSocketAutomaticallyReenableWriteCallBack\n |\n # This extra flag is to ensure that CF doesn't (destructively,\n # because destructively is the only way to do it) retrieve\n # SO_ERROR and thereby break twisted.internet.tcp.BaseClient,\n # which needs SO_ERROR to tell it whether or not it needs to\n # call connect_ex a second time.\n _preserveSOError,\n )\n src = CFSocketCreateRunLoopSource(kCFAllocatorDefault, cfs, 0)\n ctx.append(src)\n CFRunLoopAddSource(self._cfrunloop, src, kCFRunLoopCommonModes)\n CFSocketDisableCallBacks(\n cfs,\n kCFSocketReadCallBack\n | kCFSocketWriteCallBack\n | kCFSocketConnectCallBack,\n )\n rw = [False, False]\n self._idmap[id(descr)] = fd\n self._fdmap[fd] = src, cfs, descr, rw\n rw[self._flag2idx(flag)] = True\n CFSocketEnableCallBacks(cfs, flag)\n\n def _flag2idx(self, flag):\n \"\"\"\n Convert a C{kCFSocket...} constant to an index into the read/write\n state list (C{_READ} or C{_WRITE}) (the 4th element of the value of\n C{self._fdmap}).\n\n @param flag: C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}\n\n @return: C{_READ} or C{_WRITE}\n \"\"\"\n return {kCFSocketReadCallBack: _READ, kCFSocketWriteCallBack: _WRITE}[flag]\n\n def _unwatchFD(self, fd, descr, flag):\n \"\"\"\n Unregister a file descriptor with the C{CFRunLoop}, or modify its state\n so that it's listening for only one notification (read or write) as\n opposed to both; used to implement C{removeReader} and C{removeWriter}.\n\n @param fd: a file descriptor\n\n @type fd: C{int}\n\n @param descr: an L{IReadDescriptor} or L{IWriteDescriptor}\n\n @param flag: C{kCFSocketWriteCallBack} C{kCFSocketReadCallBack}\n \"\"\"\n if id(descr) not in self._idmap:\n return\n if fd == -1:\n # need to deal with it in this case, I think.\n realfd = self._idmap[id(descr)]\n else:\n realfd = fd\n src, cfs, descr, rw = self._fdmap[realfd]\n CFSocketDisableCallBacks(cfs, flag)\n rw[self._flag2idx(flag)] = False\n if not rw[_READ] and not rw[_WRITE]:\n del self._idmap[id(descr)]\n del self._fdmap[realfd]\n CFRunLoopRemoveSource(self._cfrunloop, src, kCFRunLoopCommonModes)\n CFSocketInvalidate(cfs)\n\n def addReader(self, reader):\n \"\"\"\n Implement L{IReactorFDSet.addReader}.\n \"\"\"\n self._watchFD(reader.fileno(), 
reader, kCFSocketReadCallBack)\n\n def addWriter(self, writer):\n \"\"\"\n Implement L{IReactorFDSet.addWriter}.\n \"\"\"\n self._watchFD(writer.fileno(), writer, kCFSocketWriteCallBack)\n\n def removeReader(self, reader):\n \"\"\"\n Implement L{IReactorFDSet.removeReader}.\n \"\"\"\n self._unwatchFD(reader.fileno(), reader, kCFSocketReadCallBack)\n\n def removeWriter(self, writer):\n \"\"\"\n Implement L{IReactorFDSet.removeWriter}.\n \"\"\"\n self._unwatchFD(writer.fileno(), writer, kCFSocketWriteCallBack)\n\n def removeAll(self):\n \"\"\"\n Implement L{IReactorFDSet.removeAll}.\n \"\"\"\n allDesc = {descr for src, cfs, descr, rw in self._fdmap.values()}\n allDesc -= set(self._internalReaders)\n for desc in allDesc:\n self.removeReader(desc)\n self.removeWriter(desc)\n return list(allDesc)\n\n def getReaders(self):\n \"\"\"\n Implement L{IReactorFDSet.getReaders}.\n \"\"\"\n return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_READ]]\n\n def getWriters(self):\n \"\"\"\n Implement L{IReactorFDSet.getWriters}.\n \"\"\"\n return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_WRITE]]\n\n def _moveCallLaterSooner(self, tple):\n \"\"\"\n Override L{PosixReactorBase}'s implementation of L{IDelayedCall.reset}\n so that it will immediately reschedule. Normally\n C{_moveCallLaterSooner} depends on the fact that C{runUntilCurrent} is\n always run before the mainloop goes back to sleep, so this forces it to\n immediately recompute how long the loop needs to stay asleep.\n \"\"\"\n result = PosixReactorBase._moveCallLaterSooner(self, tple)\n self._scheduleSimulate()\n return result\n\n def startRunning(self, installSignalHandlers: bool = True) -> None:\n \"\"\"\n Start running the reactor, then kick off the timer that advances\n Twisted's clock to keep pace with CFRunLoop's.\n \"\"\"\n super().startRunning(installSignalHandlers)\n\n # Before 'startRunning' is called, the reactor is not attached to the\n # CFRunLoop[1]; specifically, the CFTimer that runs all of Twisted's\n # timers is not active and will not have been added to the loop by any\n # application code. Now that _running is probably[2] True, we need to\n # ensure that timed calls will actually run on the main loop. This\n # call needs to be here, rather than at the top of mainLoop, because\n # it's possible to use startRunning to *attach* a reactor to an\n # already-running CFRunLoop, i.e. within a plugin for an application\n # that doesn't otherwise use Twisted, rather than calling it via run().\n self._scheduleSimulate(force=True)\n\n # [1]: readers & writers are still active in the loop, but arguably\n # they should not be.\n\n # [2]: application code within a 'startup' system event trigger *may*\n # have already crashed the reactor and thus set _started to False,\n # but that specific case is handled by mainLoop, since that case\n # is inherently irrelevant in an attach-to-application case and is\n # only necessary to handle mainLoop spuriously blocking.\n\n _inCFLoop = False\n\n def mainLoop(self) -> None:\n \"\"\"\n Run the runner (C{CFRunLoopRun} or something that calls it), which runs\n the run loop until C{crash()} is called.\n \"\"\"\n if not self._started:\n # If we arrive here, we were crashed by application code in a\n # 'startup' system event trigger, (or crashed manually before the\n # application calls 'mainLoop' directly for whatever reason; sigh,\n # this method should not be public). 
However, application code\n # doing obscure things will expect an invocation of this loop to\n # have at least *one* pass over ready readers, writers, and delayed\n # calls. iterate(), in particular, is emulated in exactly this way\n # in this reactor implementation. In order to ensure that we enter\n # the real implementation of the mainloop and do all of those\n # things, we need to set _started back to True so that callLater\n # actually schedules itself against the CFRunLoop, but immediately\n # crash once we are in the context of the loop where we've run\n # ready I/O and timers.\n\n def docrash() -> None:\n self.crash()\n\n self._started = True\n self.callLater(0, docrash)\n already = False\n try:\n while self._started:\n if already:\n # Sometimes CFRunLoopRun (or its equivalents) may exit\n # without CFRunLoopStop being called.\n\n # This is really only *supposed* to happen when it runs out\n # of sources & timers to process. However, in full Twisted\n # test-suite runs we have observed, extremely rarely (once\n # in every 3000 tests or so) CFRunLoopRun exiting in cases\n # where it seems as though there *is* still some work to\n # do. However, given the difficulty of reproducing the\n # race conditions necessary to make this happen, it's\n # possible that we have missed some nuance of when\n # CFRunLoop considers the list of work \"empty\" and various\n # callbacks and timers to be \"invalidated\". Therefore we\n # are not fully confident that this is a platform bug, but\n # it is nevertheless unexpected behavior from our reading\n # of the documentation.\n\n # To accommodate this rare and slightly ambiguous stress\n # case, we make extra sure that our scheduled timer is\n # re-created on the loop as a CFRunLoopTimer, which\n # reliably gives the loop some work to do and 'fixes' it if\n # it exited due to having no active sources or timers.\n self._scheduleSimulate()\n\n # At this point, there may be a little more code that we\n # would need to put here for full correctness for a very\n # peculiar type of application: if you're writing a\n # command-line tool using CFReactor, adding *nothing* to\n # the reactor itself, disabling even the internal Waker\n # file descriptors, then there's a possibility that\n # CFRunLoopRun will exit early, and if we have no timers,\n # we might busy-loop here. Because we cannot seem to force\n # this to happen under normal circumstances, we're leaving\n # that code out.\n\n already = True\n self._inCFLoop = True\n try:\n self._runner()\n finally:\n self._inCFLoop = False\n finally:\n self._stopSimulating()\n\n _currentSimulator: object | None = None\n\n def _stopSimulating(self) -> None:\n \"\"\"\n If we have a CFRunLoopTimer registered with the CFRunLoop, invalidate\n it and set it to None.\n \"\"\"\n if self._currentSimulator is None:\n return\n CFRunLoopTimerInvalidate(self._currentSimulator)\n self._currentSimulator = None\n\n def _scheduleSimulate(self, force: bool = False) -> None:\n \"\"\"\n Schedule a call to C{self.runUntilCurrent}. This will cancel the\n currently scheduled call if it is already scheduled.\n\n @param force: Even if there are no timed calls, make sure that\n C{runUntilCurrent} runs immediately (in a 0-seconds-from-now\n C{CFRunLoopTimer}). 
This is necessary for calls which need to\n trigger behavior of C{runUntilCurrent} other than running timed\n calls, such as draining the thread call queue or calling C{crash()}\n when the appropriate flags are set.\n\n @type force: C{bool}\n \"\"\"\n self._stopSimulating()\n if not self._started:\n # If the reactor is not running (e.g. we are scheduling callLater\n # calls before starting the reactor) we should not be scheduling\n # CFRunLoopTimers against the global CFRunLoop.\n return\n\n timeout = 0.0 if force else self.timeout()\n if timeout is None:\n return\n\n fireDate = CFAbsoluteTimeGetCurrent() + timeout\n\n def simulate(cftimer, extra):\n self._currentSimulator = None\n self.runUntilCurrent()\n self._scheduleSimulate()\n\n c = self._currentSimulator = CFRunLoopTimerCreate(\n kCFAllocatorDefault, fireDate, 0, 0, 0, simulate, None\n )\n CFRunLoopAddTimer(self._cfrunloop, c, kCFRunLoopCommonModes)\n\n def callLater(self, _seconds, _f, *args, **kw):\n \"\"\"\n Implement L{IReactorTime.callLater}.\n \"\"\"\n delayedCall = PosixReactorBase.callLater(self, _seconds, _f, *args, **kw)\n self._scheduleSimulate()\n return delayedCall\n\n def stop(self):\n \"\"\"\n Implement L{IReactorCore.stop}.\n \"\"\"\n PosixReactorBase.stop(self)\n self._scheduleSimulate(True)\n\n def crash(self):\n \"\"\"\n Implement L{IReactorCore.crash}\n \"\"\"\n PosixReactorBase.crash(self)\n if not self._inCFLoop:\n return\n CFRunLoopStop(self._cfrunloop)\n\n def iterate(self, delay=0):\n \"\"\"\n Emulate the behavior of C{iterate()} for things that want to call it,\n by letting the loop run for a little while and then scheduling a timed\n call to exit it.\n \"\"\"\n self._started = True\n # Since the CoreFoundation loop doesn't have the concept of \"iterate\"\n # we can't ask it to do this. Instead we will make arrangements to\n # crash it *very* soon and then make it run. This is a rough\n # approximation of \"an iteration\". Using crash and mainLoop here\n # means that it's safe (as safe as anything using \"iterate\" can be) to\n # do this repeatedly.\n self.callLater(0, self.crash)\n self.mainLoop()\n\n\ndef install(runLoop=None, runner=None):\n \"\"\"\n Configure the twisted mainloop to be run inside CFRunLoop.\n\n @param runLoop: the run loop to use.\n\n @param runner: the function to call in order to actually invoke the main\n loop. This will default to C{CFRunLoopRun} if not specified. However,\n this is not an appropriate choice for GUI applications, as you need to\n run NSApplicationMain (or something like it). For example, to run the\n Twisted mainloop in a PyObjC application, your C{main.py} should look\n something like this::\n\n from PyObjCTools import AppHelper\n from twisted.internet.cfreactor import install\n install(runner=AppHelper.runEventLoop)\n # initialize your application\n reactor.run()\n\n @return: The installed reactor.\n\n @rtype: C{CFReactor}\n \"\"\"\n\n reactor = CFReactor(runLoop=runLoop, runner=runner)\n from twisted.internet.main import installReactor\n\n installReactor(reactor)\n return reactor\n", "path": "src/twisted/internet/cfreactor.py" } ]
[ { "content": "# -*- test-case-name: twisted.internet.test.test_core -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nA reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the\nCoreFoundation main loop used by macOS.\n\nThis is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>}\napplications.\n\"\"\"\nfrom __future__ import annotations\n\n__all__ = [\"install\", \"CFReactor\"]\n\nimport sys\n\nfrom zope.interface import implementer\n\nfrom CFNetwork import ( # type: ignore[import]\n CFSocketCreateRunLoopSource,\n CFSocketCreateWithNative,\n CFSocketDisableCallBacks,\n CFSocketEnableCallBacks,\n CFSocketInvalidate,\n CFSocketSetSocketFlags,\n kCFSocketAutomaticallyReenableReadCallBack,\n kCFSocketAutomaticallyReenableWriteCallBack,\n kCFSocketConnectCallBack,\n kCFSocketReadCallBack,\n kCFSocketWriteCallBack,\n)\nfrom CoreFoundation import ( # type: ignore[import]\n CFAbsoluteTimeGetCurrent,\n CFRunLoopAddSource,\n CFRunLoopAddTimer,\n CFRunLoopGetCurrent,\n CFRunLoopRemoveSource,\n CFRunLoopRun,\n CFRunLoopStop,\n CFRunLoopTimerCreate,\n CFRunLoopTimerInvalidate,\n kCFAllocatorDefault,\n kCFRunLoopCommonModes,\n)\n\nfrom twisted.internet.interfaces import IReactorFDSet\nfrom twisted.internet.posixbase import _NO_FILEDESC, PosixReactorBase\nfrom twisted.python import log\n\n# We know that we're going to run on macOS so we can just pick the\n# POSIX-appropriate waker. This also avoids having a dynamic base class and\n# so lets more things get type checked.\nfrom ._signals import _UnixWaker\n\n_READ = 0\n_WRITE = 1\n_preserveSOError = 1 << 6\n\n\nclass _WakerPlus(_UnixWaker):\n \"\"\"\n The normal Twisted waker will simply wake up the main loop, which causes an\n iteration to run, which in turn causes L{ReactorBase.runUntilCurrent}\n to get invoked.\n\n L{CFReactor} has a slightly different model of iteration, though: rather\n than have each iteration process the thread queue, then timed calls, then\n file descriptors, each callback is run as it is dispatched by the CFRunLoop\n observer which triggered it.\n\n So this waker needs to not only unblock the loop, but also make sure the\n work gets done; so, it reschedules the invocation of C{runUntilCurrent} to\n be immediate (0 seconds from now) even if there is no timed call work to\n do.\n \"\"\"\n\n def __init__(self, reactor):\n super().__init__()\n self.reactor = reactor\n\n def doRead(self):\n \"\"\"\n Wake up the loop and force C{runUntilCurrent} to run immediately in the\n next timed iteration.\n \"\"\"\n result = super().doRead()\n self.reactor._scheduleSimulate(True)\n return result\n\n\n@implementer(IReactorFDSet)\nclass CFReactor(PosixReactorBase):\n \"\"\"\n The CoreFoundation reactor.\n\n You probably want to use this via the L{install} API.\n\n @ivar _fdmap: a dictionary, mapping an integer (a file descriptor) to a\n 4-tuple of:\n\n - source: a C{CFRunLoopSource}; the source associated with this\n socket.\n - socket: a C{CFSocket} wrapping the file descriptor.\n - descriptor: an L{IReadDescriptor} and/or L{IWriteDescriptor}\n provider.\n - read-write: a 2-C{list} of booleans: respectively, whether this\n descriptor is currently registered for reading or registered for\n writing.\n\n @ivar _idmap: a dictionary, mapping the id() of an L{IReadDescriptor} or\n L{IWriteDescriptor} to a C{fd} in L{_fdmap}. 
Implemented in this\n manner so that we don't have to rely (even more) on the hashability of\n L{IReadDescriptor} providers, and we know that they won't be collected\n since these are kept in sync with C{_fdmap}. Necessary because the\n .fileno() of a file descriptor may change at will, so we need to be\n able to look up what its file descriptor I{used} to be, so that we can\n look it up in C{_fdmap}\n\n @ivar _cfrunloop: the C{CFRunLoop} pyobjc object wrapped\n by this reactor.\n\n @ivar _inCFLoop: Is C{CFRunLoopRun} currently running?\n\n @type _inCFLoop: L{bool}\n\n @ivar _currentSimulator: if a CFTimer is currently scheduled with the CF\n run loop to run Twisted callLater calls, this is a reference to it.\n Otherwise, it is L{None}\n \"\"\"\n\n def __init__(self, runLoop=None, runner=None):\n self._fdmap = {}\n self._idmap = {}\n if runner is None:\n runner = CFRunLoopRun\n self._runner = runner\n\n if runLoop is None:\n runLoop = CFRunLoopGetCurrent()\n self._cfrunloop = runLoop\n PosixReactorBase.__init__(self)\n\n def _wakerFactory(self) -> _WakerPlus:\n return _WakerPlus(self)\n\n def _socketCallback(\n self, cfSocket, callbackType, ignoredAddress, ignoredData, context\n ):\n \"\"\"\n The socket callback issued by CFRunLoop. This will issue C{doRead} or\n C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}\n registered with the file descriptor that we are being notified of.\n\n @param cfSocket: The C{CFSocket} which has got some activity.\n\n @param callbackType: The type of activity that we are being notified\n of. Either C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}.\n\n @param ignoredAddress: Unused, because this is not used for either of\n the callback types we register for.\n\n @param ignoredData: Unused, because this is not used for either of the\n callback types we register for.\n\n @param context: The data associated with this callback by\n C{CFSocketCreateWithNative} (in C{CFReactor._watchFD}). A 2-tuple\n of C{(int, CFRunLoopSource)}.\n \"\"\"\n (fd, smugglesrc) = context\n if fd not in self._fdmap:\n # Spurious notifications seem to be generated sometimes if you\n # CFSocketDisableCallBacks in the middle of an event. I don't know\n # about this FD, any more, so let's get rid of it.\n CFRunLoopRemoveSource(self._cfrunloop, smugglesrc, kCFRunLoopCommonModes)\n return\n\n src, skt, readWriteDescriptor, rw = self._fdmap[fd]\n\n def _drdw():\n why = None\n isRead = False\n\n try:\n if readWriteDescriptor.fileno() == -1:\n why = _NO_FILEDESC\n else:\n isRead = callbackType == kCFSocketReadCallBack\n # CFSocket seems to deliver duplicate read/write\n # notifications sometimes, especially a duplicate\n # writability notification when first registering the\n # socket. This bears further investigation, since I may\n # have been mis-interpreting the behavior I was seeing.\n # (Running the full Twisted test suite, while thorough, is\n # not always entirely clear.) Until this has been more\n # thoroughly investigated , we consult our own\n # reading/writing state flags to determine whether we\n # should actually attempt a doRead/doWrite first. 
-glyph\n if isRead:\n if rw[_READ]:\n why = readWriteDescriptor.doRead()\n else:\n if rw[_WRITE]:\n why = readWriteDescriptor.doWrite()\n except BaseException:\n why = sys.exc_info()[1]\n log.err()\n if why:\n self._disconnectSelectable(readWriteDescriptor, why, isRead)\n\n log.callWithLogger(readWriteDescriptor, _drdw)\n\n def _watchFD(self, fd, descr, flag):\n \"\"\"\n Register a file descriptor with the C{CFRunLoop}, or modify its state\n so that it's listening for both notifications (read and write) rather\n than just one; used to implement C{addReader} and C{addWriter}.\n\n @param fd: The file descriptor.\n\n @type fd: L{int}\n\n @param descr: the L{IReadDescriptor} or L{IWriteDescriptor}\n\n @param flag: the flag to register for callbacks on, either\n C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}\n \"\"\"\n if fd == -1:\n raise RuntimeError(\"Invalid file descriptor.\")\n if fd in self._fdmap:\n src, cfs, gotdescr, rw = self._fdmap[fd]\n # do I need to verify that it's the same descr?\n else:\n ctx = []\n ctx.append(fd)\n cfs = CFSocketCreateWithNative(\n kCFAllocatorDefault,\n fd,\n kCFSocketReadCallBack\n | kCFSocketWriteCallBack\n | kCFSocketConnectCallBack,\n self._socketCallback,\n ctx,\n )\n CFSocketSetSocketFlags(\n cfs,\n kCFSocketAutomaticallyReenableReadCallBack\n | kCFSocketAutomaticallyReenableWriteCallBack\n |\n # This extra flag is to ensure that CF doesn't (destructively,\n # because destructively is the only way to do it) retrieve\n # SO_ERROR and thereby break twisted.internet.tcp.BaseClient,\n # which needs SO_ERROR to tell it whether or not it needs to\n # call connect_ex a second time.\n _preserveSOError,\n )\n src = CFSocketCreateRunLoopSource(kCFAllocatorDefault, cfs, 0)\n ctx.append(src)\n CFRunLoopAddSource(self._cfrunloop, src, kCFRunLoopCommonModes)\n CFSocketDisableCallBacks(\n cfs,\n kCFSocketReadCallBack\n | kCFSocketWriteCallBack\n | kCFSocketConnectCallBack,\n )\n rw = [False, False]\n self._idmap[id(descr)] = fd\n self._fdmap[fd] = src, cfs, descr, rw\n rw[self._flag2idx(flag)] = True\n CFSocketEnableCallBacks(cfs, flag)\n\n def _flag2idx(self, flag):\n \"\"\"\n Convert a C{kCFSocket...} constant to an index into the read/write\n state list (C{_READ} or C{_WRITE}) (the 4th element of the value of\n C{self._fdmap}).\n\n @param flag: C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}\n\n @return: C{_READ} or C{_WRITE}\n \"\"\"\n return {kCFSocketReadCallBack: _READ, kCFSocketWriteCallBack: _WRITE}[flag]\n\n def _unwatchFD(self, fd, descr, flag):\n \"\"\"\n Unregister a file descriptor with the C{CFRunLoop}, or modify its state\n so that it's listening for only one notification (read or write) as\n opposed to both; used to implement C{removeReader} and C{removeWriter}.\n\n @param fd: a file descriptor\n\n @type fd: C{int}\n\n @param descr: an L{IReadDescriptor} or L{IWriteDescriptor}\n\n @param flag: C{kCFSocketWriteCallBack} C{kCFSocketReadCallBack}\n \"\"\"\n if id(descr) not in self._idmap:\n return\n if fd == -1:\n # need to deal with it in this case, I think.\n realfd = self._idmap[id(descr)]\n else:\n realfd = fd\n src, cfs, descr, rw = self._fdmap[realfd]\n CFSocketDisableCallBacks(cfs, flag)\n rw[self._flag2idx(flag)] = False\n if not rw[_READ] and not rw[_WRITE]:\n del self._idmap[id(descr)]\n del self._fdmap[realfd]\n CFRunLoopRemoveSource(self._cfrunloop, src, kCFRunLoopCommonModes)\n CFSocketInvalidate(cfs)\n\n def addReader(self, reader):\n \"\"\"\n Implement L{IReactorFDSet.addReader}.\n \"\"\"\n self._watchFD(reader.fileno(), 
reader, kCFSocketReadCallBack)\n\n def addWriter(self, writer):\n \"\"\"\n Implement L{IReactorFDSet.addWriter}.\n \"\"\"\n self._watchFD(writer.fileno(), writer, kCFSocketWriteCallBack)\n\n def removeReader(self, reader):\n \"\"\"\n Implement L{IReactorFDSet.removeReader}.\n \"\"\"\n self._unwatchFD(reader.fileno(), reader, kCFSocketReadCallBack)\n\n def removeWriter(self, writer):\n \"\"\"\n Implement L{IReactorFDSet.removeWriter}.\n \"\"\"\n self._unwatchFD(writer.fileno(), writer, kCFSocketWriteCallBack)\n\n def removeAll(self):\n \"\"\"\n Implement L{IReactorFDSet.removeAll}.\n \"\"\"\n allDesc = {descr for src, cfs, descr, rw in self._fdmap.values()}\n allDesc -= set(self._internalReaders)\n for desc in allDesc:\n self.removeReader(desc)\n self.removeWriter(desc)\n return list(allDesc)\n\n def getReaders(self):\n \"\"\"\n Implement L{IReactorFDSet.getReaders}.\n \"\"\"\n return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_READ]]\n\n def getWriters(self):\n \"\"\"\n Implement L{IReactorFDSet.getWriters}.\n \"\"\"\n return [descr for src, cfs, descr, rw in self._fdmap.values() if rw[_WRITE]]\n\n def _moveCallLaterSooner(self, tple):\n \"\"\"\n Override L{PosixReactorBase}'s implementation of L{IDelayedCall.reset}\n so that it will immediately reschedule. Normally\n C{_moveCallLaterSooner} depends on the fact that C{runUntilCurrent} is\n always run before the mainloop goes back to sleep, so this forces it to\n immediately recompute how long the loop needs to stay asleep.\n \"\"\"\n result = PosixReactorBase._moveCallLaterSooner(self, tple)\n self._scheduleSimulate()\n return result\n\n def startRunning(self, installSignalHandlers: bool = True) -> None:\n \"\"\"\n Start running the reactor, then kick off the timer that advances\n Twisted's clock to keep pace with CFRunLoop's.\n \"\"\"\n super().startRunning(installSignalHandlers)\n\n # Before 'startRunning' is called, the reactor is not attached to the\n # CFRunLoop[1]; specifically, the CFTimer that runs all of Twisted's\n # timers is not active and will not have been added to the loop by any\n # application code. Now that _running is probably[2] True, we need to\n # ensure that timed calls will actually run on the main loop. This\n # call needs to be here, rather than at the top of mainLoop, because\n # it's possible to use startRunning to *attach* a reactor to an\n # already-running CFRunLoop, i.e. within a plugin for an application\n # that doesn't otherwise use Twisted, rather than calling it via run().\n self._scheduleSimulate(force=True)\n\n # [1]: readers & writers are still active in the loop, but arguably\n # they should not be.\n\n # [2]: application code within a 'startup' system event trigger *may*\n # have already crashed the reactor and thus set _started to False,\n # but that specific case is handled by mainLoop, since that case\n # is inherently irrelevant in an attach-to-application case and is\n # only necessary to handle mainLoop spuriously blocking.\n\n _inCFLoop = False\n\n def mainLoop(self) -> None:\n \"\"\"\n Run the runner (C{CFRunLoopRun} or something that calls it), which runs\n the run loop until C{crash()} is called.\n \"\"\"\n if not self._started:\n # If we arrive here, we were crashed by application code in a\n # 'startup' system event trigger, (or crashed manually before the\n # application calls 'mainLoop' directly for whatever reason; sigh,\n # this method should not be public). 
However, application code\n # doing obscure things will expect an invocation of this loop to\n # have at least *one* pass over ready readers, writers, and delayed\n # calls. iterate(), in particular, is emulated in exactly this way\n # in this reactor implementation. In order to ensure that we enter\n # the real implementation of the mainloop and do all of those\n # things, we need to set _started back to True so that callLater\n # actually schedules itself against the CFRunLoop, but immediately\n # crash once we are in the context of the loop where we've run\n # ready I/O and timers.\n\n def docrash() -> None:\n self.crash()\n\n self._started = True\n self.callLater(0, docrash)\n already = False\n try:\n while self._started:\n if already:\n # Sometimes CFRunLoopRun (or its equivalents) may exit\n # without CFRunLoopStop being called.\n\n # This is really only *supposed* to happen when it runs out\n # of sources & timers to process. However, in full Twisted\n # test-suite runs we have observed, extremely rarely (once\n # in every 3000 tests or so) CFRunLoopRun exiting in cases\n # where it seems as though there *is* still some work to\n # do. However, given the difficulty of reproducing the\n # race conditions necessary to make this happen, it's\n # possible that we have missed some nuance of when\n # CFRunLoop considers the list of work \"empty\" and various\n # callbacks and timers to be \"invalidated\". Therefore we\n # are not fully confident that this is a platform bug, but\n # it is nevertheless unexpected behavior from our reading\n # of the documentation.\n\n # To accommodate this rare and slightly ambiguous stress\n # case, we make extra sure that our scheduled timer is\n # re-created on the loop as a CFRunLoopTimer, which\n # reliably gives the loop some work to do and 'fixes' it if\n # it exited due to having no active sources or timers.\n self._scheduleSimulate()\n\n # At this point, there may be a little more code that we\n # would need to put here for full correctness for a very\n # peculiar type of application: if you're writing a\n # command-line tool using CFReactor, adding *nothing* to\n # the reactor itself, disabling even the internal Waker\n # file descriptors, then there's a possibility that\n # CFRunLoopRun will exit early, and if we have no timers,\n # we might busy-loop here. Because we cannot seem to force\n # this to happen under normal circumstances, we're leaving\n # that code out.\n\n already = True\n self._inCFLoop = True\n try:\n self._runner()\n finally:\n self._inCFLoop = False\n finally:\n self._stopSimulating()\n\n _currentSimulator: object | None = None\n\n def _stopSimulating(self) -> None:\n \"\"\"\n If we have a CFRunLoopTimer registered with the CFRunLoop, invalidate\n it and set it to None.\n \"\"\"\n if self._currentSimulator is None:\n return\n CFRunLoopTimerInvalidate(self._currentSimulator)\n self._currentSimulator = None\n\n def _scheduleSimulate(self, force: bool = False) -> None:\n \"\"\"\n Schedule a call to C{self.runUntilCurrent}. This will cancel the\n currently scheduled call if it is already scheduled.\n\n @param force: Even if there are no timed calls, make sure that\n C{runUntilCurrent} runs immediately (in a 0-seconds-from-now\n C{CFRunLoopTimer}). 
This is necessary for calls which need to\n trigger behavior of C{runUntilCurrent} other than running timed\n calls, such as draining the thread call queue or calling C{crash()}\n when the appropriate flags are set.\n\n @type force: C{bool}\n \"\"\"\n self._stopSimulating()\n if not self._started:\n # If the reactor is not running (e.g. we are scheduling callLater\n # calls before starting the reactor) we should not be scheduling\n # CFRunLoopTimers against the global CFRunLoop.\n return\n\n timeout = 0.0 if force else self.timeout()\n if timeout is None:\n return\n\n fireDate = CFAbsoluteTimeGetCurrent() + timeout\n\n def simulate(cftimer, extra):\n self._currentSimulator = None\n self.runUntilCurrent()\n self._scheduleSimulate()\n\n c = self._currentSimulator = CFRunLoopTimerCreate(\n kCFAllocatorDefault, fireDate, 0, 0, 0, simulate, None\n )\n CFRunLoopAddTimer(self._cfrunloop, c, kCFRunLoopCommonModes)\n\n def callLater(self, _seconds, _f, *args, **kw):\n \"\"\"\n Implement L{IReactorTime.callLater}.\n \"\"\"\n delayedCall = PosixReactorBase.callLater(self, _seconds, _f, *args, **kw)\n self._scheduleSimulate()\n return delayedCall\n\n def stop(self):\n \"\"\"\n Implement L{IReactorCore.stop}.\n \"\"\"\n PosixReactorBase.stop(self)\n self._scheduleSimulate(True)\n\n def crash(self):\n \"\"\"\n Implement L{IReactorCore.crash}\n \"\"\"\n PosixReactorBase.crash(self)\n if not self._inCFLoop:\n return\n CFRunLoopStop(self._cfrunloop)\n\n def iterate(self, delay=0):\n \"\"\"\n Emulate the behavior of C{iterate()} for things that want to call it,\n by letting the loop run for a little while and then scheduling a timed\n call to exit it.\n \"\"\"\n self._started = True\n # Since the CoreFoundation loop doesn't have the concept of \"iterate\"\n # we can't ask it to do this. Instead we will make arrangements to\n # crash it *very* soon and then make it run. This is a rough\n # approximation of \"an iteration\". Using crash and mainLoop here\n # means that it's safe (as safe as anything using \"iterate\" can be) to\n # do this repeatedly.\n self.callLater(0, self.crash)\n self.mainLoop()\n\n\ndef install(runLoop=None, runner=None):\n \"\"\"\n Configure the twisted mainloop to be run inside CFRunLoop.\n\n @param runLoop: the run loop to use.\n\n @param runner: the function to call in order to actually invoke the main\n loop. This will default to C{CFRunLoopRun} if not specified. However,\n this is not an appropriate choice for GUI applications, as you need to\n run NSApplicationMain (or something like it). For example, to run the\n Twisted mainloop in a PyObjC application, your C{main.py} should look\n something like this::\n\n from PyObjCTools import AppHelper\n from twisted.internet.cfreactor import install\n install(runner=AppHelper.runEventLoop)\n # initialize your application\n reactor.run()\n\n @return: The installed reactor.\n\n @rtype: C{CFReactor}\n \"\"\"\n\n reactor = CFReactor(runLoop=runLoop, runner=runner)\n from twisted.internet.main import installReactor\n\n installReactor(reactor)\n return reactor\n", "path": "src/twisted/internet/cfreactor.py" } ]
diff --git a/src/twisted/internet/cfreactor.py b/src/twisted/internet/cfreactor.py index 333ab497604..142c0472ef4 100644 --- a/src/twisted/internet/cfreactor.py +++ b/src/twisted/internet/cfreactor.py @@ -9,6 +9,7 @@ This is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>} applications. """ +from __future__ import annotations __all__ = ["install", "CFReactor"] diff --git a/src/twisted/newsfragments/11965.bugfix b/src/twisted/newsfragments/11965.bugfix new file mode 100644 index 00000000000..b8a16d2e9a8 --- /dev/null +++ b/src/twisted/newsfragments/11965.bugfix @@ -0,0 +1 @@ +Fix TypeError on t.i.cfreactor due to 3.10 type annotation syntax
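The newsfragment in the diff above attributes the bug to Python 3.10 union syntax appearing in an annotation. As a hedged illustration (a standalone sketch, not Twisted code), the snippet below shows why a class-level annotation such as `object | None` needs `from __future__ import annotations` to import cleanly on Python 3.7-3.9: with postponed evaluation the annotation is stored as a string instead of being evaluated at class-definition time.

```python
# Minimal sketch using only the standard library. Without the future import,
# evaluating `object | None` at class-definition time raises TypeError on
# Python < 3.10; with it, the annotation is kept as an unevaluated string.
from __future__ import annotations


class Example:
    # Mirrors the shape of CFReactor's `_currentSimulator: object | None = None`.
    current: object | None = None


# The annotation survives verbatim as a string, so older interpreters accept it.
print(Example.__annotations__)  # {'current': 'object | None'}
```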
nilearn__nilearn-507
Add test for compatibility with old versions of six For the moment, we are compatible with the latest version of six. Recently, somebody pointed out that we did not support six 1.5.2. We should investigate, decide which versions we should be compatible with, and then add this check to Travis.
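As a hedged sketch (the helper below is hypothetical and self-contained, not the project's actual CI script), one way to verify an old pin such as six 1.4.1 is to print the installed six version in each Travis job alongside the other dependencies, so the build log confirms which version was actually exercised:

```python
# Self-contained sketch: report installed dependency versions in a CI log.
def report_version(package_name):
    try:
        package = __import__(package_name)
        version = getattr(package, '__version__', 'unknown version')
        info = '{0} from {1}'.format(version, getattr(package, '__file__', '?'))
    except ImportError:
        info = 'not installed'
    print('{0}: {1}'.format(package_name, info))


if __name__ == '__main__':
    for name in ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']:
        report_version(name)
```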
[ { "content": "import sys\n\nDEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n\n\ndef print_package_version(package_name, indent=' '):\n try:\n package = __import__(package_name)\n version = getattr(package, '__version__', None)\n package_file = getattr(package, '__file__', )\n provenance_info = '{0} from {1}'.format(version, package_file)\n except ImportError:\n provenance_info = 'not installed'\n\n print('{0}{1}: {2}'.format(indent, package_name, provenance_info))\n\nif __name__ == '__main__':\n print('=' * 120)\n print('Python %s' % str(sys.version))\n print('from: %s\\n' % sys.executable)\n\n print('Dependencies versions')\n for package_name in DEPENDENCIES:\n print_package_version(package_name)\n print('=' * 120)\n", "path": "continuous_integration/show-python-packages-versions.py" } ]
[ { "content": "import sys\n\nDEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n\n\ndef print_package_version(package_name, indent=' '):\n try:\n package = __import__(package_name)\n version = getattr(package, '__version__', None)\n package_file = getattr(package, '__file__', )\n provenance_info = '{0} from {1}'.format(version, package_file)\n except ImportError:\n provenance_info = 'not installed'\n\n print('{0}{1}: {2}'.format(indent, package_name, provenance_info))\n\nif __name__ == '__main__':\n print('=' * 120)\n print('Python %s' % str(sys.version))\n print('from: %s\\n' % sys.executable)\n\n print('Dependencies versions')\n for package_name in DEPENDENCIES:\n print_package_version(package_name)\n print('=' * 120)\n", "path": "continuous_integration/show-python-packages-versions.py" } ]
diff --git a/.travis.yml b/.travis.yml index 184464c0e4..43e11f4db7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ env: - DISTRIB="conda" PYTHON_VERSION="2.6" NUMPY_VERSION="1.6.2" SCIPY_VERSION="0.11.0" SCIKIT_LEARN_VERSION="0.12.1" MATPLOTLIB_VERSION="1.1.1" - NIBABEL_VERSION="1.1.0" + NIBABEL_VERSION="1.1.0" SIX_VERSION="1.4.1" # Most recent versions - DISTRIB="conda" PYTHON_VERSION="3.4" NUMPY_VERSION="*" SCIPY_VERSION="*" diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index 04a3c8e198..4af1c80a98 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -35,7 +35,7 @@ print_conda_requirements() { # - for scikit-learn, SCIKIT_LEARN_VERSION is used TO_INSTALL_ALWAYS="pip nose" REQUIREMENTS="$TO_INSTALL_ALWAYS" - TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn" + TO_INSTALL_MAYBE="python six numpy scipy matplotlib scikit-learn" for PACKAGE in $TO_INSTALL_MAYBE; do # Capitalize package name and add _VERSION PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py index 1822dd172e..6134c7a47b 100644 --- a/continuous_integration/show-python-packages-versions.py +++ b/continuous_integration/show-python-packages-versions.py @@ -1,6 +1,6 @@ import sys -DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel'] +DEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel'] def print_package_version(package_name, indent=' '):
pydantic__pydantic-3707
subclasses of bytes converted to bytes See https://github.com/duo-labs/py_webauthn/issues/113#issuecomment-1017816575 In short, I think Cython is converting subclasses of bytes to raw bytes here: https://github.com/samuelcolvin/pydantic/blob/9d631a3429a66f30742c1a52c94ac18ec6ba848d/pydantic/validators.py#L79 The fix should be as simple as changing the type hint.
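A hedged, minimal reproduction of the report (`MyBytes` is an illustrative stand-in for py_webauthn's bytes subclass, not part of either library): per the issue, the compiled wheel's plain `bytes` return hint on the validator causes a bytes subclass to come back as plain `bytes`, while the pure-Python install preserves the subclass.

```python
# Requires pydantic installed; the behavior described below is as reported in
# the linked issue and depends on whether the compiled (Cython) wheel is in use.
from pydantic import BaseModel


class MyBytes(bytes):
    """Illustrative bytes subclass (e.g. a credential-ID wrapper)."""


class Model(BaseModel):
    data: bytes


m = Model(data=MyBytes(b"abc"))
# Expected: the `isinstance(v, bytes)` branch returns the value unchanged, so
# `m.data` should still be MyBytes. Reported: with the compiled wheel it comes
# back as plain `bytes`, hence the proposed type-hint change in validators.py.
print(type(m.data))
```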
[ { "content": "import re\nfrom collections import OrderedDict, deque\nfrom collections.abc import Hashable as CollectionsHashable\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal, DecimalException\nfrom enum import Enum, IntEnum\nfrom ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Deque,\n Dict,\n FrozenSet,\n Generator,\n Hashable,\n List,\n NamedTuple,\n Pattern,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\nfrom uuid import UUID\n\nfrom . import errors\nfrom .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time\nfrom .typing import (\n AnyCallable,\n ForwardRef,\n all_literal_values,\n display_as_type,\n get_class,\n is_callable_type,\n is_literal_type,\n is_namedtuple,\n is_none_type,\n is_typeddict,\n)\nfrom .utils import almost_equal_floats, lenient_issubclass, sequence_like\n\nif TYPE_CHECKING:\n from typing_extensions import Literal, TypedDict\n\n from .config import BaseConfig\n from .fields import ModelField\n from .types import ConstrainedDecimal, ConstrainedFloat, ConstrainedInt\n\n ConstrainedNumber = Union[ConstrainedDecimal, ConstrainedFloat, ConstrainedInt]\n AnyOrderedDict = OrderedDict[Any, Any]\n Number = Union[int, float, Decimal]\n StrBytes = Union[str, bytes]\n\n\ndef str_validator(v: Any) -> Union[str]:\n if isinstance(v, str):\n if isinstance(v, Enum):\n return v.value\n else:\n return v\n elif isinstance(v, (float, int, Decimal)):\n # is there anything else we want to add here? If you think so, create an issue.\n return str(v)\n elif isinstance(v, (bytes, bytearray)):\n return v.decode()\n else:\n raise errors.StrError()\n\n\ndef strict_str_validator(v: Any) -> Union[str]:\n if isinstance(v, str) and not isinstance(v, Enum):\n return v\n raise errors.StrError()\n\n\ndef bytes_validator(v: Any) -> bytes:\n if isinstance(v, bytes):\n return v\n elif isinstance(v, bytearray):\n return bytes(v)\n elif isinstance(v, str):\n return v.encode()\n elif isinstance(v, (float, int, Decimal)):\n return str(v).encode()\n else:\n raise errors.BytesError()\n\n\ndef strict_bytes_validator(v: Any) -> Union[bytes]:\n if isinstance(v, bytes):\n return v\n elif isinstance(v, bytearray):\n return bytes(v)\n else:\n raise errors.BytesError()\n\n\nBOOL_FALSE = {0, '0', 'off', 'f', 'false', 'n', 'no'}\nBOOL_TRUE = {1, '1', 'on', 't', 'true', 'y', 'yes'}\n\n\ndef bool_validator(v: Any) -> bool:\n if v is True or v is False:\n return v\n if isinstance(v, bytes):\n v = v.decode()\n if isinstance(v, str):\n v = v.lower()\n try:\n if v in BOOL_TRUE:\n return True\n if v in BOOL_FALSE:\n return False\n except TypeError:\n raise errors.BoolError()\n raise errors.BoolError()\n\n\ndef int_validator(v: Any) -> int:\n if isinstance(v, int) and not (v is True or v is False):\n return v\n\n try:\n return int(v)\n except (TypeError, ValueError):\n raise errors.IntegerError()\n\n\ndef strict_int_validator(v: Any) -> int:\n if isinstance(v, int) and not (v is True or v is False):\n return v\n raise errors.IntegerError()\n\n\ndef float_validator(v: Any) -> float:\n if isinstance(v, float):\n return v\n\n try:\n return float(v)\n except (TypeError, ValueError):\n raise errors.FloatError()\n\n\ndef strict_float_validator(v: Any) -> float:\n if isinstance(v, float):\n return v\n raise errors.FloatError()\n\n\ndef number_multiple_validator(v: 'Number', field: 'ModelField') -> 'Number':\n field_type: 
ConstrainedNumber = field.type_\n if field_type.multiple_of is not None:\n mod = float(v) / float(field_type.multiple_of) % 1\n if not almost_equal_floats(mod, 0.0) and not almost_equal_floats(mod, 1.0):\n raise errors.NumberNotMultipleError(multiple_of=field_type.multiple_of)\n return v\n\n\ndef number_size_validator(v: 'Number', field: 'ModelField') -> 'Number':\n field_type: ConstrainedNumber = field.type_\n if field_type.gt is not None and not v > field_type.gt:\n raise errors.NumberNotGtError(limit_value=field_type.gt)\n elif field_type.ge is not None and not v >= field_type.ge:\n raise errors.NumberNotGeError(limit_value=field_type.ge)\n\n if field_type.lt is not None and not v < field_type.lt:\n raise errors.NumberNotLtError(limit_value=field_type.lt)\n if field_type.le is not None and not v <= field_type.le:\n raise errors.NumberNotLeError(limit_value=field_type.le)\n\n return v\n\n\ndef constant_validator(v: 'Any', field: 'ModelField') -> 'Any':\n \"\"\"Validate ``const`` fields.\n\n The value provided for a ``const`` field must be equal to the default value\n of the field. This is to support the keyword of the same name in JSON\n Schema.\n \"\"\"\n if v != field.default:\n raise errors.WrongConstantError(given=v, permitted=[field.default])\n\n return v\n\n\ndef anystr_length_validator(v: 'StrBytes', config: 'BaseConfig') -> 'StrBytes':\n v_len = len(v)\n\n min_length = config.min_anystr_length\n if v_len < min_length:\n raise errors.AnyStrMinLengthError(limit_value=min_length)\n\n max_length = config.max_anystr_length\n if max_length is not None and v_len > max_length:\n raise errors.AnyStrMaxLengthError(limit_value=max_length)\n\n return v\n\n\ndef anystr_strip_whitespace(v: 'StrBytes') -> 'StrBytes':\n return v.strip()\n\n\ndef anystr_lower(v: 'StrBytes') -> 'StrBytes':\n return v.lower()\n\n\ndef ordered_dict_validator(v: Any) -> 'AnyOrderedDict':\n if isinstance(v, OrderedDict):\n return v\n\n try:\n return OrderedDict(v)\n except (TypeError, ValueError):\n raise errors.DictError()\n\n\ndef dict_validator(v: Any) -> Dict[Any, Any]:\n if isinstance(v, dict):\n return v\n\n try:\n return dict(v)\n except (TypeError, ValueError):\n raise errors.DictError()\n\n\ndef list_validator(v: Any) -> List[Any]:\n if isinstance(v, list):\n return v\n elif sequence_like(v):\n return list(v)\n else:\n raise errors.ListError()\n\n\ndef tuple_validator(v: Any) -> Tuple[Any, ...]:\n if isinstance(v, tuple):\n return v\n elif sequence_like(v):\n return tuple(v)\n else:\n raise errors.TupleError()\n\n\ndef set_validator(v: Any) -> Set[Any]:\n if isinstance(v, set):\n return v\n elif sequence_like(v):\n return set(v)\n else:\n raise errors.SetError()\n\n\ndef frozenset_validator(v: Any) -> FrozenSet[Any]:\n if isinstance(v, frozenset):\n return v\n elif sequence_like(v):\n return frozenset(v)\n else:\n raise errors.FrozenSetError()\n\n\ndef deque_validator(v: Any) -> Deque[Any]:\n if isinstance(v, deque):\n return v\n elif sequence_like(v):\n return deque(v)\n else:\n raise errors.DequeError()\n\n\ndef enum_member_validator(v: Any, field: 'ModelField', config: 'BaseConfig') -> Enum:\n try:\n enum_v = field.type_(v)\n except ValueError:\n # field.type_ should be an enum, so will be iterable\n raise errors.EnumMemberError(enum_values=list(field.type_))\n return enum_v.value if config.use_enum_values else enum_v\n\n\ndef uuid_validator(v: Any, field: 'ModelField') -> UUID:\n try:\n if isinstance(v, str):\n v = UUID(v)\n elif isinstance(v, (bytes, bytearray)):\n try:\n v = UUID(v.decode())\n except 
ValueError:\n # 16 bytes in big-endian order as the bytes argument fail\n # the above check\n v = UUID(bytes=v)\n except ValueError:\n raise errors.UUIDError()\n\n if not isinstance(v, UUID):\n raise errors.UUIDError()\n\n required_version = getattr(field.type_, '_required_version', None)\n if required_version and v.version != required_version:\n raise errors.UUIDVersionError(required_version=required_version)\n\n return v\n\n\ndef decimal_validator(v: Any) -> Decimal:\n if isinstance(v, Decimal):\n return v\n elif isinstance(v, (bytes, bytearray)):\n v = v.decode()\n\n v = str(v).strip()\n\n try:\n v = Decimal(v)\n except DecimalException:\n raise errors.DecimalError()\n\n if not v.is_finite():\n raise errors.DecimalIsNotFiniteError()\n\n return v\n\n\ndef hashable_validator(v: Any) -> Hashable:\n if isinstance(v, Hashable):\n return v\n\n raise errors.HashableError()\n\n\ndef ip_v4_address_validator(v: Any) -> IPv4Address:\n if isinstance(v, IPv4Address):\n return v\n\n try:\n return IPv4Address(v)\n except ValueError:\n raise errors.IPv4AddressError()\n\n\ndef ip_v6_address_validator(v: Any) -> IPv6Address:\n if isinstance(v, IPv6Address):\n return v\n\n try:\n return IPv6Address(v)\n except ValueError:\n raise errors.IPv6AddressError()\n\n\ndef ip_v4_network_validator(v: Any) -> IPv4Network:\n \"\"\"\n Assume IPv4Network initialised with a default ``strict`` argument\n\n See more:\n https://docs.python.org/library/ipaddress.html#ipaddress.IPv4Network\n \"\"\"\n if isinstance(v, IPv4Network):\n return v\n\n try:\n return IPv4Network(v)\n except ValueError:\n raise errors.IPv4NetworkError()\n\n\ndef ip_v6_network_validator(v: Any) -> IPv6Network:\n \"\"\"\n Assume IPv6Network initialised with a default ``strict`` argument\n\n See more:\n https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network\n \"\"\"\n if isinstance(v, IPv6Network):\n return v\n\n try:\n return IPv6Network(v)\n except ValueError:\n raise errors.IPv6NetworkError()\n\n\ndef ip_v4_interface_validator(v: Any) -> IPv4Interface:\n if isinstance(v, IPv4Interface):\n return v\n\n try:\n return IPv4Interface(v)\n except ValueError:\n raise errors.IPv4InterfaceError()\n\n\ndef ip_v6_interface_validator(v: Any) -> IPv6Interface:\n if isinstance(v, IPv6Interface):\n return v\n\n try:\n return IPv6Interface(v)\n except ValueError:\n raise errors.IPv6InterfaceError()\n\n\ndef path_validator(v: Any) -> Path:\n if isinstance(v, Path):\n return v\n\n try:\n return Path(v)\n except TypeError:\n raise errors.PathError()\n\n\ndef path_exists_validator(v: Any) -> Path:\n if not v.exists():\n raise errors.PathNotExistsError(path=v)\n\n return v\n\n\ndef callable_validator(v: Any) -> AnyCallable:\n \"\"\"\n Perform a simple check if the value is callable.\n\n Note: complete matching of argument type hints and return types is not performed\n \"\"\"\n if callable(v):\n return v\n\n raise errors.CallableError(value=v)\n\n\ndef enum_validator(v: Any) -> Enum:\n if isinstance(v, Enum):\n return v\n\n raise errors.EnumError(value=v)\n\n\ndef int_enum_validator(v: Any) -> IntEnum:\n if isinstance(v, IntEnum):\n return v\n\n raise errors.IntEnumError(value=v)\n\n\ndef make_literal_validator(type_: Any) -> Callable[[Any], Any]:\n permitted_choices = all_literal_values(type_)\n\n # To have a O(1) complexity and still return one of the values set inside the `Literal`,\n # we create a dict with the set values (a set causes some problems with the way intersection works).\n # In some cases the set value and checked value can indeed be 
different (see `test_literal_validator_str_enum`)\n allowed_choices = {v: v for v in permitted_choices}\n\n def literal_validator(v: Any) -> Any:\n try:\n return allowed_choices[v]\n except KeyError:\n raise errors.WrongConstantError(given=v, permitted=permitted_choices)\n\n return literal_validator\n\n\ndef constr_length_validator(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':\n v_len = len(v)\n\n min_length = field.type_.min_length if field.type_.min_length is not None else config.min_anystr_length\n if v_len < min_length:\n raise errors.AnyStrMinLengthError(limit_value=min_length)\n\n max_length = field.type_.max_length if field.type_.max_length is not None else config.max_anystr_length\n if max_length is not None and v_len > max_length:\n raise errors.AnyStrMaxLengthError(limit_value=max_length)\n\n return v\n\n\ndef constr_strip_whitespace(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':\n strip_whitespace = field.type_.strip_whitespace or config.anystr_strip_whitespace\n if strip_whitespace:\n v = v.strip()\n\n return v\n\n\ndef constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':\n lower = field.type_.to_lower or config.anystr_lower\n if lower:\n v = v.lower()\n return v\n\n\ndef validate_json(v: Any, config: 'BaseConfig') -> Any:\n if v is None:\n # pass None through to other validators\n return v\n try:\n return config.json_loads(v) # type: ignore\n except ValueError:\n raise errors.JsonError()\n except TypeError:\n raise errors.JsonTypeError()\n\n\nT = TypeVar('T')\n\n\ndef make_arbitrary_type_validator(type_: Type[T]) -> Callable[[T], T]:\n def arbitrary_type_validator(v: Any) -> T:\n if isinstance(v, type_):\n return v\n raise errors.ArbitraryTypeError(expected_arbitrary_type=type_)\n\n return arbitrary_type_validator\n\n\ndef make_class_validator(type_: Type[T]) -> Callable[[Any], Type[T]]:\n def class_validator(v: Any) -> Type[T]:\n if lenient_issubclass(v, type_):\n return v\n raise errors.SubclassError(expected_class=type_)\n\n return class_validator\n\n\ndef any_class_validator(v: Any) -> Type[T]:\n if isinstance(v, type):\n return v\n raise errors.ClassError()\n\n\ndef none_validator(v: Any) -> 'Literal[None]':\n if v is None:\n return v\n raise errors.NotNoneError()\n\n\ndef pattern_validator(v: Any) -> Pattern[str]:\n if isinstance(v, Pattern):\n return v\n\n str_value = str_validator(v)\n\n try:\n return re.compile(str_value)\n except re.error:\n raise errors.PatternError()\n\n\nNamedTupleT = TypeVar('NamedTupleT', bound=NamedTuple)\n\n\ndef make_namedtuple_validator(namedtuple_cls: Type[NamedTupleT]) -> Callable[[Tuple[Any, ...]], NamedTupleT]:\n from .annotated_types import create_model_from_namedtuple\n\n NamedTupleModel = create_model_from_namedtuple(\n namedtuple_cls,\n __module__=namedtuple_cls.__module__,\n )\n namedtuple_cls.__pydantic_model__ = NamedTupleModel # type: ignore[attr-defined]\n\n def namedtuple_validator(values: Tuple[Any, ...]) -> NamedTupleT:\n annotations = NamedTupleModel.__annotations__\n\n if len(values) > len(annotations):\n raise errors.ListMaxLengthError(limit_value=len(annotations))\n\n dict_values: Dict[str, Any] = dict(zip(annotations, values))\n validated_dict_values: Dict[str, Any] = dict(NamedTupleModel(**dict_values))\n return namedtuple_cls(**validated_dict_values)\n\n return namedtuple_validator\n\n\ndef make_typeddict_validator(\n typeddict_cls: Type['TypedDict'], config: Type['BaseConfig'] # type: ignore[valid-type]\n) -> Callable[[Any], 
Dict[str, Any]]:\n from .annotated_types import create_model_from_typeddict\n\n TypedDictModel = create_model_from_typeddict(\n typeddict_cls,\n __config__=config,\n __module__=typeddict_cls.__module__,\n )\n typeddict_cls.__pydantic_model__ = TypedDictModel # type: ignore[attr-defined]\n\n def typeddict_validator(values: 'TypedDict') -> Dict[str, Any]: # type: ignore[valid-type]\n return TypedDictModel.parse_obj(values).dict(exclude_unset=True)\n\n return typeddict_validator\n\n\nclass IfConfig:\n def __init__(self, validator: AnyCallable, *config_attr_names: str) -> None:\n self.validator = validator\n self.config_attr_names = config_attr_names\n\n def check(self, config: Type['BaseConfig']) -> bool:\n return any(getattr(config, name) not in {None, False} for name in self.config_attr_names)\n\n\n# order is important here, for example: bool is a subclass of int so has to come first, datetime before date same,\n# IPv4Interface before IPv4Address, etc\n_VALIDATORS: List[Tuple[Type[Any], List[Any]]] = [\n (IntEnum, [int_validator, enum_member_validator]),\n (Enum, [enum_member_validator]),\n (\n str,\n [\n str_validator,\n IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),\n IfConfig(anystr_lower, 'anystr_lower'),\n IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),\n ],\n ),\n (\n bytes,\n [\n bytes_validator,\n IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),\n IfConfig(anystr_lower, 'anystr_lower'),\n IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),\n ],\n ),\n (bool, [bool_validator]),\n (int, [int_validator]),\n (float, [float_validator]),\n (Path, [path_validator]),\n (datetime, [parse_datetime]),\n (date, [parse_date]),\n (time, [parse_time]),\n (timedelta, [parse_duration]),\n (OrderedDict, [ordered_dict_validator]),\n (dict, [dict_validator]),\n (list, [list_validator]),\n (tuple, [tuple_validator]),\n (set, [set_validator]),\n (frozenset, [frozenset_validator]),\n (deque, [deque_validator]),\n (UUID, [uuid_validator]),\n (Decimal, [decimal_validator]),\n (IPv4Interface, [ip_v4_interface_validator]),\n (IPv6Interface, [ip_v6_interface_validator]),\n (IPv4Address, [ip_v4_address_validator]),\n (IPv6Address, [ip_v6_address_validator]),\n (IPv4Network, [ip_v4_network_validator]),\n (IPv6Network, [ip_v6_network_validator]),\n]\n\n\ndef find_validators( # noqa: C901 (ignore complexity)\n type_: Type[Any], config: Type['BaseConfig']\n) -> Generator[AnyCallable, None, None]:\n from .dataclasses import is_builtin_dataclass, make_dataclass_validator\n\n if type_ is Any or type_ is object:\n return\n type_type = type_.__class__\n if type_type == ForwardRef or type_type == TypeVar:\n return\n\n if is_none_type(type_):\n yield none_validator\n return\n if type_ is Pattern:\n yield pattern_validator\n return\n if type_ is Hashable or type_ is CollectionsHashable:\n yield hashable_validator\n return\n if is_callable_type(type_):\n yield callable_validator\n return\n if is_literal_type(type_):\n yield make_literal_validator(type_)\n return\n if is_builtin_dataclass(type_):\n yield from make_dataclass_validator(type_, config)\n return\n if type_ is Enum:\n yield enum_validator\n return\n if type_ is IntEnum:\n yield int_enum_validator\n return\n if is_namedtuple(type_):\n yield tuple_validator\n yield make_namedtuple_validator(type_)\n return\n if is_typeddict(type_):\n yield make_typeddict_validator(type_, config)\n return\n\n class_ = get_class(type_)\n if class_ is not None:\n if isinstance(class_, type):\n yield 
make_class_validator(class_)\n else:\n yield any_class_validator\n return\n\n for val_type, validators in _VALIDATORS:\n try:\n if issubclass(type_, val_type):\n for v in validators:\n if isinstance(v, IfConfig):\n if v.check(config):\n yield v.validator\n else:\n yield v\n return\n except TypeError:\n raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})')\n\n if config.arbitrary_types_allowed:\n yield make_arbitrary_type_validator(type_)\n else:\n raise RuntimeError(f'no validator found for {type_}, see `arbitrary_types_allowed` in Config')\n", "path": "pydantic/validators.py" } ]
[ { "content": "import re\nfrom collections import OrderedDict, deque\nfrom collections.abc import Hashable as CollectionsHashable\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal, DecimalException\nfrom enum import Enum, IntEnum\nfrom ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Deque,\n Dict,\n FrozenSet,\n Generator,\n Hashable,\n List,\n NamedTuple,\n Pattern,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\nfrom uuid import UUID\n\nfrom . import errors\nfrom .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time\nfrom .typing import (\n AnyCallable,\n ForwardRef,\n all_literal_values,\n display_as_type,\n get_class,\n is_callable_type,\n is_literal_type,\n is_namedtuple,\n is_none_type,\n is_typeddict,\n)\nfrom .utils import almost_equal_floats, lenient_issubclass, sequence_like\n\nif TYPE_CHECKING:\n from typing_extensions import Literal, TypedDict\n\n from .config import BaseConfig\n from .fields import ModelField\n from .types import ConstrainedDecimal, ConstrainedFloat, ConstrainedInt\n\n ConstrainedNumber = Union[ConstrainedDecimal, ConstrainedFloat, ConstrainedInt]\n AnyOrderedDict = OrderedDict[Any, Any]\n Number = Union[int, float, Decimal]\n StrBytes = Union[str, bytes]\n\n\ndef str_validator(v: Any) -> Union[str]:\n if isinstance(v, str):\n if isinstance(v, Enum):\n return v.value\n else:\n return v\n elif isinstance(v, (float, int, Decimal)):\n # is there anything else we want to add here? If you think so, create an issue.\n return str(v)\n elif isinstance(v, (bytes, bytearray)):\n return v.decode()\n else:\n raise errors.StrError()\n\n\ndef strict_str_validator(v: Any) -> Union[str]:\n if isinstance(v, str) and not isinstance(v, Enum):\n return v\n raise errors.StrError()\n\n\ndef bytes_validator(v: Any) -> Union[bytes]:\n if isinstance(v, bytes):\n return v\n elif isinstance(v, bytearray):\n return bytes(v)\n elif isinstance(v, str):\n return v.encode()\n elif isinstance(v, (float, int, Decimal)):\n return str(v).encode()\n else:\n raise errors.BytesError()\n\n\ndef strict_bytes_validator(v: Any) -> Union[bytes]:\n if isinstance(v, bytes):\n return v\n elif isinstance(v, bytearray):\n return bytes(v)\n else:\n raise errors.BytesError()\n\n\nBOOL_FALSE = {0, '0', 'off', 'f', 'false', 'n', 'no'}\nBOOL_TRUE = {1, '1', 'on', 't', 'true', 'y', 'yes'}\n\n\ndef bool_validator(v: Any) -> bool:\n if v is True or v is False:\n return v\n if isinstance(v, bytes):\n v = v.decode()\n if isinstance(v, str):\n v = v.lower()\n try:\n if v in BOOL_TRUE:\n return True\n if v in BOOL_FALSE:\n return False\n except TypeError:\n raise errors.BoolError()\n raise errors.BoolError()\n\n\ndef int_validator(v: Any) -> int:\n if isinstance(v, int) and not (v is True or v is False):\n return v\n\n try:\n return int(v)\n except (TypeError, ValueError):\n raise errors.IntegerError()\n\n\ndef strict_int_validator(v: Any) -> int:\n if isinstance(v, int) and not (v is True or v is False):\n return v\n raise errors.IntegerError()\n\n\ndef float_validator(v: Any) -> float:\n if isinstance(v, float):\n return v\n\n try:\n return float(v)\n except (TypeError, ValueError):\n raise errors.FloatError()\n\n\ndef strict_float_validator(v: Any) -> float:\n if isinstance(v, float):\n return v\n raise errors.FloatError()\n\n\ndef number_multiple_validator(v: 'Number', field: 'ModelField') -> 'Number':\n field_type: 
ConstrainedNumber = field.type_\n if field_type.multiple_of is not None:\n mod = float(v) / float(field_type.multiple_of) % 1\n if not almost_equal_floats(mod, 0.0) and not almost_equal_floats(mod, 1.0):\n raise errors.NumberNotMultipleError(multiple_of=field_type.multiple_of)\n return v\n\n\ndef number_size_validator(v: 'Number', field: 'ModelField') -> 'Number':\n field_type: ConstrainedNumber = field.type_\n if field_type.gt is not None and not v > field_type.gt:\n raise errors.NumberNotGtError(limit_value=field_type.gt)\n elif field_type.ge is not None and not v >= field_type.ge:\n raise errors.NumberNotGeError(limit_value=field_type.ge)\n\n if field_type.lt is not None and not v < field_type.lt:\n raise errors.NumberNotLtError(limit_value=field_type.lt)\n if field_type.le is not None and not v <= field_type.le:\n raise errors.NumberNotLeError(limit_value=field_type.le)\n\n return v\n\n\ndef constant_validator(v: 'Any', field: 'ModelField') -> 'Any':\n \"\"\"Validate ``const`` fields.\n\n The value provided for a ``const`` field must be equal to the default value\n of the field. This is to support the keyword of the same name in JSON\n Schema.\n \"\"\"\n if v != field.default:\n raise errors.WrongConstantError(given=v, permitted=[field.default])\n\n return v\n\n\ndef anystr_length_validator(v: 'StrBytes', config: 'BaseConfig') -> 'StrBytes':\n v_len = len(v)\n\n min_length = config.min_anystr_length\n if v_len < min_length:\n raise errors.AnyStrMinLengthError(limit_value=min_length)\n\n max_length = config.max_anystr_length\n if max_length is not None and v_len > max_length:\n raise errors.AnyStrMaxLengthError(limit_value=max_length)\n\n return v\n\n\ndef anystr_strip_whitespace(v: 'StrBytes') -> 'StrBytes':\n return v.strip()\n\n\ndef anystr_lower(v: 'StrBytes') -> 'StrBytes':\n return v.lower()\n\n\ndef ordered_dict_validator(v: Any) -> 'AnyOrderedDict':\n if isinstance(v, OrderedDict):\n return v\n\n try:\n return OrderedDict(v)\n except (TypeError, ValueError):\n raise errors.DictError()\n\n\ndef dict_validator(v: Any) -> Dict[Any, Any]:\n if isinstance(v, dict):\n return v\n\n try:\n return dict(v)\n except (TypeError, ValueError):\n raise errors.DictError()\n\n\ndef list_validator(v: Any) -> List[Any]:\n if isinstance(v, list):\n return v\n elif sequence_like(v):\n return list(v)\n else:\n raise errors.ListError()\n\n\ndef tuple_validator(v: Any) -> Tuple[Any, ...]:\n if isinstance(v, tuple):\n return v\n elif sequence_like(v):\n return tuple(v)\n else:\n raise errors.TupleError()\n\n\ndef set_validator(v: Any) -> Set[Any]:\n if isinstance(v, set):\n return v\n elif sequence_like(v):\n return set(v)\n else:\n raise errors.SetError()\n\n\ndef frozenset_validator(v: Any) -> FrozenSet[Any]:\n if isinstance(v, frozenset):\n return v\n elif sequence_like(v):\n return frozenset(v)\n else:\n raise errors.FrozenSetError()\n\n\ndef deque_validator(v: Any) -> Deque[Any]:\n if isinstance(v, deque):\n return v\n elif sequence_like(v):\n return deque(v)\n else:\n raise errors.DequeError()\n\n\ndef enum_member_validator(v: Any, field: 'ModelField', config: 'BaseConfig') -> Enum:\n try:\n enum_v = field.type_(v)\n except ValueError:\n # field.type_ should be an enum, so will be iterable\n raise errors.EnumMemberError(enum_values=list(field.type_))\n return enum_v.value if config.use_enum_values else enum_v\n\n\ndef uuid_validator(v: Any, field: 'ModelField') -> UUID:\n try:\n if isinstance(v, str):\n v = UUID(v)\n elif isinstance(v, (bytes, bytearray)):\n try:\n v = UUID(v.decode())\n except 
ValueError:\n # 16 bytes in big-endian order as the bytes argument fail\n # the above check\n v = UUID(bytes=v)\n except ValueError:\n raise errors.UUIDError()\n\n if not isinstance(v, UUID):\n raise errors.UUIDError()\n\n required_version = getattr(field.type_, '_required_version', None)\n if required_version and v.version != required_version:\n raise errors.UUIDVersionError(required_version=required_version)\n\n return v\n\n\ndef decimal_validator(v: Any) -> Decimal:\n if isinstance(v, Decimal):\n return v\n elif isinstance(v, (bytes, bytearray)):\n v = v.decode()\n\n v = str(v).strip()\n\n try:\n v = Decimal(v)\n except DecimalException:\n raise errors.DecimalError()\n\n if not v.is_finite():\n raise errors.DecimalIsNotFiniteError()\n\n return v\n\n\ndef hashable_validator(v: Any) -> Hashable:\n if isinstance(v, Hashable):\n return v\n\n raise errors.HashableError()\n\n\ndef ip_v4_address_validator(v: Any) -> IPv4Address:\n if isinstance(v, IPv4Address):\n return v\n\n try:\n return IPv4Address(v)\n except ValueError:\n raise errors.IPv4AddressError()\n\n\ndef ip_v6_address_validator(v: Any) -> IPv6Address:\n if isinstance(v, IPv6Address):\n return v\n\n try:\n return IPv6Address(v)\n except ValueError:\n raise errors.IPv6AddressError()\n\n\ndef ip_v4_network_validator(v: Any) -> IPv4Network:\n \"\"\"\n Assume IPv4Network initialised with a default ``strict`` argument\n\n See more:\n https://docs.python.org/library/ipaddress.html#ipaddress.IPv4Network\n \"\"\"\n if isinstance(v, IPv4Network):\n return v\n\n try:\n return IPv4Network(v)\n except ValueError:\n raise errors.IPv4NetworkError()\n\n\ndef ip_v6_network_validator(v: Any) -> IPv6Network:\n \"\"\"\n Assume IPv6Network initialised with a default ``strict`` argument\n\n See more:\n https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network\n \"\"\"\n if isinstance(v, IPv6Network):\n return v\n\n try:\n return IPv6Network(v)\n except ValueError:\n raise errors.IPv6NetworkError()\n\n\ndef ip_v4_interface_validator(v: Any) -> IPv4Interface:\n if isinstance(v, IPv4Interface):\n return v\n\n try:\n return IPv4Interface(v)\n except ValueError:\n raise errors.IPv4InterfaceError()\n\n\ndef ip_v6_interface_validator(v: Any) -> IPv6Interface:\n if isinstance(v, IPv6Interface):\n return v\n\n try:\n return IPv6Interface(v)\n except ValueError:\n raise errors.IPv6InterfaceError()\n\n\ndef path_validator(v: Any) -> Path:\n if isinstance(v, Path):\n return v\n\n try:\n return Path(v)\n except TypeError:\n raise errors.PathError()\n\n\ndef path_exists_validator(v: Any) -> Path:\n if not v.exists():\n raise errors.PathNotExistsError(path=v)\n\n return v\n\n\ndef callable_validator(v: Any) -> AnyCallable:\n \"\"\"\n Perform a simple check if the value is callable.\n\n Note: complete matching of argument type hints and return types is not performed\n \"\"\"\n if callable(v):\n return v\n\n raise errors.CallableError(value=v)\n\n\ndef enum_validator(v: Any) -> Enum:\n if isinstance(v, Enum):\n return v\n\n raise errors.EnumError(value=v)\n\n\ndef int_enum_validator(v: Any) -> IntEnum:\n if isinstance(v, IntEnum):\n return v\n\n raise errors.IntEnumError(value=v)\n\n\ndef make_literal_validator(type_: Any) -> Callable[[Any], Any]:\n permitted_choices = all_literal_values(type_)\n\n # To have a O(1) complexity and still return one of the values set inside the `Literal`,\n # we create a dict with the set values (a set causes some problems with the way intersection works).\n # In some cases the set value and checked value can indeed be 
different (see `test_literal_validator_str_enum`)\n allowed_choices = {v: v for v in permitted_choices}\n\n def literal_validator(v: Any) -> Any:\n try:\n return allowed_choices[v]\n except KeyError:\n raise errors.WrongConstantError(given=v, permitted=permitted_choices)\n\n return literal_validator\n\n\ndef constr_length_validator(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':\n v_len = len(v)\n\n min_length = field.type_.min_length if field.type_.min_length is not None else config.min_anystr_length\n if v_len < min_length:\n raise errors.AnyStrMinLengthError(limit_value=min_length)\n\n max_length = field.type_.max_length if field.type_.max_length is not None else config.max_anystr_length\n if max_length is not None and v_len > max_length:\n raise errors.AnyStrMaxLengthError(limit_value=max_length)\n\n return v\n\n\ndef constr_strip_whitespace(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':\n strip_whitespace = field.type_.strip_whitespace or config.anystr_strip_whitespace\n if strip_whitespace:\n v = v.strip()\n\n return v\n\n\ndef constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':\n lower = field.type_.to_lower or config.anystr_lower\n if lower:\n v = v.lower()\n return v\n\n\ndef validate_json(v: Any, config: 'BaseConfig') -> Any:\n if v is None:\n # pass None through to other validators\n return v\n try:\n return config.json_loads(v) # type: ignore\n except ValueError:\n raise errors.JsonError()\n except TypeError:\n raise errors.JsonTypeError()\n\n\nT = TypeVar('T')\n\n\ndef make_arbitrary_type_validator(type_: Type[T]) -> Callable[[T], T]:\n def arbitrary_type_validator(v: Any) -> T:\n if isinstance(v, type_):\n return v\n raise errors.ArbitraryTypeError(expected_arbitrary_type=type_)\n\n return arbitrary_type_validator\n\n\ndef make_class_validator(type_: Type[T]) -> Callable[[Any], Type[T]]:\n def class_validator(v: Any) -> Type[T]:\n if lenient_issubclass(v, type_):\n return v\n raise errors.SubclassError(expected_class=type_)\n\n return class_validator\n\n\ndef any_class_validator(v: Any) -> Type[T]:\n if isinstance(v, type):\n return v\n raise errors.ClassError()\n\n\ndef none_validator(v: Any) -> 'Literal[None]':\n if v is None:\n return v\n raise errors.NotNoneError()\n\n\ndef pattern_validator(v: Any) -> Pattern[str]:\n if isinstance(v, Pattern):\n return v\n\n str_value = str_validator(v)\n\n try:\n return re.compile(str_value)\n except re.error:\n raise errors.PatternError()\n\n\nNamedTupleT = TypeVar('NamedTupleT', bound=NamedTuple)\n\n\ndef make_namedtuple_validator(namedtuple_cls: Type[NamedTupleT]) -> Callable[[Tuple[Any, ...]], NamedTupleT]:\n from .annotated_types import create_model_from_namedtuple\n\n NamedTupleModel = create_model_from_namedtuple(\n namedtuple_cls,\n __module__=namedtuple_cls.__module__,\n )\n namedtuple_cls.__pydantic_model__ = NamedTupleModel # type: ignore[attr-defined]\n\n def namedtuple_validator(values: Tuple[Any, ...]) -> NamedTupleT:\n annotations = NamedTupleModel.__annotations__\n\n if len(values) > len(annotations):\n raise errors.ListMaxLengthError(limit_value=len(annotations))\n\n dict_values: Dict[str, Any] = dict(zip(annotations, values))\n validated_dict_values: Dict[str, Any] = dict(NamedTupleModel(**dict_values))\n return namedtuple_cls(**validated_dict_values)\n\n return namedtuple_validator\n\n\ndef make_typeddict_validator(\n typeddict_cls: Type['TypedDict'], config: Type['BaseConfig'] # type: ignore[valid-type]\n) -> Callable[[Any], 
Dict[str, Any]]:\n from .annotated_types import create_model_from_typeddict\n\n TypedDictModel = create_model_from_typeddict(\n typeddict_cls,\n __config__=config,\n __module__=typeddict_cls.__module__,\n )\n typeddict_cls.__pydantic_model__ = TypedDictModel # type: ignore[attr-defined]\n\n def typeddict_validator(values: 'TypedDict') -> Dict[str, Any]: # type: ignore[valid-type]\n return TypedDictModel.parse_obj(values).dict(exclude_unset=True)\n\n return typeddict_validator\n\n\nclass IfConfig:\n def __init__(self, validator: AnyCallable, *config_attr_names: str) -> None:\n self.validator = validator\n self.config_attr_names = config_attr_names\n\n def check(self, config: Type['BaseConfig']) -> bool:\n return any(getattr(config, name) not in {None, False} for name in self.config_attr_names)\n\n\n# order is important here, for example: bool is a subclass of int so has to come first, datetime before date same,\n# IPv4Interface before IPv4Address, etc\n_VALIDATORS: List[Tuple[Type[Any], List[Any]]] = [\n (IntEnum, [int_validator, enum_member_validator]),\n (Enum, [enum_member_validator]),\n (\n str,\n [\n str_validator,\n IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),\n IfConfig(anystr_lower, 'anystr_lower'),\n IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),\n ],\n ),\n (\n bytes,\n [\n bytes_validator,\n IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),\n IfConfig(anystr_lower, 'anystr_lower'),\n IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),\n ],\n ),\n (bool, [bool_validator]),\n (int, [int_validator]),\n (float, [float_validator]),\n (Path, [path_validator]),\n (datetime, [parse_datetime]),\n (date, [parse_date]),\n (time, [parse_time]),\n (timedelta, [parse_duration]),\n (OrderedDict, [ordered_dict_validator]),\n (dict, [dict_validator]),\n (list, [list_validator]),\n (tuple, [tuple_validator]),\n (set, [set_validator]),\n (frozenset, [frozenset_validator]),\n (deque, [deque_validator]),\n (UUID, [uuid_validator]),\n (Decimal, [decimal_validator]),\n (IPv4Interface, [ip_v4_interface_validator]),\n (IPv6Interface, [ip_v6_interface_validator]),\n (IPv4Address, [ip_v4_address_validator]),\n (IPv6Address, [ip_v6_address_validator]),\n (IPv4Network, [ip_v4_network_validator]),\n (IPv6Network, [ip_v6_network_validator]),\n]\n\n\ndef find_validators( # noqa: C901 (ignore complexity)\n type_: Type[Any], config: Type['BaseConfig']\n) -> Generator[AnyCallable, None, None]:\n from .dataclasses import is_builtin_dataclass, make_dataclass_validator\n\n if type_ is Any or type_ is object:\n return\n type_type = type_.__class__\n if type_type == ForwardRef or type_type == TypeVar:\n return\n\n if is_none_type(type_):\n yield none_validator\n return\n if type_ is Pattern:\n yield pattern_validator\n return\n if type_ is Hashable or type_ is CollectionsHashable:\n yield hashable_validator\n return\n if is_callable_type(type_):\n yield callable_validator\n return\n if is_literal_type(type_):\n yield make_literal_validator(type_)\n return\n if is_builtin_dataclass(type_):\n yield from make_dataclass_validator(type_, config)\n return\n if type_ is Enum:\n yield enum_validator\n return\n if type_ is IntEnum:\n yield int_enum_validator\n return\n if is_namedtuple(type_):\n yield tuple_validator\n yield make_namedtuple_validator(type_)\n return\n if is_typeddict(type_):\n yield make_typeddict_validator(type_, config)\n return\n\n class_ = get_class(type_)\n if class_ is not None:\n if isinstance(class_, type):\n yield 
make_class_validator(class_)\n else:\n yield any_class_validator\n return\n\n for val_type, validators in _VALIDATORS:\n try:\n if issubclass(type_, val_type):\n for v in validators:\n if isinstance(v, IfConfig):\n if v.check(config):\n yield v.validator\n else:\n yield v\n return\n except TypeError:\n raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})')\n\n if config.arbitrary_types_allowed:\n yield make_arbitrary_type_validator(type_)\n else:\n raise RuntimeError(f'no validator found for {type_}, see `arbitrary_types_allowed` in Config')\n", "path": "pydantic/validators.py" } ]
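The _VALIDATORS table and find_validators in the file above rely on an ordered issubclass lookup: the first (type, validators) pair whose type the field type subclasses supplies the validator chain, which is why bool is listed before int, datetime before date, and IPv4Interface before IPv4Address. The following is a minimal standalone sketch of that lookup idea only — the names (TABLE, pick_validator) are illustrative and are not pydantic's API:

from typing import Any, Callable, List, Tuple, Type

# Ordered table: more specific types must come before their bases,
# mirroring the ordering note above _VALIDATORS (bool before int, etc.).
TABLE: List[Tuple[type, Callable[[Any], Any]]] = [
    (bool, lambda v: str(v).lower() in {'1', 'true', 'yes', 'on'}),
    (int, lambda v: int(v)),
]

def pick_validator(tp: Type[Any]) -> Callable[[Any], Any]:
    # First issubclass match wins, like the loop at the end of find_validators.
    for base, validator in TABLE:
        if issubclass(tp, base):
            return validator
    raise RuntimeError(f'no validator found for {tp}')

assert pick_validator(bool)('yes') is True   # matched the bool row, not the int row
assert pick_validator(int)('3') == 3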
diff --git a/changes/3706-samuelcolvin.md b/changes/3706-samuelcolvin.md new file mode 100644 index 00000000000..3a22afee678 --- /dev/null +++ b/changes/3706-samuelcolvin.md @@ -0,0 +1 @@ +Prevent subclasses of bytes being converted to bytes diff --git a/pydantic/validators.py b/pydantic/validators.py index 63b7a59e080..d4783d97b12 100644 --- a/pydantic/validators.py +++ b/pydantic/validators.py @@ -76,7 +76,7 @@ def strict_str_validator(v: Any) -> Union[str]: raise errors.StrError() -def bytes_validator(v: Any) -> bytes: +def bytes_validator(v: Any) -> Union[bytes]: if isinstance(v, bytes): return v elif isinstance(v, bytearray): diff --git a/tests/test_edge_cases.py b/tests/test_edge_cases.py index dd07eb3d37b..5da62257040 100644 --- a/tests/test_edge_cases.py +++ b/tests/test_edge_cases.py @@ -1906,3 +1906,29 @@ class Config: arbitrary_types_allowed = True assert Model().x == Foo() + + +def test_bytes_subclass(): + class MyModel(BaseModel): + my_bytes: bytes + + class BytesSubclass(bytes): + def __new__(cls, data: bytes): + self = bytes.__new__(cls, data) + return self + + m = MyModel(my_bytes=BytesSubclass(b'foobar')) + assert m.my_bytes.__class__ == BytesSubclass + + +def test_int_subclass(): + class MyModel(BaseModel): + my_int: int + + class IntSubclass(int): + def __new__(cls, data: int): + self = int.__new__(cls, data) + return self + + m = MyModel(my_int=IntSubclass(123)) + assert m.my_int.__class__ == IntSubclass
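Taken together with the tests added above, the one-line change to bytes_validator means a bytes subclass now survives model validation instead of being coerced to plain bytes; the Union[bytes] spelling presumably keeps the Cython-compiled build from coercing the return value, matching the existing Union[str] annotation on str_validator. A usage sketch against pydantic v1 follows — the Token and Request names are illustrative and are not part of the PR:

from pydantic import BaseModel


class Token(bytes):
    # A bytes subclass (e.g. a token wrapper); per the PR description, before
    # this change validation could hand back a plain bytes instance instead.
    pass


class Request(BaseModel):
    body: bytes


req = Request(body=Token(b'abc123'))
assert type(req.body) is Token   # subclass preserved, mirroring test_bytes_subclass
assert req.body == b'abc123'     # still behaves as ordinary bytes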