Dataset columns (type and observed value range):

  problem_id         string   lengths 18-22
  source             string   1 distinct value
  task_type          string   1 distinct value
  in_source_id       string   lengths 13-58
  prompt             string   lengths 1.71k-9.01k
  golden_diff        string   lengths 151-4.94k
  verification_info  string   lengths 465-11.3k
  num_tokens_prompt  int64    557-2.05k
  num_tokens_diff    int64    48-1.02k
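Each row pairs a SWE-bench-style prompt (issue text plus the relevant source files) with the golden diff that resolves it, and verification_info bundles what is needed to check a candidate patch. A minimal loading sketch follows; the Hub repo id "rasdani/github-patches" and the "train" split are assumptions read off the source column, not guaranteed by this dump:

```python
# Minimal sketch, assuming the dataset lives on the Hugging Face Hub under
# "rasdani/github-patches" with a "train" split (both inferred, not confirmed).
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
print(ds.column_names)  # should match the schema listed above
row = ds[0]
print(row["problem_id"], row["task_type"], row["in_source_id"])
```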
gh_patches_debug_29619
rasdani/github-patches
git_diff
learningequality__kolibri-4115
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> add facility setting to disable 'guest' browsing ### Observed behavior Some organizations prefer that their users always be logged in ### Expected behavior A facility setting can be added, e.g. "Disable guest access". When this setting is active: * "Access as guest" is hidden on the log-in page * When a user visits a page without an active session, they are redirected to the log-in page ### User-facing consequences Sometimes students are accessing as a guest because it's easier than dealing with a log-in ### Context k0.10.0 ---- cc @jtamiace @rtibbles this seem reasonable? </issue> <code> [start of kolibri/core/device/serializers.py] 1 from django.db import transaction 2 from django.utils.translation import check_for_language 3 from django.utils.translation import ugettext_lazy as _ 4 from rest_framework import serializers 5 6 from .models import DevicePermissions 7 from .models import DeviceSettings 8 from kolibri.core.auth.constants.facility_presets import choices 9 from kolibri.core.auth.constants.facility_presets import mappings 10 from kolibri.core.auth.constants.role_kinds import ADMIN 11 from kolibri.core.auth.models import Facility 12 from kolibri.core.auth.models import FacilityUser 13 from kolibri.core.auth.serializers import FacilitySerializer 14 from kolibri.core.auth.serializers import FacilityUserSerializer 15 16 17 class DevicePermissionsSerializer(serializers.ModelSerializer): 18 19 user = serializers.PrimaryKeyRelatedField(queryset=FacilityUser.objects.all()) 20 21 class Meta: 22 model = DevicePermissions 23 fields = ( 24 'user', 'is_superuser', 'can_manage_content', 25 ) 26 27 28 class NoFacilityFacilityUserSerializer(FacilityUserSerializer): 29 30 class Meta: 31 model = FacilityUser 32 fields = ('id', 'username', 'full_name', 'password', ) 33 34 35 class DeviceProvisionSerializer(serializers.Serializer): 36 facility = FacilitySerializer() 37 preset = serializers.ChoiceField(choices=choices) 38 superuser = NoFacilityFacilityUserSerializer() 39 language_id = serializers.CharField(max_length=15) 40 41 class Meta: 42 fields = ('facility', 'dataset', 'superuser', 'language_id') 43 44 def validate_language_id(self, language_id): 45 """ 46 Check that the language_id is supported by Kolibri 47 """ 48 if not check_for_language(language_id): 49 raise serializers.ValidationError(_("Language is not supported by Kolibri")) 50 return language_id 51 52 def create(self, validated_data): 53 """ 54 Endpoint for initial setup of a device. 
55 Expects a value for: 56 default language - the default language of this Kolibri device 57 facility - the required fields for setting up a facility 58 facilitydataset - facility configuration options 59 superuser - the required fields for a facilityuser who will be set as the super user for this device 60 """ 61 with transaction.atomic(): 62 facility = Facility.objects.create(**validated_data.pop('facility')) 63 preset = validated_data.pop('preset') 64 dataset_data = mappings[preset] 65 for key, value in dataset_data.items(): 66 setattr(facility.dataset, key, value) 67 facility.dataset.save() 68 superuser_data = validated_data.pop('superuser') 69 superuser_data['facility'] = facility 70 superuser = FacilityUserSerializer(data=superuser_data).create(superuser_data) 71 superuser.set_password(superuser_data["password"]) 72 superuser.save() 73 facility.add_role(superuser, ADMIN) 74 DevicePermissions.objects.create(user=superuser, is_superuser=True) 75 language_id = validated_data.pop('language_id') 76 device_settings, created = DeviceSettings.objects.get_or_create() 77 device_settings.is_provisioned = True 78 device_settings.language_id = language_id 79 device_settings.default_facility = facility 80 device_settings.save() 81 return { 82 "facility": facility, 83 "preset": preset, 84 "superuser": superuser, 85 "language_id": language_id 86 } 87 [end of kolibri/core/device/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/core/device/serializers.py b/kolibri/core/device/serializers.py --- a/kolibri/core/device/serializers.py +++ b/kolibri/core/device/serializers.py @@ -37,9 +37,10 @@ preset = serializers.ChoiceField(choices=choices) superuser = NoFacilityFacilityUserSerializer() language_id = serializers.CharField(max_length=15) + settings = serializers.JSONField() class Meta: - fields = ('facility', 'dataset', 'superuser', 'language_id') + fields = ('facility', 'dataset', 'superuser', 'language_id', 'settings',) def validate_language_id(self, language_id): """ @@ -64,6 +65,10 @@ dataset_data = mappings[preset] for key, value in dataset_data.items(): setattr(facility.dataset, key, value) + # overwrite the settings in dataset_data with validated_data.settings + custom_settings = validated_data.pop('settings') + for key, value in custom_settings.items(): + setattr(facility.dataset, key, value) facility.dataset.save() superuser_data = validated_data.pop('superuser') superuser_data['facility'] = facility @@ -82,5 +87,6 @@ "facility": facility, "preset": preset, "superuser": superuser, - "language_id": language_id + "language_id": language_id, + "settings": custom_settings, }
{"golden_diff": "diff --git a/kolibri/core/device/serializers.py b/kolibri/core/device/serializers.py\n--- a/kolibri/core/device/serializers.py\n+++ b/kolibri/core/device/serializers.py\n@@ -37,9 +37,10 @@\n preset = serializers.ChoiceField(choices=choices)\n superuser = NoFacilityFacilityUserSerializer()\n language_id = serializers.CharField(max_length=15)\n+ settings = serializers.JSONField()\n \n class Meta:\n- fields = ('facility', 'dataset', 'superuser', 'language_id')\n+ fields = ('facility', 'dataset', 'superuser', 'language_id', 'settings',)\n \n def validate_language_id(self, language_id):\n \"\"\"\n@@ -64,6 +65,10 @@\n dataset_data = mappings[preset]\n for key, value in dataset_data.items():\n setattr(facility.dataset, key, value)\n+ # overwrite the settings in dataset_data with validated_data.settings\n+ custom_settings = validated_data.pop('settings')\n+ for key, value in custom_settings.items():\n+ setattr(facility.dataset, key, value)\n facility.dataset.save()\n superuser_data = validated_data.pop('superuser')\n superuser_data['facility'] = facility\n@@ -82,5 +87,6 @@\n \"facility\": facility,\n \"preset\": preset,\n \"superuser\": superuser,\n- \"language_id\": language_id\n+ \"language_id\": language_id,\n+ \"settings\": custom_settings,\n }\n", "issue": "add facility setting to disable 'guest' browsing\n\r\n### Observed behavior\r\n\r\nSome organizations prefer that their users always be logged in\r\n\r\n### Expected behavior\r\n\r\nA facility setting can be added, e.g. \"Disable guest access\".\r\n\r\nWhen this setting is active:\r\n\r\n* \"Access as guest\" is hidden on the log-in page\r\n* When a user visits a page without an active session, they are redirected to the log-in page\r\n\r\n### User-facing consequences\r\n\r\nSometimes students are accessing as a guest because it's easier than dealing with a log-in\r\n\r\n### Context\r\n\r\nk0.10.0\r\n\r\n\r\n----\r\n\r\ncc @jtamiace @rtibbles this seem reasonable?\r\n\r\n\n", "before_files": [{"content": "from django.db import transaction\nfrom django.utils.translation import check_for_language\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import serializers\n\nfrom .models import DevicePermissions\nfrom .models import DeviceSettings\nfrom kolibri.core.auth.constants.facility_presets import choices\nfrom kolibri.core.auth.constants.facility_presets import mappings\nfrom kolibri.core.auth.constants.role_kinds import ADMIN\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.auth.serializers import FacilitySerializer\nfrom kolibri.core.auth.serializers import FacilityUserSerializer\n\n\nclass DevicePermissionsSerializer(serializers.ModelSerializer):\n\n user = serializers.PrimaryKeyRelatedField(queryset=FacilityUser.objects.all())\n\n class Meta:\n model = DevicePermissions\n fields = (\n 'user', 'is_superuser', 'can_manage_content',\n )\n\n\nclass NoFacilityFacilityUserSerializer(FacilityUserSerializer):\n\n class Meta:\n model = FacilityUser\n fields = ('id', 'username', 'full_name', 'password', )\n\n\nclass DeviceProvisionSerializer(serializers.Serializer):\n facility = FacilitySerializer()\n preset = serializers.ChoiceField(choices=choices)\n superuser = NoFacilityFacilityUserSerializer()\n language_id = serializers.CharField(max_length=15)\n\n class Meta:\n fields = ('facility', 'dataset', 'superuser', 'language_id')\n\n def validate_language_id(self, language_id):\n \"\"\"\n Check that the language_id is supported by 
Kolibri\n \"\"\"\n if not check_for_language(language_id):\n raise serializers.ValidationError(_(\"Language is not supported by Kolibri\"))\n return language_id\n\n def create(self, validated_data):\n \"\"\"\n Endpoint for initial setup of a device.\n Expects a value for:\n default language - the default language of this Kolibri device\n facility - the required fields for setting up a facility\n facilitydataset - facility configuration options\n superuser - the required fields for a facilityuser who will be set as the super user for this device\n \"\"\"\n with transaction.atomic():\n facility = Facility.objects.create(**validated_data.pop('facility'))\n preset = validated_data.pop('preset')\n dataset_data = mappings[preset]\n for key, value in dataset_data.items():\n setattr(facility.dataset, key, value)\n facility.dataset.save()\n superuser_data = validated_data.pop('superuser')\n superuser_data['facility'] = facility\n superuser = FacilityUserSerializer(data=superuser_data).create(superuser_data)\n superuser.set_password(superuser_data[\"password\"])\n superuser.save()\n facility.add_role(superuser, ADMIN)\n DevicePermissions.objects.create(user=superuser, is_superuser=True)\n language_id = validated_data.pop('language_id')\n device_settings, created = DeviceSettings.objects.get_or_create()\n device_settings.is_provisioned = True\n device_settings.language_id = language_id\n device_settings.default_facility = facility\n device_settings.save()\n return {\n \"facility\": facility,\n \"preset\": preset,\n \"superuser\": superuser,\n \"language_id\": language_id\n }\n", "path": "kolibri/core/device/serializers.py"}]}
1,514
339
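The record above shows the row layout in full. Its verification_info field is a JSON string carrying the issue text, the pre-patch file contents ("before_files"), and a copy of the golden diff. A sketch of unpacking it, with field names read directly off the dump:

```python
import json

info = json.loads(row["verification_info"])  # keys observed in the record above
print(info["issue"].splitlines()[0])         # first line of the issue text
for f in info["before_files"]:               # pre-patch source files
    print(f["path"], "-", len(f["content"]), "chars")
# In the dump above, the diff also appears inside verification_info:
assert info["golden_diff"] == row["golden_diff"]
```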
gh_patches_debug_29342
rasdani/github-patches
git_diff
pytorch__pytorch-4614
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support "device" keyword argument Adds the optional "device" keyword argument to Tensor and Storage constructors and .new methods. </issue> <code> [start of torch/distributions/categorical.py] 1 import torch 2 from torch.autograd import Variable 3 from torch.distributions import constraints 4 from torch.distributions.distribution import Distribution 5 from torch.distributions.utils import probs_to_logits, logits_to_probs, log_sum_exp, lazy_property 6 7 8 class Categorical(Distribution): 9 r""" 10 Creates a categorical distribution parameterized by `probs`. 11 12 .. note:: 13 It is equivalent to the distribution that ``multinomial()`` samples from. 14 15 Samples are integers from `0 ... K-1` where `K` is probs.size(-1). 16 17 If `probs` is 1D with length-`K`, each element is the relative probability 18 of sampling the class at that index. 19 20 If `probs` is 2D, it is treated as a batch of probability vectors. 21 22 See also: :func:`torch.multinomial` 23 24 Example:: 25 26 >>> m = Categorical(torch.Tensor([ 0.25, 0.25, 0.25, 0.25 ])) 27 >>> m.sample() # equal probability of 0, 1, 2, 3 28 3 29 [torch.LongTensor of size 1] 30 31 Args: 32 probs (Tensor or Variable): event probabilities 33 """ 34 params = {'probs': constraints.simplex} 35 has_enumerate_support = True 36 37 def __init__(self, probs=None, logits=None): 38 if (probs is None) == (logits is None): 39 raise ValueError("Either `probs` or `logits` must be specified, but not both.") 40 if probs is not None: 41 self.probs = probs / probs.sum(-1, keepdim=True) 42 else: 43 self.logits = logits - log_sum_exp(logits) 44 batch_shape = self.probs.size()[:-1] if probs is not None else self.logits.size()[:-1] 45 super(Categorical, self).__init__(batch_shape) 46 47 @constraints.dependent_property 48 def support(self): 49 return constraints.integer_interval(0, self.probs.size()[-1] - 1) 50 51 @lazy_property 52 def logits(self): 53 return probs_to_logits(self.probs) 54 55 @lazy_property 56 def probs(self): 57 return logits_to_probs(self.logits) 58 59 def sample(self, sample_shape=torch.Size()): 60 num_events = self.probs.size()[-1] 61 sample_shape = self._extended_shape(sample_shape) 62 param_shape = sample_shape + self.probs.size()[-1:] 63 probs = self.probs.expand(param_shape) 64 probs_2d = probs.contiguous().view(-1, num_events) 65 sample_2d = torch.multinomial(probs_2d, 1, True) 66 return sample_2d.contiguous().view(sample_shape) 67 68 def log_prob(self, value): 69 self._validate_log_prob_arg(value) 70 param_shape = value.size() + self.probs.size()[-1:] 71 log_pmf = self.logits.expand(param_shape) 72 return log_pmf.gather(-1, value.unsqueeze(-1).long()).squeeze(-1) 73 74 def entropy(self): 75 p_log_p = self.logits * self.probs 76 return -p_log_p.sum(-1) 77 78 def enumerate_support(self): 79 num_events = self.probs.size()[-1] 80 values = torch.arange(num_events).long() 81 values = values.view((-1,) + (1,) * len(self._batch_shape)) 82 values = values.expand((-1,) + self._batch_shape) 83 if self.probs.is_cuda: 84 values = values.cuda(self.probs.get_device()) 85 if isinstance(self.probs, Variable): 86 values = Variable(values) 87 return values 88 [end of torch/distributions/categorical.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torch/distributions/categorical.py b/torch/distributions/categorical.py --- a/torch/distributions/categorical.py +++ b/torch/distributions/categorical.py @@ -2,7 +2,7 @@ from torch.autograd import Variable from torch.distributions import constraints from torch.distributions.distribution import Distribution -from torch.distributions.utils import probs_to_logits, logits_to_probs, log_sum_exp, lazy_property +from torch.distributions.utils import probs_to_logits, logits_to_probs, log_sum_exp, lazy_property, broadcast_all class Categorical(Distribution): @@ -41,6 +41,7 @@ self.probs = probs / probs.sum(-1, keepdim=True) else: self.logits = logits - log_sum_exp(logits) + self._num_events = self.probs.size()[-1] if self.probs is not None else self.logits.size()[-1] batch_shape = self.probs.size()[:-1] if probs is not None else self.logits.size()[:-1] super(Categorical, self).__init__(batch_shape) @@ -67,7 +68,9 @@ def log_prob(self, value): self._validate_log_prob_arg(value) - param_shape = value.size() + self.probs.size()[-1:] + value_shape = torch._C._infer_size(value.size(), self.batch_shape) if self.batch_shape else value.size() + param_shape = value_shape + (self._num_events,) + value = value.expand(value_shape) log_pmf = self.logits.expand(param_shape) return log_pmf.gather(-1, value.unsqueeze(-1).long()).squeeze(-1)
{"golden_diff": "diff --git a/torch/distributions/categorical.py b/torch/distributions/categorical.py\n--- a/torch/distributions/categorical.py\n+++ b/torch/distributions/categorical.py\n@@ -2,7 +2,7 @@\n from torch.autograd import Variable\n from torch.distributions import constraints\n from torch.distributions.distribution import Distribution\n-from torch.distributions.utils import probs_to_logits, logits_to_probs, log_sum_exp, lazy_property\n+from torch.distributions.utils import probs_to_logits, logits_to_probs, log_sum_exp, lazy_property, broadcast_all\n \n \n class Categorical(Distribution):\n@@ -41,6 +41,7 @@\n self.probs = probs / probs.sum(-1, keepdim=True)\n else:\n self.logits = logits - log_sum_exp(logits)\n+ self._num_events = self.probs.size()[-1] if self.probs is not None else self.logits.size()[-1]\n batch_shape = self.probs.size()[:-1] if probs is not None else self.logits.size()[:-1]\n super(Categorical, self).__init__(batch_shape)\n \n@@ -67,7 +68,9 @@\n \n def log_prob(self, value):\n self._validate_log_prob_arg(value)\n- param_shape = value.size() + self.probs.size()[-1:]\n+ value_shape = torch._C._infer_size(value.size(), self.batch_shape) if self.batch_shape else value.size()\n+ param_shape = value_shape + (self._num_events,)\n+ value = value.expand(value_shape)\n log_pmf = self.logits.expand(param_shape)\n return log_pmf.gather(-1, value.unsqueeze(-1).long()).squeeze(-1)\n", "issue": "Support \"device\" keyword argument\nAdds the optional \"device\" keyword argument to Tensor and Storage\nconstructors and .new methods.\n\n", "before_files": [{"content": "import torch\nfrom torch.autograd import Variable\nfrom torch.distributions import constraints\nfrom torch.distributions.distribution import Distribution\nfrom torch.distributions.utils import probs_to_logits, logits_to_probs, log_sum_exp, lazy_property\n\n\nclass Categorical(Distribution):\n r\"\"\"\n Creates a categorical distribution parameterized by `probs`.\n\n .. note::\n It is equivalent to the distribution that ``multinomial()`` samples from.\n\n Samples are integers from `0 ... 
K-1` where `K` is probs.size(-1).\n\n If `probs` is 1D with length-`K`, each element is the relative probability\n of sampling the class at that index.\n\n If `probs` is 2D, it is treated as a batch of probability vectors.\n\n See also: :func:`torch.multinomial`\n\n Example::\n\n >>> m = Categorical(torch.Tensor([ 0.25, 0.25, 0.25, 0.25 ]))\n >>> m.sample() # equal probability of 0, 1, 2, 3\n 3\n [torch.LongTensor of size 1]\n\n Args:\n probs (Tensor or Variable): event probabilities\n \"\"\"\n params = {'probs': constraints.simplex}\n has_enumerate_support = True\n\n def __init__(self, probs=None, logits=None):\n if (probs is None) == (logits is None):\n raise ValueError(\"Either `probs` or `logits` must be specified, but not both.\")\n if probs is not None:\n self.probs = probs / probs.sum(-1, keepdim=True)\n else:\n self.logits = logits - log_sum_exp(logits)\n batch_shape = self.probs.size()[:-1] if probs is not None else self.logits.size()[:-1]\n super(Categorical, self).__init__(batch_shape)\n\n @constraints.dependent_property\n def support(self):\n return constraints.integer_interval(0, self.probs.size()[-1] - 1)\n\n @lazy_property\n def logits(self):\n return probs_to_logits(self.probs)\n\n @lazy_property\n def probs(self):\n return logits_to_probs(self.logits)\n\n def sample(self, sample_shape=torch.Size()):\n num_events = self.probs.size()[-1]\n sample_shape = self._extended_shape(sample_shape)\n param_shape = sample_shape + self.probs.size()[-1:]\n probs = self.probs.expand(param_shape)\n probs_2d = probs.contiguous().view(-1, num_events)\n sample_2d = torch.multinomial(probs_2d, 1, True)\n return sample_2d.contiguous().view(sample_shape)\n\n def log_prob(self, value):\n self._validate_log_prob_arg(value)\n param_shape = value.size() + self.probs.size()[-1:]\n log_pmf = self.logits.expand(param_shape)\n return log_pmf.gather(-1, value.unsqueeze(-1).long()).squeeze(-1)\n\n def entropy(self):\n p_log_p = self.logits * self.probs\n return -p_log_p.sum(-1)\n\n def enumerate_support(self):\n num_events = self.probs.size()[-1]\n values = torch.arange(num_events).long()\n values = values.view((-1,) + (1,) * len(self._batch_shape))\n values = values.expand((-1,) + self._batch_shape)\n if self.probs.is_cuda:\n values = values.cuda(self.probs.get_device())\n if isinstance(self.probs, Variable):\n values = Variable(values)\n return values\n", "path": "torch/distributions/categorical.py"}]}
1,532
369
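Each prompt asks the model to "solve this issue by generating a single patch file that I can apply directly to this repository using git apply". A hedged sketch of that check; the scratch-repo workflow below is an assumption for illustration, not tooling shipped with the dataset:

```python
import json
import pathlib
import subprocess
import tempfile

def golden_diff_applies(record) -> bool:
    """Materialize before_files in a scratch repo and dry-run `git apply`."""
    info = json.loads(record["verification_info"])
    with tempfile.TemporaryDirectory() as repo:
        # Write the pre-patch files at their recorded relative paths.
        for f in info["before_files"]:
            path = pathlib.Path(repo, f["path"])
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(f["content"])
        subprocess.run(["git", "init", "-q"], cwd=repo, check=True)
        patch = pathlib.Path(repo, "golden.patch")
        patch.write_text(record["golden_diff"])
        # --check validates the patch against the working tree without applying it.
        check = subprocess.run(["git", "apply", "--check", "golden.patch"], cwd=repo)
        return check.returncode == 0
```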
gh_patches_debug_24879
rasdani/github-patches
git_diff
mozilla__bugbug-242
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add assignee model to run.py See also #61. </issue> <code> [start of run.py] 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import argparse 7 import csv 8 import os 9 from datetime import datetime 10 from datetime import timedelta 11 12 import numpy as np 13 14 from bugbug import bugzilla 15 from bugbug import db 16 from bugbug import repository # noqa 17 18 if __name__ == '__main__': 19 parser = argparse.ArgumentParser() 20 parser.add_argument('--lemmatization', help='Perform lemmatization (using spaCy)', action='store_true') 21 parser.add_argument('--train', help='Perform training', action='store_true') 22 parser.add_argument('--goal', 23 help='Goal of the classifier', 24 choices=['bug', 'regression', 'tracking', 'qaneeded', 'uplift', 'component', 'devdocneeded', 'defectfeaturetask'], 25 default='bug') 26 parser.add_argument('--classifier', help='Type of the classifier', choices=['default', 'nn'], default='default') 27 parser.add_argument('--classify', help='Perform evaluation', action='store_true') 28 parser.add_argument('--generate-sheet', help='Perform evaluation on bugs from last week and generate a csv file', action='store_true') 29 args = parser.parse_args() 30 31 model_file_name = '{}{}model'.format( 32 args.goal, 33 '' if args.classifier == 'default' else args.classifier 34 ) 35 36 if args.goal == 'bug': 37 from bugbug.models.bug import BugModel 38 model_class = BugModel 39 elif args.goal == 'defectfeaturetask': 40 from bugbug.models.defect_feature_task import DefectFeatureTaskModel 41 model_class = DefectFeatureTaskModel 42 elif args.goal == 'regression': 43 from bugbug.models.regression import RegressionModel 44 model_class = RegressionModel 45 elif args.goal == 'tracking': 46 from bugbug.models.tracking import TrackingModel 47 model_class = TrackingModel 48 elif args.goal == 'qaneeded': 49 from bugbug.models.qaneeded import QANeededModel 50 model_class = QANeededModel 51 elif args.goal == 'uplift': 52 from bugbug.models.uplift import UpliftModel 53 model_class = UpliftModel 54 elif args.goal == 'component': 55 if args.classifier == 'default': 56 from bugbug.models.component import ComponentModel 57 model_class = ComponentModel 58 elif args.classifier == 'nn': 59 from bugbug.models.component_nn import ComponentNNModel 60 model_class = ComponentNNModel 61 elif args.goal == 'devdocneeded': 62 from bugbug.models.devdocneeded import DevDocNeededModel 63 model_class = DevDocNeededModel 64 65 if args.train: 66 db.download() 67 68 model = model_class(args.lemmatization) 69 model.train() 70 else: 71 model = model_class.load(model_file_name) 72 73 if args.classify: 74 for bug in bugzilla.get_bugs(): 75 print(f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug["id"] } - { bug["summary"]} ') 76 77 if model.calculate_importance: 78 probas, importances = model.classify(bug, probabilities=True, importances=True) 79 80 feature_names = model.get_feature_names() 81 for i, (importance, index, is_positive) in enumerate(importances): 82 print(f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})') 83 else: 84 probas = model.classify(bug, probabilities=True, importances=False) 85 86 if np.argmax(probas) == 1: 87 print(f'Positive! {probas}') 88 else: 89 print(f'Negative! 
{probas}') 90 input() 91 92 if args.generate_sheet: 93 today = datetime.utcnow() 94 a_week_ago = today - timedelta(7) 95 bugs = bugzilla.download_bugs_between(a_week_ago, today) 96 97 print(f'Classifying {len(bugs)} bugs...') 98 99 rows = [ 100 ['Bug', f'{args.goal}(model)', args.goal, 'Title'] 101 ] 102 103 for bug in bugs: 104 p = model.classify(bug, probabilities=True) 105 rows.append([f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]}', 'y' if p[0][1] >= 0.7 else 'n', '', bug['summary']]) 106 107 os.makedirs('sheets', exist_ok=True) 108 with open(os.path.join('sheets', f'{args.goal}-{datetime.utcnow().strftime("%Y-%m-%d")}-labels.csv'), 'w') as f: 109 writer = csv.writer(f) 110 writer.writerows(rows) 111 [end of run.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/run.py b/run.py --- a/run.py +++ b/run.py @@ -21,7 +21,7 @@ parser.add_argument('--train', help='Perform training', action='store_true') parser.add_argument('--goal', help='Goal of the classifier', - choices=['bug', 'regression', 'tracking', 'qaneeded', 'uplift', 'component', 'devdocneeded', 'defectfeaturetask'], + choices=['bug', 'regression', 'tracking', 'qaneeded', 'uplift', 'component', 'devdocneeded', 'defectfeaturetask', 'assignee'], default='bug') parser.add_argument('--classifier', help='Type of the classifier', choices=['default', 'nn'], default='default') parser.add_argument('--classify', help='Perform evaluation', action='store_true') @@ -61,6 +61,9 @@ elif args.goal == 'devdocneeded': from bugbug.models.devdocneeded import DevDocNeededModel model_class = DevDocNeededModel + elif args.goal == 'assignee': + from bugbug.models.assignee import AssigneeModel + model_class = AssigneeModel if args.train: db.download()
{"golden_diff": "diff --git a/run.py b/run.py\n--- a/run.py\n+++ b/run.py\n@@ -21,7 +21,7 @@\n parser.add_argument('--train', help='Perform training', action='store_true')\n parser.add_argument('--goal',\n help='Goal of the classifier',\n- choices=['bug', 'regression', 'tracking', 'qaneeded', 'uplift', 'component', 'devdocneeded', 'defectfeaturetask'],\n+ choices=['bug', 'regression', 'tracking', 'qaneeded', 'uplift', 'component', 'devdocneeded', 'defectfeaturetask', 'assignee'],\n default='bug')\n parser.add_argument('--classifier', help='Type of the classifier', choices=['default', 'nn'], default='default')\n parser.add_argument('--classify', help='Perform evaluation', action='store_true')\n@@ -61,6 +61,9 @@\n elif args.goal == 'devdocneeded':\n from bugbug.models.devdocneeded import DevDocNeededModel\n model_class = DevDocNeededModel\n+ elif args.goal == 'assignee':\n+ from bugbug.models.assignee import AssigneeModel\n+ model_class = AssigneeModel\n \n if args.train:\n db.download()\n", "issue": "Add assignee model to run.py\nSee also #61.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport os\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport numpy as np\n\nfrom bugbug import bugzilla\nfrom bugbug import db\nfrom bugbug import repository # noqa\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--lemmatization', help='Perform lemmatization (using spaCy)', action='store_true')\n parser.add_argument('--train', help='Perform training', action='store_true')\n parser.add_argument('--goal',\n help='Goal of the classifier',\n choices=['bug', 'regression', 'tracking', 'qaneeded', 'uplift', 'component', 'devdocneeded', 'defectfeaturetask'],\n default='bug')\n parser.add_argument('--classifier', help='Type of the classifier', choices=['default', 'nn'], default='default')\n parser.add_argument('--classify', help='Perform evaluation', action='store_true')\n parser.add_argument('--generate-sheet', help='Perform evaluation on bugs from last week and generate a csv file', action='store_true')\n args = parser.parse_args()\n\n model_file_name = '{}{}model'.format(\n args.goal,\n '' if args.classifier == 'default' else args.classifier\n )\n\n if args.goal == 'bug':\n from bugbug.models.bug import BugModel\n model_class = BugModel\n elif args.goal == 'defectfeaturetask':\n from bugbug.models.defect_feature_task import DefectFeatureTaskModel\n model_class = DefectFeatureTaskModel\n elif args.goal == 'regression':\n from bugbug.models.regression import RegressionModel\n model_class = RegressionModel\n elif args.goal == 'tracking':\n from bugbug.models.tracking import TrackingModel\n model_class = TrackingModel\n elif args.goal == 'qaneeded':\n from bugbug.models.qaneeded import QANeededModel\n model_class = QANeededModel\n elif args.goal == 'uplift':\n from bugbug.models.uplift import UpliftModel\n model_class = UpliftModel\n elif args.goal == 'component':\n if args.classifier == 'default':\n from bugbug.models.component import ComponentModel\n model_class = ComponentModel\n elif args.classifier == 'nn':\n from bugbug.models.component_nn import ComponentNNModel\n model_class = ComponentNNModel\n elif args.goal == 'devdocneeded':\n from bugbug.models.devdocneeded import DevDocNeededModel\n model_class = 
DevDocNeededModel\n\n if args.train:\n db.download()\n\n model = model_class(args.lemmatization)\n model.train()\n else:\n model = model_class.load(model_file_name)\n\n if args.classify:\n for bug in bugzilla.get_bugs():\n print(f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug[\"id\"] } - { bug[\"summary\"]} ')\n\n if model.calculate_importance:\n probas, importances = model.classify(bug, probabilities=True, importances=True)\n\n feature_names = model.get_feature_names()\n for i, (importance, index, is_positive) in enumerate(importances):\n print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f'Positive! {probas}')\n else:\n print(f'Negative! {probas}')\n input()\n\n if args.generate_sheet:\n today = datetime.utcnow()\n a_week_ago = today - timedelta(7)\n bugs = bugzilla.download_bugs_between(a_week_ago, today)\n\n print(f'Classifying {len(bugs)} bugs...')\n\n rows = [\n ['Bug', f'{args.goal}(model)', args.goal, 'Title']\n ]\n\n for bug in bugs:\n p = model.classify(bug, probabilities=True)\n rows.append([f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]}', 'y' if p[0][1] >= 0.7 else 'n', '', bug['summary']])\n\n os.makedirs('sheets', exist_ok=True)\n with open(os.path.join('sheets', f'{args.goal}-{datetime.utcnow().strftime(\"%Y-%m-%d\")}-labels.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n", "path": "run.py"}]}
1,814
274
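The two int64 columns make it easy to slice the dataset by size; the thresholds below are arbitrary examples, not values suggested by the dataset:

```python
# Keep only the shorter problems (illustrative cutoffs).
small = ds.filter(lambda r: r["num_tokens_prompt"] <= 1024 and r["num_tokens_diff"] <= 256)
print(len(small), "of", len(ds), "rows")
```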
gh_patches_debug_99
rasdani/github-patches
git_diff
streamlit__streamlit-6663
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> st.json replaces multiple spaces with single space ### Checklist - [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues. - [X] I added a very descriptive title to this issue. - [X] I have provided sufficient information below to help reproduce this issue. ### Summary When using st.json, multiple spaces in strings are replaced with a single space. ### Reproducible Code Example ```Python import streamlit as st st.json({"Hello World": "Hello James"}) ``` ### Steps To Reproduce _No response_ ### Expected Behavior _No response_ ### Current Behavior _No response_ ### Is this a regression? - [ ] Yes, this used to work in a previous version. ### Debug info - Streamlit version: - Python version: - Operating System: - Browser: - Virtual environment: ### Additional Information _No response_ ### Are you willing to submit a PR? - [ ] Yes, I am willing to submit a PR! st.json replaces multiple spaces with single space ### Checklist - [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues. - [X] I added a very descriptive title to this issue. - [X] I have provided sufficient information below to help reproduce this issue. ### Summary When using st.json, multiple spaces in strings are replaced with a single space. ### Reproducible Code Example ```Python import streamlit as st st.json({"Hello World": "Hello James"}) ``` ### Steps To Reproduce _No response_ ### Expected Behavior _No response_ ### Current Behavior _No response_ ### Is this a regression? - [ ] Yes, this used to work in a previous version. ### Debug info - Streamlit version: - Python version: - Operating System: - Browser: - Virtual environment: ### Additional Information _No response_ ### Are you willing to submit a PR? - [ ] Yes, I am willing to submit a PR! </issue> <code> [start of e2e/scripts/st_json.py] 1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import streamlit as st 16 17 data = {"foo": "bar"} 18 st.json(data) 19 st.json(data, expanded=False) 20 [end of e2e/scripts/st_json.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/e2e/scripts/st_json.py b/e2e/scripts/st_json.py --- a/e2e/scripts/st_json.py +++ b/e2e/scripts/st_json.py @@ -17,3 +17,5 @@ data = {"foo": "bar"} st.json(data) st.json(data, expanded=False) +data = {"Hello World": "Foo Bar"} +st.json(data)
{"golden_diff": "diff --git a/e2e/scripts/st_json.py b/e2e/scripts/st_json.py\n--- a/e2e/scripts/st_json.py\n+++ b/e2e/scripts/st_json.py\n@@ -17,3 +17,5 @@\n data = {\"foo\": \"bar\"}\n st.json(data)\n st.json(data, expanded=False)\n+data = {\"Hello World\": \"Foo Bar\"}\n+st.json(data)\n", "issue": "st.json replaces multiple spaces with single space\n### Checklist\n\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\n- [X] I added a very descriptive title to this issue.\n- [X] I have provided sufficient information below to help reproduce this issue.\n\n### Summary\n\nWhen using st.json, multiple spaces in strings are replaced with a single space.\n\n### Reproducible Code Example\n\n```Python\nimport streamlit as st\r\n\r\nst.json({\"Hello World\": \"Hello James\"})\n```\n\n\n### Steps To Reproduce\n\n_No response_\n\n### Expected Behavior\n\n_No response_\n\n### Current Behavior\n\n_No response_\n\n### Is this a regression?\n\n- [ ] Yes, this used to work in a previous version.\n\n### Debug info\n\n- Streamlit version:\r\n- Python version:\r\n- Operating System:\r\n- Browser:\r\n- Virtual environment:\r\n\n\n### Additional Information\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [ ] Yes, I am willing to submit a PR!\nst.json replaces multiple spaces with single space\n### Checklist\n\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\n- [X] I added a very descriptive title to this issue.\n- [X] I have provided sufficient information below to help reproduce this issue.\n\n### Summary\n\nWhen using st.json, multiple spaces in strings are replaced with a single space.\n\n### Reproducible Code Example\n\n```Python\nimport streamlit as st\r\n\r\nst.json({\"Hello World\": \"Hello James\"})\n```\n\n\n### Steps To Reproduce\n\n_No response_\n\n### Expected Behavior\n\n_No response_\n\n### Current Behavior\n\n_No response_\n\n### Is this a regression?\n\n- [ ] Yes, this used to work in a previous version.\n\n### Debug info\n\n- Streamlit version:\r\n- Python version:\r\n- Operating System:\r\n- Browser:\r\n- Virtual environment:\r\n\n\n### Additional Information\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\ndata = {\"foo\": \"bar\"}\nst.json(data)\nst.json(data, expanded=False)\n", "path": "e2e/scripts/st_json.py"}]}
1,180
90
gh_patches_debug_667
rasdani/github-patches
git_diff
meltano__meltano-6901
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ci: PyPi publish job fails in "Build distribution" step with error `module 'sqlalchemy' has no attribute 'orm'` https://github.com/meltano/meltano/actions/runs/3267990463/jobs/5373871668 </issue> <code> [start of src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py] 1 """add resource type to embed token 2 3 Revision ID: 23ea52e6d784 4 Revises: ceb00d7ff3bd 5 Create Date: 2020-02-12 09:29:31.592426 6 7 """ 8 import sqlalchemy as sa 9 from alembic import op 10 11 from meltano.migrations.utils.dialect_typing import ( 12 get_dialect_name, 13 max_string_length_for_dialect, 14 ) 15 16 # revision identifiers, used by Alembic. 17 revision = "23ea52e6d784" 18 down_revision = "ceb00d7ff3bd" 19 branch_labels = None 20 depends_on = None 21 22 Session = sa.orm.sessionmaker() 23 24 25 def upgrade(): 26 dialect_name = get_dialect_name() 27 max_string_length = max_string_length_for_dialect(dialect_name) 28 29 op.add_column( 30 "embed_tokens", sa.Column("resource_type", sa.String(max_string_length)) 31 ) 32 33 metadata = sa.MetaData(bind=op.get_bind()) 34 Embed_Tokens = sa.Table("embed_tokens", metadata, autoload=True) 35 op.execute(Embed_Tokens.update().values({"resource_type": "report"})) 36 37 38 def downgrade(): 39 op.drop_column("embed_tokens", "resource_type") 40 [end of src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py b/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py --- a/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py +++ b/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py @@ -6,6 +6,7 @@ """ import sqlalchemy as sa +import sqlalchemy.orm from alembic import op from meltano.migrations.utils.dialect_typing import (
{"golden_diff": "diff --git a/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py b/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py\n--- a/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py\n+++ b/src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py\n@@ -6,6 +6,7 @@\n \n \"\"\"\n import sqlalchemy as sa\n+import sqlalchemy.orm\n from alembic import op\n \n from meltano.migrations.utils.dialect_typing import (\n", "issue": "ci: PyPi publish job fails in \"Build distribution\" step with error `module 'sqlalchemy' has no attribute 'orm'`\nhttps://github.com/meltano/meltano/actions/runs/3267990463/jobs/5373871668\n", "before_files": [{"content": "\"\"\"add resource type to embed token\n\nRevision ID: 23ea52e6d784\nRevises: ceb00d7ff3bd\nCreate Date: 2020-02-12 09:29:31.592426\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\nfrom meltano.migrations.utils.dialect_typing import (\n get_dialect_name,\n max_string_length_for_dialect,\n)\n\n# revision identifiers, used by Alembic.\nrevision = \"23ea52e6d784\"\ndown_revision = \"ceb00d7ff3bd\"\nbranch_labels = None\ndepends_on = None\n\nSession = sa.orm.sessionmaker()\n\n\ndef upgrade():\n dialect_name = get_dialect_name()\n max_string_length = max_string_length_for_dialect(dialect_name)\n\n op.add_column(\n \"embed_tokens\", sa.Column(\"resource_type\", sa.String(max_string_length))\n )\n\n metadata = sa.MetaData(bind=op.get_bind())\n Embed_Tokens = sa.Table(\"embed_tokens\", metadata, autoload=True)\n op.execute(Embed_Tokens.update().values({\"resource_type\": \"report\"}))\n\n\ndef downgrade():\n op.drop_column(\"embed_tokens\", \"resource_type\")\n", "path": "src/meltano/migrations/versions/23ea52e6d784_add_resource_type_to_embed_token.py"}]}
1,004
162
gh_patches_debug_40162
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3398
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider lululemon is broken During the global build at 2021-06-02-14-42-40, spider **lululemon** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/lululemon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lululemon.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lululemon.geojson)) </issue> <code> [start of locations/spiders/lululemon.py] 1 # -*- coding: utf-8 -*- 2 import json 3 import scrapy 4 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 WEEKDAYS = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"] 9 10 11 class LuLuLemonSpider(scrapy.Spider): 12 download_delay = 0.1 13 name = "lululemon" 14 item_attributes = {"brand": "LuLuLemon", "brand_wikidata": "Q6702957"} 15 allowed_domains = ["shop.lululemon.com"] 16 start_urls = ("https://shop.lululemon.com/stores/all-lululemon-stores",) 17 18 def parse(self, response): 19 urls = response.xpath('//a[@class="store-link basic"]/@href').extract() 20 for path in urls: 21 yield scrapy.Request(response.urljoin(path), callback=self.parse_store) 22 23 def parse_store(self, response): 24 address = {} 25 geo = {} 26 data = json.loads( 27 response.xpath('//script[@type="application/ld+json"]/text()').extract_first() 28 ) 29 30 ref = response.xpath('//h1[@class="lll-stack-lvl4"]/text()').extract_first() 31 32 if data.get("address"): 33 address["full"] = data["address"].get("streetAddress") 34 address["zip"] = data["address"].get("postalCode") 35 address["state"] = data["address"].get("addressRegion") 36 address["city"] = data["address"].get("addressLocality") 37 address["country"] = data["address"].get("addressCountry") 38 if data.get("geo"): 39 geo["lat"] = data["geo"].get("latitude") 40 geo["lon"] = data["geo"].get("longitude") 41 oh = self.parse_hours(data.get("openingHours")) 42 if not oh: 43 ref = "CLOSED - {}".format(ref) 44 45 properties = { 46 "addr_full": address.get("full"), 47 "phone": data.get("telephone"), 48 "city": address.get("city"), 49 "state": address.get("state"), 50 "postcode": address.get("zip"), 51 "ref": ref, 52 "website": response.url, 53 "lat": geo.get("lat"), 54 "lon": geo.get("lon"), 55 "opening_hours": oh, 56 } 57 yield GeojsonPointItem(**properties) 58 59 def parse_hours(self, hours): 60 oh = OpeningHours() 61 62 for h in hours: 63 t = h.replace(" - ", "-") 64 d, h = t.split() 65 ot, ct = h.split("-") 66 # Some stores are permanently closed, thus no time is defined 67 if ot == "null" or ct == "null": 68 continue 69 days = self.parse_days(d) 70 for day in days: 71 oh.add_range(day=day, open_time=ot, close_time=ct, time_format="%H:%M") 72 73 return oh.as_opening_hours() 74 75 def parse_days(self, days): 76 """Parse day ranges and returns a list of days it represent 77 The following formats are considered: 78 - Single day, e.g. "Mon", "Monday" 79 - Range, e.g. "Mon-Fri", "Tue-Sund", "Sat-Sunday" 80 - Two days, e.g. "Sat & Sun", "Friday & Su" 81 82 Returns a list with the weekdays 83 """ 84 # Produce a list of weekdays between two days e.g. su-sa, mo-th, etc. 
85 if "-" in days: 86 d = days.split("-") 87 r = [i.strip()[:2] for i in d] 88 s = WEEKDAYS.index(r[0].title()) 89 e = WEEKDAYS.index(r[1].title()) 90 if s <= e: 91 return WEEKDAYS[s : e + 1] 92 else: 93 return WEEKDAYS[s:] + WEEKDAYS[: e + 1] 94 # Two days 95 if "&" in days: 96 d = days.split("&") 97 return [i.strip()[:2].title() for i in d] 98 # Single days 99 else: 100 return [days.strip()[:2].title()] 101 [end of locations/spiders/lululemon.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/lululemon.py b/locations/spiders/lululemon.py --- a/locations/spiders/lululemon.py +++ b/locations/spiders/lululemon.py @@ -16,35 +16,39 @@ start_urls = ("https://shop.lululemon.com/stores/all-lululemon-stores",) def parse(self, response): - urls = response.xpath('//a[@class="store-link basic"]/@href').extract() + urls = response.xpath('//a[@class="store-list_storeLink__3krLG"]/@href').extract() for path in urls: yield scrapy.Request(response.urljoin(path), callback=self.parse_store) def parse_store(self, response): address = {} geo = {} + hours = {} + status = "CLOSED" data = json.loads( - response.xpath('//script[@type="application/ld+json"]/text()').extract_first() + response.xpath('//script[@type="application/json"]/text()').extract_first() ) - ref = response.xpath('//h1[@class="lll-stack-lvl4"]/text()').extract_first() + ref = data["props"]["pageProps"]["storeData"]["name"] + address["full"] = data["props"]["pageProps"]["storeData"].get("fullAddress") + address["zip"] = address["full"].split(",")[-1].strip() + address["state"] = data["props"]["pageProps"]["storeData"].get("state") + address["city"] = data["props"]["pageProps"]["storeData"].get("city") + address["country"] = data["props"]["pageProps"]["storeData"].get("country") + address["phone"] = data["props"]["pageProps"]["storeData"].get("phone") + geo["lat"] = data["props"]["pageProps"]["storeData"].get("latitude") + geo["lon"] = data["props"]["pageProps"]["storeData"].get("longitude") + hours = data["props"]["pageProps"]["storeData"].get("hours") + if data["props"]["pageProps"]["storeData"].get("status") == "active_soon": + status = "Opening soon" - if data.get("address"): - address["full"] = data["address"].get("streetAddress") - address["zip"] = data["address"].get("postalCode") - address["state"] = data["address"].get("addressRegion") - address["city"] = data["address"].get("addressLocality") - address["country"] = data["address"].get("addressCountry") - if data.get("geo"): - geo["lat"] = data["geo"].get("latitude") - geo["lon"] = data["geo"].get("longitude") - oh = self.parse_hours(data.get("openingHours")) + oh = self.parse_hours(hours) if not oh: - ref = "CLOSED - {}".format(ref) + ref = "{} - {}".format(status, ref) properties = { "addr_full": address.get("full"), - "phone": data.get("telephone"), + "phone": address.get("phone"), "city": address.get("city"), "state": address.get("state"), "postcode": address.get("zip"), @@ -60,11 +64,11 @@ oh = OpeningHours() for h in hours: - t = h.replace(" - ", "-") - d, h = t.split() - ot, ct = h.split("-") + d = h.get("name") + ot = h.get("openHour") + ct = h.get("closeHour") # Some stores are permanently closed, thus no time is defined - if ot == "null" or ct == "null": + if not ot or not ct: continue days = self.parse_days(d) for day in days:
{"golden_diff": "diff --git a/locations/spiders/lululemon.py b/locations/spiders/lululemon.py\n--- a/locations/spiders/lululemon.py\n+++ b/locations/spiders/lululemon.py\n@@ -16,35 +16,39 @@\n start_urls = (\"https://shop.lululemon.com/stores/all-lululemon-stores\",)\n \n def parse(self, response):\n- urls = response.xpath('//a[@class=\"store-link basic\"]/@href').extract()\n+ urls = response.xpath('//a[@class=\"store-list_storeLink__3krLG\"]/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_store)\n \n def parse_store(self, response):\n address = {}\n geo = {}\n+ hours = {}\n+ status = \"CLOSED\"\n data = json.loads(\n- response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n+ response.xpath('//script[@type=\"application/json\"]/text()').extract_first()\n )\n \n- ref = response.xpath('//h1[@class=\"lll-stack-lvl4\"]/text()').extract_first()\n+ ref = data[\"props\"][\"pageProps\"][\"storeData\"][\"name\"]\n+ address[\"full\"] = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"fullAddress\")\n+ address[\"zip\"] = address[\"full\"].split(\",\")[-1].strip()\n+ address[\"state\"] = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"state\")\n+ address[\"city\"] = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"city\")\n+ address[\"country\"] = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"country\")\n+ address[\"phone\"] = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"phone\")\n+ geo[\"lat\"] = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"latitude\")\n+ geo[\"lon\"] = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"longitude\")\n+ hours = data[\"props\"][\"pageProps\"][\"storeData\"].get(\"hours\")\n+ if data[\"props\"][\"pageProps\"][\"storeData\"].get(\"status\") == \"active_soon\":\n+ status = \"Opening soon\"\n \n- if data.get(\"address\"):\n- address[\"full\"] = data[\"address\"].get(\"streetAddress\")\n- address[\"zip\"] = data[\"address\"].get(\"postalCode\")\n- address[\"state\"] = data[\"address\"].get(\"addressRegion\")\n- address[\"city\"] = data[\"address\"].get(\"addressLocality\")\n- address[\"country\"] = data[\"address\"].get(\"addressCountry\")\n- if data.get(\"geo\"):\n- geo[\"lat\"] = data[\"geo\"].get(\"latitude\")\n- geo[\"lon\"] = data[\"geo\"].get(\"longitude\")\n- oh = self.parse_hours(data.get(\"openingHours\"))\n+ oh = self.parse_hours(hours)\n if not oh:\n- ref = \"CLOSED - {}\".format(ref)\n+ ref = \"{} - {}\".format(status, ref)\n \n properties = {\n \"addr_full\": address.get(\"full\"),\n- \"phone\": data.get(\"telephone\"),\n+ \"phone\": address.get(\"phone\"),\n \"city\": address.get(\"city\"),\n \"state\": address.get(\"state\"),\n \"postcode\": address.get(\"zip\"),\n@@ -60,11 +64,11 @@\n oh = OpeningHours()\n \n for h in hours:\n- t = h.replace(\" - \", \"-\")\n- d, h = t.split()\n- ot, ct = h.split(\"-\")\n+ d = h.get(\"name\")\n+ ot = h.get(\"openHour\")\n+ ct = h.get(\"closeHour\")\n # Some stores are permanently closed, thus no time is defined\n- if ot == \"null\" or ct == \"null\":\n+ if not ot or not ct:\n continue\n days = self.parse_days(d)\n for day in days:\n", "issue": "Spider lululemon is broken\nDuring the global build at 2021-06-02-14-42-40, spider **lululemon** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/lululemon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lululemon.geojson) ([on a 
map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lululemon.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nWEEKDAYS = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n\n\nclass LuLuLemonSpider(scrapy.Spider):\n download_delay = 0.1\n name = \"lululemon\"\n item_attributes = {\"brand\": \"LuLuLemon\", \"brand_wikidata\": \"Q6702957\"}\n allowed_domains = [\"shop.lululemon.com\"]\n start_urls = (\"https://shop.lululemon.com/stores/all-lululemon-stores\",)\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"store-link basic\"]/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_store)\n\n def parse_store(self, response):\n address = {}\n geo = {}\n data = json.loads(\n response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n )\n\n ref = response.xpath('//h1[@class=\"lll-stack-lvl4\"]/text()').extract_first()\n\n if data.get(\"address\"):\n address[\"full\"] = data[\"address\"].get(\"streetAddress\")\n address[\"zip\"] = data[\"address\"].get(\"postalCode\")\n address[\"state\"] = data[\"address\"].get(\"addressRegion\")\n address[\"city\"] = data[\"address\"].get(\"addressLocality\")\n address[\"country\"] = data[\"address\"].get(\"addressCountry\")\n if data.get(\"geo\"):\n geo[\"lat\"] = data[\"geo\"].get(\"latitude\")\n geo[\"lon\"] = data[\"geo\"].get(\"longitude\")\n oh = self.parse_hours(data.get(\"openingHours\"))\n if not oh:\n ref = \"CLOSED - {}\".format(ref)\n\n properties = {\n \"addr_full\": address.get(\"full\"),\n \"phone\": data.get(\"telephone\"),\n \"city\": address.get(\"city\"),\n \"state\": address.get(\"state\"),\n \"postcode\": address.get(\"zip\"),\n \"ref\": ref,\n \"website\": response.url,\n \"lat\": geo.get(\"lat\"),\n \"lon\": geo.get(\"lon\"),\n \"opening_hours\": oh,\n }\n yield GeojsonPointItem(**properties)\n\n def parse_hours(self, hours):\n oh = OpeningHours()\n\n for h in hours:\n t = h.replace(\" - \", \"-\")\n d, h = t.split()\n ot, ct = h.split(\"-\")\n # Some stores are permanently closed, thus no time is defined\n if ot == \"null\" or ct == \"null\":\n continue\n days = self.parse_days(d)\n for day in days:\n oh.add_range(day=day, open_time=ot, close_time=ct, time_format=\"%H:%M\")\n\n return oh.as_opening_hours()\n\n def parse_days(self, days):\n \"\"\"Parse day ranges and returns a list of days it represent\n The following formats are considered:\n - Single day, e.g. \"Mon\", \"Monday\"\n - Range, e.g. \"Mon-Fri\", \"Tue-Sund\", \"Sat-Sunday\"\n - Two days, e.g. \"Sat & Sun\", \"Friday & Su\"\n\n Returns a list with the weekdays\n \"\"\"\n # Produce a list of weekdays between two days e.g. su-sa, mo-th, etc.\n if \"-\" in days:\n d = days.split(\"-\")\n r = [i.strip()[:2] for i in d]\n s = WEEKDAYS.index(r[0].title())\n e = WEEKDAYS.index(r[1].title())\n if s <= e:\n return WEEKDAYS[s : e + 1]\n else:\n return WEEKDAYS[s:] + WEEKDAYS[: e + 1]\n # Two days\n if \"&\" in days:\n d = days.split(\"&\")\n return [i.strip()[:2].title() for i in d]\n # Single days\n else:\n return [days.strip()[:2].title()]\n", "path": "locations/spiders/lululemon.py"}]}
1,833
876
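The golden diff above swaps the spider's `ld+json` microdata source for the JSON blob in the page's `application/json` script tag, and reads opening hours as structured dicts instead of `"Mon - Fri 10:00-19:00"` strings. A minimal standalone sketch of the new hours handling, with made-up sample data and none of the Scrapy/`OpeningHours` machinery:

```python
# Sketch of the reworked hours parsing from the golden diff: each entry is a
# dict such as {"name": "Mon-Fri", "openHour": "10:00", "closeHour": "19:00"};
# entries missing either time (permanently closed stores) are skipped.
def parse_hours(hours):
    ranges = []
    for h in hours:
        day, open_t, close_t = h.get("name"), h.get("openHour"), h.get("closeHour")
        if not open_t or not close_t:
            continue
        ranges.append((day, open_t, close_t))
    return ranges

sample = [
    {"name": "Mon-Fri", "openHour": "10:00", "closeHour": "19:00"},
    {"name": "Sat", "openHour": None, "closeHour": None},  # closed store
]
print(parse_hours(sample))  # [('Mon-Fri', '10:00', '19:00')]
```

When no range survives, the patched spider still emits the item but prefixes the ref with the store status (e.g. "Opening soon") rather than unconditionally marking it CLOSED.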
gh_patches_debug_43013
rasdani/github-patches
git_diff
deepset-ai__haystack-7599
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> feat: Add `page_number` to meta of Documents in `DocumentSplitter` **Is your feature request related to a problem? Please describe.** In Haystack v1 we had an option in the Preprocessor to add the original `page_number` to a Document's meta data when it was split into a chunk. This feature made down stream applications of visualizing the retrieved text from original files (e.g. PDFs) very easy and straightforward, so I'd like to see it in Haystack v2 as well. **Describe the solution you'd like** I would like to add the option to store the `page_number` in the meta info of the Document in the DocumentSplitter component. I believe we can use a similar/same implementation of calculating this like we did for the Preprocessor. </issue> <code> [start of haystack/components/preprocessors/document_splitter.py] 1 from copy import deepcopy 2 from typing import List, Literal 3 4 from more_itertools import windowed 5 6 from haystack import Document, component 7 8 9 @component 10 class DocumentSplitter: 11 """ 12 Splits a list of text documents into a list of text documents with shorter texts. 13 14 Splitting documents with long texts is a common preprocessing step during indexing. 15 This allows Embedders to create significant semantic representations 16 and avoids exceeding the maximum context length of language models. 17 """ 18 19 def __init__( 20 self, 21 split_by: Literal["word", "sentence", "page", "passage"] = "word", 22 split_length: int = 200, 23 split_overlap: int = 0, 24 ): 25 """ 26 Initialize the DocumentSplitter. 27 28 :param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ", 29 "sentence" for splitting by ".", "page" for splitting by "\\f" or "passage" for splitting by "\\n\\n". 30 :param split_length: The maximum number of units in each split. 31 :param split_overlap: The number of units that each split should overlap. 32 """ 33 34 self.split_by = split_by 35 if split_by not in ["word", "sentence", "page", "passage"]: 36 raise ValueError("split_by must be one of 'word', 'sentence', 'page' or 'passage'.") 37 if split_length <= 0: 38 raise ValueError("split_length must be greater than 0.") 39 self.split_length = split_length 40 if split_overlap < 0: 41 raise ValueError("split_overlap must be greater than or equal to 0.") 42 self.split_overlap = split_overlap 43 44 @component.output_types(documents=List[Document]) 45 def run(self, documents: List[Document]): 46 """ 47 Split documents into smaller parts. 48 49 Splits documents by the unit expressed in `split_by`, with a length of `split_length` 50 and an overlap of `split_overlap`. 51 52 :param documents: The documents to split. 53 54 :returns: A dictionary with the following key: 55 - `documents`: List of documents with the split texts. A metadata field "source_id" is added to each 56 document to keep track of the original document that was split. Other metadata are copied from the original 57 document. 58 59 :raises TypeError: if the input is not a list of Documents. 60 :raises ValueError: if the content of a document is None. 
61 """ 62 63 if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)): 64 raise TypeError("DocumentSplitter expects a List of Documents as input.") 65 66 split_docs = [] 67 for doc in documents: 68 if doc.content is None: 69 raise ValueError( 70 f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None." 71 ) 72 units = self._split_into_units(doc.content, self.split_by) 73 text_splits = self._concatenate_units(units, self.split_length, self.split_overlap) 74 metadata = deepcopy(doc.meta) 75 metadata["source_id"] = doc.id 76 split_docs += [Document(content=txt, meta=metadata) for txt in text_splits] 77 return {"documents": split_docs} 78 79 def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]: 80 if split_by == "page": 81 split_at = "\f" 82 elif split_by == "passage": 83 split_at = "\n\n" 84 elif split_by == "sentence": 85 split_at = "." 86 elif split_by == "word": 87 split_at = " " 88 else: 89 raise NotImplementedError( 90 "DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options." 91 ) 92 units = text.split(split_at) 93 # Add the delimiter back to all units except the last one 94 for i in range(len(units) - 1): 95 units[i] += split_at 96 return units 97 98 def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]: 99 """ 100 Concatenates the elements into parts of split_length units. 101 """ 102 text_splits = [] 103 segments = windowed(elements, n=split_length, step=split_length - split_overlap) 104 for seg in segments: 105 current_units = [unit for unit in seg if unit is not None] 106 txt = "".join(current_units) 107 if len(txt) > 0: 108 text_splits.append(txt) 109 return text_splits 110 [end of haystack/components/preprocessors/document_splitter.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py --- a/haystack/components/preprocessors/document_splitter.py +++ b/haystack/components/preprocessors/document_splitter.py @@ -1,5 +1,5 @@ from copy import deepcopy -from typing import List, Literal +from typing import Dict, List, Literal, Tuple from more_itertools import windowed @@ -53,7 +53,7 @@ :returns: A dictionary with the following key: - `documents`: List of documents with the split texts. A metadata field "source_id" is added to each - document to keep track of the original document that was split. Other metadata are copied from the original + document to keep track of the original document that was split. Another metadata field "page_number" is added to each number to keep track of the page it belonged to in the original document. Other metadata are copied from the original document. :raises TypeError: if the input is not a list of Documents. @@ -70,10 +70,12 @@ f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None." ) units = self._split_into_units(doc.content, self.split_by) - text_splits = self._concatenate_units(units, self.split_length, self.split_overlap) + text_splits, splits_pages = self._concatenate_units(units, self.split_length, self.split_overlap) metadata = deepcopy(doc.meta) metadata["source_id"] = doc.id - split_docs += [Document(content=txt, meta=metadata) for txt in text_splits] + split_docs += self._create_docs_from_splits( + text_splits=text_splits, splits_pages=splits_pages, meta=metadata + ) return {"documents": split_docs} def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]: @@ -95,15 +97,40 @@ units[i] += split_at return units - def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]: + def _concatenate_units( + self, elements: List[str], split_length: int, split_overlap: int + ) -> Tuple[List[str], List[int]]: """ - Concatenates the elements into parts of split_length units. + Concatenates the elements into parts of split_length units keeping track of the original page number that each element belongs. """ text_splits = [] + splits_pages = [] + cur_page = 1 segments = windowed(elements, n=split_length, step=split_length - split_overlap) for seg in segments: current_units = [unit for unit in seg if unit is not None] txt = "".join(current_units) if len(txt) > 0: text_splits.append(txt) - return text_splits + splits_pages.append(cur_page) + processed_units = current_units[: split_length - split_overlap] + if self.split_by == "page": + num_page_breaks = len(processed_units) + else: + num_page_breaks = sum(processed_unit.count("\f") for processed_unit in processed_units) + cur_page += num_page_breaks + return text_splits, splits_pages + + @staticmethod + def _create_docs_from_splits(text_splits: List[str], splits_pages: List[int], meta: Dict) -> List[Document]: + """ + Creates Document objects from text splits enriching them with page number and the metadata of the original document. + """ + documents: List[Document] = [] + + for i, txt in enumerate(text_splits): + meta = deepcopy(meta) + doc = Document(content=txt, meta=meta) + doc.meta["page_number"] = splits_pages[i] + documents.append(doc) + return documents
{"golden_diff": "diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py\n--- a/haystack/components/preprocessors/document_splitter.py\n+++ b/haystack/components/preprocessors/document_splitter.py\n@@ -1,5 +1,5 @@\n from copy import deepcopy\n-from typing import List, Literal\n+from typing import Dict, List, Literal, Tuple\n \n from more_itertools import windowed\n \n@@ -53,7 +53,7 @@\n \n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. A metadata field \"source_id\" is added to each\n- document to keep track of the original document that was split. Other metadata are copied from the original\n+ document to keep track of the original document that was split. Another metadata field \"page_number\" is added to each number to keep track of the page it belonged to in the original document. Other metadata are copied from the original\n document.\n \n :raises TypeError: if the input is not a list of Documents.\n@@ -70,10 +70,12 @@\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n- text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n+ text_splits, splits_pages = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n- split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n+ split_docs += self._create_docs_from_splits(\n+ text_splits=text_splits, splits_pages=splits_pages, meta=metadata\n+ )\n return {\"documents\": split_docs}\n \n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n@@ -95,15 +97,40 @@\n units[i] += split_at\n return units\n \n- def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:\n+ def _concatenate_units(\n+ self, elements: List[str], split_length: int, split_overlap: int\n+ ) -> Tuple[List[str], List[int]]:\n \"\"\"\n- Concatenates the elements into parts of split_length units.\n+ Concatenates the elements into parts of split_length units keeping track of the original page number that each element belongs.\n \"\"\"\n text_splits = []\n+ splits_pages = []\n+ cur_page = 1\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n- return text_splits\n+ splits_pages.append(cur_page)\n+ processed_units = current_units[: split_length - split_overlap]\n+ if self.split_by == \"page\":\n+ num_page_breaks = len(processed_units)\n+ else:\n+ num_page_breaks = sum(processed_unit.count(\"\\f\") for processed_unit in processed_units)\n+ cur_page += num_page_breaks\n+ return text_splits, splits_pages\n+\n+ @staticmethod\n+ def _create_docs_from_splits(text_splits: List[str], splits_pages: List[int], meta: Dict) -> List[Document]:\n+ \"\"\"\n+ Creates Document objects from text splits enriching them with page number and the metadata of the original document.\n+ \"\"\"\n+ documents: List[Document] = []\n+\n+ for i, txt in enumerate(text_splits):\n+ meta = deepcopy(meta)\n+ doc = Document(content=txt, meta=meta)\n+ doc.meta[\"page_number\"] = splits_pages[i]\n+ documents.append(doc)\n+ return documents\n", 
"issue": "feat: Add `page_number` to meta of Documents in `DocumentSplitter`\n**Is your feature request related to a problem? Please describe.**\r\nIn Haystack v1 we had an option in the Preprocessor to add the original `page_number` to a Document's meta data when it was split into a chunk. This feature made down stream applications of visualizing the retrieved text from original files (e.g. PDFs) very easy and straightforward, so I'd like to see it in Haystack v2 as well. \r\n\r\n**Describe the solution you'd like**\r\nI would like to add the option to store the `page_number` in the meta info of the Document in the DocumentSplitter component. I believe we can use a similar/same implementation of calculating this like we did for the Preprocessor. \r\n\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import List, Literal\n\nfrom more_itertools import windowed\n\nfrom haystack import Document, component\n\n\n@component\nclass DocumentSplitter:\n \"\"\"\n Splits a list of text documents into a list of text documents with shorter texts.\n\n Splitting documents with long texts is a common preprocessing step during indexing.\n This allows Embedders to create significant semantic representations\n and avoids exceeding the maximum context length of language models.\n \"\"\"\n\n def __init__(\n self,\n split_by: Literal[\"word\", \"sentence\", \"page\", \"passage\"] = \"word\",\n split_length: int = 200,\n split_overlap: int = 0,\n ):\n \"\"\"\n Initialize the DocumentSplitter.\n\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n \"sentence\" for splitting by \".\", \"page\" for splitting by \"\\\\f\" or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n\n self.split_by = split_by\n if split_by not in [\"word\", \"sentence\", \"page\", \"passage\"]:\n raise ValueError(\"split_by must be one of 'word', 'sentence', 'page' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n if split_overlap < 0:\n raise ValueError(\"split_overlap must be greater than or equal to 0.\")\n self.split_overlap = split_overlap\n\n @component.output_types(documents=List[Document])\n def run(self, documents: List[Document]):\n \"\"\"\n Split documents into smaller parts.\n\n Splits documents by the unit expressed in `split_by`, with a length of `split_length`\n and an overlap of `split_overlap`.\n\n :param documents: The documents to split.\n\n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. A metadata field \"source_id\" is added to each\n document to keep track of the original document that was split. 
Other metadata are copied from the original\n document.\n\n :raises TypeError: if the input is not a list of Documents.\n :raises ValueError: if the content of a document is None.\n \"\"\"\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n\n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n if split_by == \"page\":\n split_at = \"\\f\"\n elif split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n elif split_by == \"word\":\n split_at = \" \"\n else:\n raise NotImplementedError(\n \"DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n return units\n\n def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n \"\"\"\n text_splits = []\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n return text_splits\n", "path": "haystack/components/preprocessors/document_splitter.py"}]}
1,927
886
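The page tracking added in this patch hinges on one idea: after emitting each window of units, count the form feeds (`\f`) in the units that window consumed and advance the current page counter. A self-contained sketch of that bookkeeping, using a plain slicing loop in place of `more_itertools.windowed` and taking `split_by` as a parameter rather than reading it from `self`:

```python
def concatenate_units(units, split_length, split_overlap, split_by="word"):
    text_splits, pages = [], []
    cur_page = 1
    step = split_length - split_overlap
    for start in range(0, len(units), step):
        seg = units[start:start + split_length]
        txt = "".join(seg)
        if txt:
            text_splits.append(txt)
            pages.append(cur_page)
        consumed = seg[:step]  # units that will not appear in the next window
        if split_by == "page":
            cur_page += len(consumed)  # every page unit ends a page
        else:
            cur_page += sum(u.count("\f") for u in consumed)
    return text_splits, pages

# Word units with the delimiter re-attached, spanning a page break:
units = ["Page ", "one.\f", "Page ", "two."]
print(concatenate_units(units, split_length=2, split_overlap=0))
# (['Page one.\x0c', 'Page two.'], [1, 2])
```

The second split correctly reports page 2 because the form feed was consumed by the first window.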
gh_patches_debug_33719
rasdani/github-patches
git_diff
akvo__akvo-rsr-1647
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a default Organsiation Logo image - [x] Finalise image decision - [x] Implement new image ## Test plan GIVEN the organisation directory page WHEN an organisation has no logo THEN the image as shown below should be displayed </issue> <code> [start of akvo/rsr/templatetags/rsr_utils.py] 1 # -*- coding: utf-8 -*- 2 """Akvo RSR is covered by the GNU Affero General Public License. 3 4 See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 """ 7 8 from __future__ import absolute_import, print_function 9 10 from django import template 11 from django.contrib.auth import get_user_model 12 from akvo.rsr.models import Keyword, PartnerSite, Project, ProjectUpdate, Organisation 13 register = template.Library() 14 15 16 @register.inclusion_tag('rsr_utils/img.html', takes_context=True) 17 def img(context, obj, width, height, alt): 18 """Standard way to show image.""" 19 geometry = '{}x{}'.format(width, height) 20 21 # Based on type get image 22 img = obj 23 if isinstance(obj, Project): 24 img = obj.current_image 25 elif isinstance(obj, ProjectUpdate): 26 img = obj.photo 27 elif isinstance(obj, Organisation): 28 img = obj.logo 29 elif isinstance(obj, get_user_model()): 30 img = obj.avatar 31 elif isinstance(obj, PartnerSite): 32 img = obj.logo 33 elif isinstance(obj, Keyword): 34 img = obj.logo 35 36 height = '{}.px'.format(height) 37 38 return {'alt': alt, 39 'height': height, 40 'img': img, 41 'geometry': geometry, 42 'width': width} 43 44 45 @register.inclusion_tag('rsr_utils/vid_img.html', takes_context=True) 46 def vid_img(context, obj, width, height, alt): 47 """Standard way to show video thumbnail.""" 48 geometry = '{}x{}'.format(width, height) 49 50 # Based on type get video 51 vid = obj 52 if isinstance(obj, ProjectUpdate): 53 vid = obj.video 54 55 height = '{}.px'.format(height) 56 57 return {'alt': alt, 58 'height': height, 59 'vid': vid, 60 'geometry': geometry, 61 'width': width} 62 [end of akvo/rsr/templatetags/rsr_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rsr/templatetags/rsr_utils.py b/akvo/rsr/templatetags/rsr_utils.py --- a/akvo/rsr/templatetags/rsr_utils.py +++ b/akvo/rsr/templatetags/rsr_utils.py @@ -8,6 +8,7 @@ from __future__ import absolute_import, print_function from django import template +from django.conf import settings from django.contrib.auth import get_user_model from akvo.rsr.models import Keyword, PartnerSite, Project, ProjectUpdate, Organisation register = template.Library() @@ -16,16 +17,22 @@ @register.inclusion_tag('rsr_utils/img.html', takes_context=True) def img(context, obj, width, height, alt): """Standard way to show image.""" - geometry = '{}x{}'.format(width, height) + img = "" + geometry = "{}x{}".format(width, height) + default_img = "//placehold.it/{}".format(geometry) - # Based on type get image - img = obj if isinstance(obj, Project): img = obj.current_image elif isinstance(obj, ProjectUpdate): img = obj.photo elif isinstance(obj, Organisation): - img = obj.logo + if obj.logo: + img = obj.logo + else: + default_img = "//{}{}{}".format( + context["request"].get_host(), + getattr(settings, "STATIC_URL"), + "images/default-org-logo.jpg") elif isinstance(obj, get_user_model()): img = obj.avatar elif isinstance(obj, PartnerSite): @@ -33,13 +40,10 @@ elif isinstance(obj, Keyword): img = obj.logo - height = '{}.px'.format(height) - - return {'alt': alt, - 'height': height, - 'img': img, - 'geometry': geometry, - 'width': width} + return {"default_img": default_img, + "geometry": geometry, + "img": img, + "alt": alt} @register.inclusion_tag('rsr_utils/vid_img.html', takes_context=True)
{"golden_diff": "diff --git a/akvo/rsr/templatetags/rsr_utils.py b/akvo/rsr/templatetags/rsr_utils.py\n--- a/akvo/rsr/templatetags/rsr_utils.py\n+++ b/akvo/rsr/templatetags/rsr_utils.py\n@@ -8,6 +8,7 @@\n from __future__ import absolute_import, print_function\n \n from django import template\n+from django.conf import settings\n from django.contrib.auth import get_user_model\n from akvo.rsr.models import Keyword, PartnerSite, Project, ProjectUpdate, Organisation\n register = template.Library()\n@@ -16,16 +17,22 @@\n @register.inclusion_tag('rsr_utils/img.html', takes_context=True)\n def img(context, obj, width, height, alt):\n \"\"\"Standard way to show image.\"\"\"\n- geometry = '{}x{}'.format(width, height)\n+ img = \"\"\n+ geometry = \"{}x{}\".format(width, height)\n+ default_img = \"//placehold.it/{}\".format(geometry)\n \n- # Based on type get image\n- img = obj\n if isinstance(obj, Project):\n img = obj.current_image\n elif isinstance(obj, ProjectUpdate):\n img = obj.photo\n elif isinstance(obj, Organisation):\n- img = obj.logo\n+ if obj.logo:\n+ img = obj.logo\n+ else:\n+ default_img = \"//{}{}{}\".format(\n+ context[\"request\"].get_host(),\n+ getattr(settings, \"STATIC_URL\"),\n+ \"images/default-org-logo.jpg\")\n elif isinstance(obj, get_user_model()):\n img = obj.avatar\n elif isinstance(obj, PartnerSite):\n@@ -33,13 +40,10 @@\n elif isinstance(obj, Keyword):\n img = obj.logo\n \n- height = '{}.px'.format(height)\n-\n- return {'alt': alt,\n- 'height': height,\n- 'img': img,\n- 'geometry': geometry,\n- 'width': width}\n+ return {\"default_img\": default_img,\n+ \"geometry\": geometry,\n+ \"img\": img,\n+ \"alt\": alt}\n \n \n @register.inclusion_tag('rsr_utils/vid_img.html', takes_context=True)\n", "issue": "Add a default Organsiation Logo image\n- [x] Finalise image decision\n- [x] Implement new image\n## Test plan\n\nGIVEN the organisation directory page\nWHEN an organisation has no logo\nTHEN the image as shown below should be displayed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom django import template\nfrom django.contrib.auth import get_user_model\nfrom akvo.rsr.models import Keyword, PartnerSite, Project, ProjectUpdate, Organisation\nregister = template.Library()\n\n\[email protected]_tag('rsr_utils/img.html', takes_context=True)\ndef img(context, obj, width, height, alt):\n \"\"\"Standard way to show image.\"\"\"\n geometry = '{}x{}'.format(width, height)\n\n # Based on type get image\n img = obj\n if isinstance(obj, Project):\n img = obj.current_image\n elif isinstance(obj, ProjectUpdate):\n img = obj.photo\n elif isinstance(obj, Organisation):\n img = obj.logo\n elif isinstance(obj, get_user_model()):\n img = obj.avatar\n elif isinstance(obj, PartnerSite):\n img = obj.logo\n elif isinstance(obj, Keyword):\n img = obj.logo\n\n height = '{}.px'.format(height)\n\n return {'alt': alt,\n 'height': height,\n 'img': img,\n 'geometry': geometry,\n 'width': width}\n\n\[email protected]_tag('rsr_utils/vid_img.html', takes_context=True)\ndef vid_img(context, obj, width, height, alt):\n \"\"\"Standard way to show video thumbnail.\"\"\"\n geometry = '{}x{}'.format(width, height)\n\n # Based on type get video\n vid = obj\n if isinstance(obj, 
ProjectUpdate):\n vid = obj.video\n\n height = '{}.px'.format(height)\n\n return {'alt': alt,\n 'height': height,\n 'vid': vid,\n 'geometry': geometry,\n 'width': width}\n", "path": "akvo/rsr/templatetags/rsr_utils.py"}]}
1,153
489
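Stripped of the Django template plumbing, the change in this record is a fallback choice: keep the placeholder URL unless the object is an organisation without a logo, in which case point at a bundled static image. A sketch of that selection logic; `host` and `static_url` here are illustrative stand-ins for `request.get_host()` and `settings.STATIC_URL` in the real template tag:

```python
def org_img_context(logo, width, height, alt,
                    host="rsr.akvo.org", static_url="/static/"):
    # host/static_url are assumed example values, not taken from the project.
    geometry = "{}x{}".format(width, height)
    default_img = "//placehold.it/{}".format(geometry)
    if not logo:
        # Organisation without a logo: serve the bundled default image.
        default_img = "//{}{}{}".format(host, static_url,
                                        "images/default-org-logo.jpg")
    return {"default_img": default_img, "geometry": geometry,
            "img": logo or "", "alt": alt}

print(org_img_context(None, 80, 80, "Org logo")["default_img"])
# //rsr.akvo.org/static/images/default-org-logo.jpg
```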
gh_patches_debug_18945
rasdani/github-patches
git_diff
jazzband__pip-tools-410
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Extras not propagated into requirements.txt after insertion Extras, in this context, are specific functional extensions to a particular package. This is similar to #154 but differs because this triggers when adding an extra to an existing requirement. The cached result appears to prevent appropriate detection that the extra was added. ##### Steps to replicate 1. Create a `requirements.in` as follows: ```text pip-tools hypothesis ``` 2. Run `pip-compile` from the command line. 3. Edit the requirements.in file to the following: ```text pip-tools hypothesis[django] ``` 4. Re-run `pip-compile` from the command line. 3. Examine `requirements.txt` ##### Expected result ```text # # This file is autogenerated by pip-compile # To update, run: # # pip-compile --output-file reqs.txt reqs.in # click==6.6 # via pip-tools django==1.10.3 # via hypothesis fake-factory==0.7.2 # via hypothesis first==2.0.1 # via pip-tools hypothesis[django]==3.6.0 pip-tools==1.7.0 python-dateutil==2.6.0 # via fake-factory pytz==2016.7 # via hypothesis six==1.10.0 # via fake-factory, pip-tools, python-dateutil ``` ##### Actual result ```text # # This file is autogenerated by pip-compile # To update, run: # # pip-compile --output-file reqs.txt reqs.in # click==6.6 # via pip-tools first==2.0.1 # via pip-tools hypothesis==3.6.0 pip-tools==1.7.0 six==1.10.0 # via pip-tools ``` ##### Workaround Simply deleting the existing `requirements.txt` file and installing from scratch appears to work, but shouldn't be a necessary part of the pip-tools workflow. </issue> <code> [start of piptools/repositories/local.py] 1 # coding: utf-8 2 from __future__ import (absolute_import, division, print_function, 3 unicode_literals) 4 5 from piptools.utils import key_from_req 6 from .base import BaseRepository 7 8 9 def ireq_satisfied_by_existing_pin(ireq, existing_pin): 10 """ 11 Return True if the given InstallationRequirement is satisfied by the 12 previously encountered version pin. 13 """ 14 if hasattr(existing_pin.req, 'specs'): 15 # pip < 8.1.2 16 version = existing_pin.req.specs[0][1] 17 return version in ireq.req 18 else: 19 # pip >= 8.1.2 20 version = next(iter(existing_pin.req.specifier)).version 21 return version in ireq.req.specifier 22 23 24 class LocalRequirementsRepository(BaseRepository): 25 """ 26 The LocalRequirementsRepository proxied the _real_ repository by first 27 checking if a requirement can be satisfied by existing pins (i.e. the 28 result of a previous compile step). 29 30 In effect, if a requirement can be satisfied with a version pinned in the 31 requirements file, we prefer that version over the best match found in 32 PyPI. This keeps updates to the requirements.txt down to a minimum. 
33 """ 34 def __init__(self, existing_pins, proxied_repository): 35 self.repository = proxied_repository 36 self.existing_pins = existing_pins 37 38 @property 39 def finder(self): 40 return self.repository.finder 41 42 @property 43 def session(self): 44 return self.repository.session 45 46 @property 47 def DEFAULT_INDEX_URL(self): 48 return self.repository.DEFAULT_INDEX_URL 49 50 def clear_caches(self): 51 self.repository.clear_caches() 52 53 def freshen_build_caches(self): 54 self.repository.freshen_build_caches() 55 56 def find_best_match(self, ireq, prereleases=None): 57 key = key_from_req(ireq.req) 58 existing_pin = self.existing_pins.get(key) 59 if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin): 60 return existing_pin 61 else: 62 return self.repository.find_best_match(ireq, prereleases) 63 64 def get_dependencies(self, ireq): 65 return self.repository.get_dependencies(ireq) 66 [end of piptools/repositories/local.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/piptools/repositories/local.py b/piptools/repositories/local.py --- a/piptools/repositories/local.py +++ b/piptools/repositories/local.py @@ -2,7 +2,7 @@ from __future__ import (absolute_import, division, print_function, unicode_literals) -from piptools.utils import key_from_req +from piptools.utils import as_tuple, key_from_req, make_install_requirement from .base import BaseRepository @@ -57,7 +57,10 @@ key = key_from_req(ireq.req) existing_pin = self.existing_pins.get(key) if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin): - return existing_pin + project, version, _ = as_tuple(existing_pin) + return make_install_requirement( + project, version, ireq.extras + ) else: return self.repository.find_best_match(ireq, prereleases)
{"golden_diff": "diff --git a/piptools/repositories/local.py b/piptools/repositories/local.py\n--- a/piptools/repositories/local.py\n+++ b/piptools/repositories/local.py\n@@ -2,7 +2,7 @@\n from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n \n-from piptools.utils import key_from_req\n+from piptools.utils import as_tuple, key_from_req, make_install_requirement\n from .base import BaseRepository\n \n \n@@ -57,7 +57,10 @@\n key = key_from_req(ireq.req)\n existing_pin = self.existing_pins.get(key)\n if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin):\n- return existing_pin\n+ project, version, _ = as_tuple(existing_pin)\n+ return make_install_requirement(\n+ project, version, ireq.extras\n+ )\n else:\n return self.repository.find_best_match(ireq, prereleases)\n", "issue": "Extras not propagated into requirements.txt after insertion\nExtras, in this context, are specific functional extensions to a particular package.\r\n\r\nThis is similar to #154 but differs because this triggers when adding an extra to an existing requirement.\r\n\r\nThe cached result appears to prevent appropriate detection that the extra was added.\r\n\r\n##### Steps to replicate\r\n\r\n1. Create a `requirements.in` as follows:\r\n ```text\r\npip-tools\r\nhypothesis\r\n ```\r\n2. Run `pip-compile` from the command line.\r\n3. Edit the requirements.in file to the following:\r\n ```text\r\npip-tools\r\nhypothesis[django]\r\n ```\r\n4. Re-run `pip-compile` from the command line.\r\n3. Examine `requirements.txt`\r\n\r\n##### Expected result\r\n\r\n```text\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file reqs.txt reqs.in\r\n#\r\nclick==6.6 # via pip-tools\r\ndjango==1.10.3 # via hypothesis\r\nfake-factory==0.7.2 # via hypothesis\r\nfirst==2.0.1 # via pip-tools\r\nhypothesis[django]==3.6.0\r\npip-tools==1.7.0\r\npython-dateutil==2.6.0 # via fake-factory\r\npytz==2016.7 # via hypothesis\r\nsix==1.10.0 # via fake-factory, pip-tools, python-dateutil\r\n```\r\n\r\n##### Actual result\r\n\r\n```text\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file reqs.txt reqs.in\r\n#\r\nclick==6.6 # via pip-tools\r\nfirst==2.0.1 # via pip-tools\r\nhypothesis==3.6.0\r\npip-tools==1.7.0\r\nsix==1.10.0 # via pip-tools\r\n```\r\n\r\n##### Workaround\r\n\r\nSimply deleting the existing `requirements.txt` file and installing from scratch appears to work, but shouldn't be a necessary part of the pip-tools workflow.\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom piptools.utils import key_from_req\nfrom .base import BaseRepository\n\n\ndef ireq_satisfied_by_existing_pin(ireq, existing_pin):\n \"\"\"\n Return True if the given InstallationRequirement is satisfied by the\n previously encountered version pin.\n \"\"\"\n if hasattr(existing_pin.req, 'specs'):\n # pip < 8.1.2\n version = existing_pin.req.specs[0][1]\n return version in ireq.req\n else:\n # pip >= 8.1.2\n version = next(iter(existing_pin.req.specifier)).version\n return version in ireq.req.specifier\n\n\nclass LocalRequirementsRepository(BaseRepository):\n \"\"\"\n The LocalRequirementsRepository proxied the _real_ repository by first\n checking if a requirement can be satisfied by existing pins (i.e. 
the\n result of a previous compile step).\n\n In effect, if a requirement can be satisfied with a version pinned in the\n requirements file, we prefer that version over the best match found in\n PyPI. This keeps updates to the requirements.txt down to a minimum.\n \"\"\"\n def __init__(self, existing_pins, proxied_repository):\n self.repository = proxied_repository\n self.existing_pins = existing_pins\n\n @property\n def finder(self):\n return self.repository.finder\n\n @property\n def session(self):\n return self.repository.session\n\n @property\n def DEFAULT_INDEX_URL(self):\n return self.repository.DEFAULT_INDEX_URL\n\n def clear_caches(self):\n self.repository.clear_caches()\n\n def freshen_build_caches(self):\n self.repository.freshen_build_caches()\n\n def find_best_match(self, ireq, prereleases=None):\n key = key_from_req(ireq.req)\n existing_pin = self.existing_pins.get(key)\n if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin):\n return existing_pin\n else:\n return self.repository.find_best_match(ireq, prereleases)\n\n def get_dependencies(self, ireq):\n return self.repository.get_dependencies(ireq)\n", "path": "piptools/repositories/local.py"}]}
1,599
219
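The root cause in this record is that `find_best_match` returned the cached pin object verbatim, dropping whatever extras the incoming requirement carried. A pure-Python sketch of the corrected lookup, with `make_install_requirement` reduced to string formatting so it runs without pip internals:

```python
def find_best_match(name, extras, existing_pins):
    pin = existing_pins.get(name)
    if pin is None:
        return None  # would fall through to the real index lookup
    project, version = pin
    # Rebuild the requirement from the *incoming* extras instead of
    # returning the cached pin unchanged.
    extras_part = "[{}]".format(",".join(sorted(extras))) if extras else ""
    return "{}{}=={}".format(project, extras_part, version)

pins = {"hypothesis": ("hypothesis", "3.6.0")}
print(find_best_match("hypothesis", {"django"}, pins))  # hypothesis[django]==3.6.0
print(find_best_match("hypothesis", set(), pins))       # hypothesis==3.6.0
```

This mirrors the golden diff, where the existing pin is unpacked with `as_tuple` and re-wrapped via `make_install_requirement(project, version, ireq.extras)`.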
gh_patches_debug_16295
rasdani/github-patches
git_diff
easybuilders__easybuild-easyblocks-2286
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> stderr: ERROR: Unable to locate a modulefile for 'PrgEnv-pgi' I am installing CrayCCE on the a Cray XC40 and am recieving the error. ``` stderr: ERROR: Unable to locate a modulefile for 'PrgEnv-pgi'\n)") ``` Looking at the LOG I see... ``` module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-pgi ; stderr: ERROR: Unable to locate a modulefile for 'PrgEnv-pgi' ``` In our system we do not even have `PrgEnv-pgi` so it is natural that we get this error, but we are not using pgi either so it should be checked if such a module exists prior of unloading it. Also `PrgEnv-pgi` is not listed as a dependency in the config file. We are using module.tcl Modules Release 4.1.3.1 (2018-11-29) and I know that EasyBuild prefers Lmod, but still module.tcl is supported. Cheers Henric </issue> <code> [start of easybuild/easyblocks/generic/craytoolchain.py] 1 ## 2 # Copyright 2015-2020 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 24 ## 25 """ 26 EasyBuild support for installing Cray toolchains, implemented as an easyblock 27 28 @author: Kenneth Hoste (Ghent University) 29 @author: Guilherme Peretti Pezzi (CSCS) 30 @author: Petar Forai (IMP/IMBA) 31 """ 32 33 from easybuild.easyblocks.generic.bundle import Bundle 34 from easybuild.tools.build_log import EasyBuildError 35 36 37 KNOWN_PRGENVS = ['PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi'] 38 39 40 class CrayToolchain(Bundle): 41 """ 42 Compiler toolchain: generate module file only, nothing to build/install 43 """ 44 45 def prepare_step(self, *args, **kwargs): 46 """Prepare build environment (skip loaded of dependencies).""" 47 48 kwargs['load_tc_deps_modules'] = False 49 50 super(CrayToolchain, self).prepare_step(*args, **kwargs) 51 52 def make_module_dep(self): 53 """ 54 Generate load/swap statements for dependencies in the module file 55 """ 56 prgenv_mod = None 57 58 # collect 'swap' statement for dependencies (except PrgEnv) 59 swap_deps = [] 60 for dep in self.toolchain.dependencies: 61 mod_name = dep['full_mod_name'] 62 # determine versionless module name, e.g. 
'fftw/3.3.4.1' => 'fftw' 63 dep_name = '/'.join(mod_name.split('/')[:-1]) 64 65 if mod_name.startswith('PrgEnv'): 66 prgenv_mod = mod_name 67 else: 68 swap_deps.append(self.module_generator.swap_module(dep_name, mod_name).lstrip()) 69 70 self.log.debug("Swap statements for dependencies of %s: %s", self.full_mod_name, swap_deps) 71 72 if prgenv_mod is None: 73 raise EasyBuildError("Could not find a PrgEnv-* module listed as dependency: %s", 74 self.toolchain.dependencies) 75 76 # unload statements for other PrgEnv modules 77 prgenv_unloads = [''] 78 for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]: 79 prgenv_unloads.append(self.module_generator.unload_module(prgenv).strip()) 80 81 # load statement for selected PrgEnv module (only when not loaded yet) 82 prgenv_load = self.module_generator.load_module(prgenv_mod, recursive_unload=False) 83 84 txt = '\n'.join(prgenv_unloads + [prgenv_load] + swap_deps) 85 return txt 86 [end of easybuild/easyblocks/generic/craytoolchain.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/easybuild/easyblocks/generic/craytoolchain.py b/easybuild/easyblocks/generic/craytoolchain.py --- a/easybuild/easyblocks/generic/craytoolchain.py +++ b/easybuild/easyblocks/generic/craytoolchain.py @@ -76,7 +76,9 @@ # unload statements for other PrgEnv modules prgenv_unloads = [''] for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]: - prgenv_unloads.append(self.module_generator.unload_module(prgenv).strip()) + is_loaded_guard = self.module_generator.is_loaded(prgenv) + unload_stmt = self.module_generator.unload_module(prgenv).strip() + prgenv_unloads.append(self.module_generator.conditional_statement(is_loaded_guard, unload_stmt)) # load statement for selected PrgEnv module (only when not loaded yet) prgenv_load = self.module_generator.load_module(prgenv_mod, recursive_unload=False)
{"golden_diff": "diff --git a/easybuild/easyblocks/generic/craytoolchain.py b/easybuild/easyblocks/generic/craytoolchain.py\n--- a/easybuild/easyblocks/generic/craytoolchain.py\n+++ b/easybuild/easyblocks/generic/craytoolchain.py\n@@ -76,7 +76,9 @@\n # unload statements for other PrgEnv modules\n prgenv_unloads = ['']\n for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]:\n- prgenv_unloads.append(self.module_generator.unload_module(prgenv).strip())\n+ is_loaded_guard = self.module_generator.is_loaded(prgenv)\n+ unload_stmt = self.module_generator.unload_module(prgenv).strip()\n+ prgenv_unloads.append(self.module_generator.conditional_statement(is_loaded_guard, unload_stmt))\n \n # load statement for selected PrgEnv module (only when not loaded yet)\n prgenv_load = self.module_generator.load_module(prgenv_mod, recursive_unload=False)\n", "issue": "stderr: ERROR: Unable to locate a modulefile for 'PrgEnv-pgi' \nI am installing CrayCCE on the a Cray XC40 and am recieving the error.\r\n\r\n```\r\nstderr: ERROR: Unable to locate a modulefile for 'PrgEnv-pgi'\\n)\") \r\n```\r\n\r\nLooking at the LOG I see...\r\n\r\n```\r\nmodule unload PrgEnv-gnu\r\nmodule unload PrgEnv-intel\r\nmodule unload PrgEnv-pgi\r\n\r\n; stderr: ERROR: Unable to locate a modulefile for 'PrgEnv-pgi' \r\n```\r\n\r\nIn our system we do not even have `PrgEnv-pgi` so it is natural that we get this error, but we are not using pgi either so\r\nit should be checked if such a module exists prior of unloading it.\r\nAlso `PrgEnv-pgi` is not listed as a dependency in the config file.\r\n\r\nWe are using module.tcl Modules Release 4.1.3.1 (2018-11-29)\r\nand I know that EasyBuild prefers Lmod, but still module.tcl is supported.\r\n\r\nCheers\r\nHenric\n", "before_files": [{"content": "##\n# Copyright 2015-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing Cray toolchains, implemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n@author: Guilherme Peretti Pezzi (CSCS)\n@author: Petar Forai (IMP/IMBA)\n\"\"\"\n\nfrom easybuild.easyblocks.generic.bundle import Bundle\nfrom easybuild.tools.build_log import EasyBuildError\n\n\nKNOWN_PRGENVS = ['PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi']\n\n\nclass CrayToolchain(Bundle):\n \"\"\"\n Compiler toolchain: generate module file only, nothing to build/install\n \"\"\"\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"Prepare build environment (skip loaded of dependencies).\"\"\"\n\n kwargs['load_tc_deps_modules'] = False\n\n super(CrayToolchain, self).prepare_step(*args, **kwargs)\n\n def make_module_dep(self):\n \"\"\"\n Generate load/swap statements for dependencies in the module file\n \"\"\"\n prgenv_mod = None\n\n # collect 'swap' statement for dependencies (except PrgEnv)\n swap_deps = []\n for dep in self.toolchain.dependencies:\n mod_name = dep['full_mod_name']\n # determine versionless module name, e.g. 'fftw/3.3.4.1' => 'fftw'\n dep_name = '/'.join(mod_name.split('/')[:-1])\n\n if mod_name.startswith('PrgEnv'):\n prgenv_mod = mod_name\n else:\n swap_deps.append(self.module_generator.swap_module(dep_name, mod_name).lstrip())\n\n self.log.debug(\"Swap statements for dependencies of %s: %s\", self.full_mod_name, swap_deps)\n\n if prgenv_mod is None:\n raise EasyBuildError(\"Could not find a PrgEnv-* module listed as dependency: %s\",\n self.toolchain.dependencies)\n\n # unload statements for other PrgEnv modules\n prgenv_unloads = ['']\n for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]:\n prgenv_unloads.append(self.module_generator.unload_module(prgenv).strip())\n\n # load statement for selected PrgEnv module (only when not loaded yet)\n prgenv_load = self.module_generator.load_module(prgenv_mod, recursive_unload=False)\n\n txt = '\\n'.join(prgenv_unloads + [prgenv_load] + swap_deps)\n return txt\n", "path": "easybuild/easyblocks/generic/craytoolchain.py"}]}
1,780
241
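The patched easyblock wraps every foreign `module unload PrgEnv-*` in an is-loaded guard, so Tcl-based Environment Modules no longer abort on modulefiles that do not exist on the system. A rough sketch of the kind of guarded text this produces; the exact syntax comes from EasyBuild's module generator (`is_loaded` plus `conditional_statement`), and this Tcl flavour is only illustrative:

```python
KNOWN_PRGENVS = ["PrgEnv-cray", "PrgEnv-gnu", "PrgEnv-intel", "PrgEnv-pgi"]

def prgenv_unload_block(selected_mod):
    """Emit guarded unloads for every PrgEnv other than the selected one."""
    lines = []
    for prgenv in KNOWN_PRGENVS:
        if selected_mod.startswith(prgenv):
            continue
        lines.append("if {{ [ is-loaded {0} ] }} {{\n"
                     "    module unload {0}\n"
                     "}}".format(prgenv))
    return "\n".join(lines)

print(prgenv_unload_block("PrgEnv-cray/6.0.4"))
```

On a system without `PrgEnv-pgi`, the guard simply evaluates to false and no unload is attempted, which is exactly the failure mode reported on the Cray XC40.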
gh_patches_debug_19559
rasdani/github-patches
git_diff
scrapy__scrapy-584
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deprecated class inheritance check bug ``` python >>> from scrapy.spider import BaseSpider >>> class A(BaseSpider): ... pass ... >>> class B(BaseSpider): ... pass ... >>> isinstance(A('foo'), B) True ``` </issue> <code> [start of scrapy/utils/deprecate.py] 1 """Some helpers for deprecation messages""" 2 3 import warnings 4 import inspect 5 from scrapy.exceptions import ScrapyDeprecationWarning 6 7 8 def attribute(obj, oldattr, newattr, version='0.12'): 9 cname = obj.__class__.__name__ 10 warnings.warn("%s.%s attribute is deprecated and will be no longer supported " 11 "in Scrapy %s, use %s.%s attribute instead" % \ 12 (cname, oldattr, version, cname, newattr), ScrapyDeprecationWarning, stacklevel=3) 13 14 15 def create_deprecated_class(name, new_class, clsdict=None, 16 warn_category=ScrapyDeprecationWarning, 17 warn_once=True, 18 old_class_path=None, 19 new_class_path=None, 20 subclass_warn_message="{cls} inherits from "\ 21 "deprecated class {old}, please inherit "\ 22 "from {new}.", 23 instance_warn_message="{cls} is deprecated, "\ 24 "instantiate {new} instead."): 25 """ 26 Return a "deprecated" class that causes its subclasses to issue a warning. 27 Subclasses of ``new_class`` are considered subclasses of this class. 28 It also warns when the deprecated class is instantiated, but do not when 29 its subclasses are instantiated. 30 31 It can be used to rename a base class in a library. For example, if we 32 have 33 34 class OldName(SomeClass): 35 # ... 36 37 and we want to rename it to NewName, we can do the following:: 38 39 class NewName(SomeClass): 40 # ... 41 42 OldName = create_deprecated_class('OldName', NewName) 43 44 Then, if user class inherits from OldName, warning is issued. Also, if 45 some code uses ``issubclass(sub, OldName)`` or ``isinstance(sub(), OldName)`` 46 checks they'll still return True if sub is a subclass of NewName instead of 47 OldName. 
48 """ 49 50 class DeprecatedClass(new_class.__class__): 51 52 deprecated_class = None 53 warned_on_subclass = False 54 55 def __new__(metacls, name, bases, clsdict_): 56 cls = super(DeprecatedClass, metacls).__new__(metacls, name, bases, clsdict_) 57 if metacls.deprecated_class is None: 58 metacls.deprecated_class = cls 59 return cls 60 61 def __init__(cls, name, bases, clsdict_): 62 meta = cls.__class__ 63 old = meta.deprecated_class 64 if old in bases and not (warn_once and meta.warned_on_subclass): 65 meta.warned_on_subclass = True 66 msg = subclass_warn_message.format(cls=_clspath(cls), 67 old=_clspath(old, old_class_path), 68 new=_clspath(new_class, new_class_path)) 69 if warn_once: 70 msg += ' (warning only on first subclass, there may be others)' 71 warnings.warn(msg, warn_category, stacklevel=2) 72 super(DeprecatedClass, cls).__init__(name, bases, clsdict_) 73 74 # see http://www.python.org/dev/peps/pep-3119/#overloading-isinstance-and-issubclass 75 # and http://docs.python.org/2/reference/datamodel.html#customizing-instance-and-subclass-checks 76 # for implementation details 77 def __instancecheck__(cls, inst): 78 return any(cls.__subclasscheck__(c) 79 for c in {type(inst), inst.__class__}) 80 81 def __subclasscheck__(cls, sub): 82 if not inspect.isclass(sub): 83 raise TypeError("issubclass() arg 1 must be a class") 84 85 mro = getattr(sub, '__mro__', ()) 86 candidates = {cls, new_class} 87 return any(c in candidates for c in mro) 88 89 def __call__(cls, *args, **kwargs): 90 old = DeprecatedClass.deprecated_class 91 if cls is old: 92 msg = instance_warn_message.format(cls=_clspath(cls, old_class_path), 93 new=_clspath(new_class, new_class_path)) 94 warnings.warn(msg, warn_category, stacklevel=2) 95 return super(DeprecatedClass, cls).__call__(*args, **kwargs) 96 97 deprecated_cls = DeprecatedClass(name, (new_class,), clsdict or {}) 98 frm = inspect.stack()[1] 99 parent_module = inspect.getmodule(frm[0]) 100 if parent_module is not None: 101 deprecated_cls.__module__ = parent_module.__name__ 102 103 return deprecated_cls 104 105 106 def _clspath(cls, forced=None): 107 if forced is not None: 108 return forced 109 return '{}.{}'.format(cls.__module__, cls.__name__) 110 [end of scrapy/utils/deprecate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/utils/deprecate.py b/scrapy/utils/deprecate.py --- a/scrapy/utils/deprecate.py +++ b/scrapy/utils/deprecate.py @@ -79,12 +79,18 @@ for c in {type(inst), inst.__class__}) def __subclasscheck__(cls, sub): + if cls is not DeprecatedClass.deprecated_class: + # we should do the magic only if second `issubclass` argument + # is the deprecated class itself - subclasses of the + # deprecated class should not use custom `__subclasscheck__` + # method. + return super(DeprecatedClass, cls).__subclasscheck__(sub) + if not inspect.isclass(sub): raise TypeError("issubclass() arg 1 must be a class") mro = getattr(sub, '__mro__', ()) - candidates = {cls, new_class} - return any(c in candidates for c in mro) + return any(c in {cls, new_class} for c in mro) def __call__(cls, *args, **kwargs): old = DeprecatedClass.deprecated_class
{"golden_diff": "diff --git a/scrapy/utils/deprecate.py b/scrapy/utils/deprecate.py\n--- a/scrapy/utils/deprecate.py\n+++ b/scrapy/utils/deprecate.py\n@@ -79,12 +79,18 @@\n for c in {type(inst), inst.__class__})\n \n def __subclasscheck__(cls, sub):\n+ if cls is not DeprecatedClass.deprecated_class:\n+ # we should do the magic only if second `issubclass` argument\n+ # is the deprecated class itself - subclasses of the\n+ # deprecated class should not use custom `__subclasscheck__`\n+ # method.\n+ return super(DeprecatedClass, cls).__subclasscheck__(sub)\n+\n if not inspect.isclass(sub):\n raise TypeError(\"issubclass() arg 1 must be a class\")\n \n mro = getattr(sub, '__mro__', ())\n- candidates = {cls, new_class}\n- return any(c in candidates for c in mro)\n+ return any(c in {cls, new_class} for c in mro)\n \n def __call__(cls, *args, **kwargs):\n old = DeprecatedClass.deprecated_class\n", "issue": "Deprecated class inheritance check bug\n``` python\n>>> from scrapy.spider import BaseSpider\n>>> class A(BaseSpider):\n... pass\n... \n>>> class B(BaseSpider):\n... pass\n... \n>>> isinstance(A('foo'), B)\nTrue\n```\n\n", "before_files": [{"content": "\"\"\"Some helpers for deprecation messages\"\"\"\n\nimport warnings\nimport inspect\nfrom scrapy.exceptions import ScrapyDeprecationWarning\n\n\ndef attribute(obj, oldattr, newattr, version='0.12'):\n cname = obj.__class__.__name__\n warnings.warn(\"%s.%s attribute is deprecated and will be no longer supported \"\n \"in Scrapy %s, use %s.%s attribute instead\" % \\\n (cname, oldattr, version, cname, newattr), ScrapyDeprecationWarning, stacklevel=3)\n\n\ndef create_deprecated_class(name, new_class, clsdict=None,\n warn_category=ScrapyDeprecationWarning,\n warn_once=True,\n old_class_path=None,\n new_class_path=None,\n subclass_warn_message=\"{cls} inherits from \"\\\n \"deprecated class {old}, please inherit \"\\\n \"from {new}.\",\n instance_warn_message=\"{cls} is deprecated, \"\\\n \"instantiate {new} instead.\"):\n \"\"\"\n Return a \"deprecated\" class that causes its subclasses to issue a warning.\n Subclasses of ``new_class`` are considered subclasses of this class.\n It also warns when the deprecated class is instantiated, but do not when\n its subclasses are instantiated.\n\n It can be used to rename a base class in a library. For example, if we\n have\n\n class OldName(SomeClass):\n # ...\n\n and we want to rename it to NewName, we can do the following::\n\n class NewName(SomeClass):\n # ...\n\n OldName = create_deprecated_class('OldName', NewName)\n\n Then, if user class inherits from OldName, warning is issued. 
Also, if\n some code uses ``issubclass(sub, OldName)`` or ``isinstance(sub(), OldName)``\n checks they'll still return True if sub is a subclass of NewName instead of\n OldName.\n \"\"\"\n\n class DeprecatedClass(new_class.__class__):\n\n deprecated_class = None\n warned_on_subclass = False\n\n def __new__(metacls, name, bases, clsdict_):\n cls = super(DeprecatedClass, metacls).__new__(metacls, name, bases, clsdict_)\n if metacls.deprecated_class is None:\n metacls.deprecated_class = cls\n return cls\n\n def __init__(cls, name, bases, clsdict_):\n meta = cls.__class__\n old = meta.deprecated_class\n if old in bases and not (warn_once and meta.warned_on_subclass):\n meta.warned_on_subclass = True\n msg = subclass_warn_message.format(cls=_clspath(cls),\n old=_clspath(old, old_class_path),\n new=_clspath(new_class, new_class_path))\n if warn_once:\n msg += ' (warning only on first subclass, there may be others)'\n warnings.warn(msg, warn_category, stacklevel=2)\n super(DeprecatedClass, cls).__init__(name, bases, clsdict_)\n\n # see http://www.python.org/dev/peps/pep-3119/#overloading-isinstance-and-issubclass\n # and http://docs.python.org/2/reference/datamodel.html#customizing-instance-and-subclass-checks\n # for implementation details\n def __instancecheck__(cls, inst):\n return any(cls.__subclasscheck__(c)\n for c in {type(inst), inst.__class__})\n\n def __subclasscheck__(cls, sub):\n if not inspect.isclass(sub):\n raise TypeError(\"issubclass() arg 1 must be a class\")\n\n mro = getattr(sub, '__mro__', ())\n candidates = {cls, new_class}\n return any(c in candidates for c in mro)\n\n def __call__(cls, *args, **kwargs):\n old = DeprecatedClass.deprecated_class\n if cls is old:\n msg = instance_warn_message.format(cls=_clspath(cls, old_class_path),\n new=_clspath(new_class, new_class_path))\n warnings.warn(msg, warn_category, stacklevel=2)\n return super(DeprecatedClass, cls).__call__(*args, **kwargs)\n\n deprecated_cls = DeprecatedClass(name, (new_class,), clsdict or {})\n frm = inspect.stack()[1]\n parent_module = inspect.getmodule(frm[0])\n if parent_module is not None:\n deprecated_cls.__module__ = parent_module.__name__\n\n return deprecated_cls\n\n\ndef _clspath(cls, forced=None):\n if forced is not None:\n return forced\n return '{}.{}'.format(cls.__module__, cls.__name__)\n", "path": "scrapy/utils/deprecate.py"}]}
1,812
260
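A minimal, self-contained sketch of the metaclass guard this record's golden diff introduces. `New`, `Old`, `A`, and `B` are hypothetical stand-ins rather than scrapy classes; the point is that `__subclasscheck__` only applies its custom logic when the left-hand class is the deprecated class itself:

```python
import inspect


class New:
    pass


class _DeprecatedMeta(type):
    deprecated_class = None

    def __new__(metacls, name, bases, ns):
        cls = super().__new__(metacls, name, bases, ns)
        if metacls.deprecated_class is None:
            metacls.deprecated_class = cls  # the first class created plays "Old"
        return cls

    def __subclasscheck__(cls, sub):
        if cls is not _DeprecatedMeta.deprecated_class:
            # Guard from the diff: siblings of the deprecated class fall
            # back to the default check instead of the magic below.
            return super().__subclasscheck__(sub)
        if not inspect.isclass(sub):
            raise TypeError("issubclass() arg 1 must be a class")
        return any(c in {cls, New} for c in getattr(sub, "__mro__", ()))


Old = _DeprecatedMeta("Old", (New,), {})


class A(Old):
    pass


class B(Old):
    pass


assert issubclass(A, Old)    # aliasing behaviour is kept
assert not issubclass(A, B)  # the bug from the issue is gone
```

Without the `cls is not ... deprecated_class` guard, `B.__subclasscheck__` would scan `A.__mro__` for `{B, New}`, find `New`, and wrongly report the two siblings as related — exactly the `isinstance(A('foo'), B)` result in the issue.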
gh_patches_debug_17388
rasdani/github-patches
git_diff
Zeroto521__my-data-toolkit-816
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG: CRS is missing https://github.com/Zeroto521/my-data-toolkit/blob/93aaefa0193c1822af21c60d9edbbc9acff68815/dtoolkit/geoaccessor/series/to_geoframe.py#L86-L87 `np.asarray` will remove CRS information from GeoSeries. </issue> <code> [start of dtoolkit/geoaccessor/series/to_geoframe.py] 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING 4 5 import geopandas as gpd 6 import numpy as np 7 import pandas as pd 8 from geopandas.base import is_geometry_type 9 10 from dtoolkit.accessor.register import register_series_method 11 12 if TYPE_CHECKING: 13 from pyproj import CRS 14 15 16 @register_series_method 17 def to_geoframe( 18 s: pd.Series, 19 /, 20 geometry: gpd.GeoSeries = None, 21 crs: CRS | str | int = None, 22 **kwargs, 23 ) -> gpd.GeoDataFrame | pd.DataFrame: 24 """ 25 Transform an array of shapely scalars :class:`~pandas.Series` to 26 a :class:`~geopandas.GeoDataFrame`. 27 28 Parameters 29 ---------- 30 geometry : GeoSeries, optional 31 It will be prior set as 'geometry' column on GeoDataFrame. If the input 32 is a GeoSeries, its index will be ignored. 33 34 crs : CRS, str, int, optional 35 Coordinate Reference System of the geometry objects. Can be anything 36 accepted by :meth:`~pyproj.crs.CRS.from_user_input`, such as an authority 37 string (eg "EPSG:4326" / 4326) or a WKT string. 38 39 **kwargs 40 See the documentation for :class:`~geopandas.GeoDataFrame` and for complete 41 details on the keyword arguments. 42 43 Returns 44 ------- 45 DataFrame or GeoDataFrame 46 GeoDataFrame if the data is an array of shapely scalars or ``geometry`` is set. 47 48 See Also 49 -------- 50 dtoolkit.geoaccessor.series.to_geoseries 51 dtoolkit.geoaccessor.dataframe.to_geoframe 52 53 Examples 54 -------- 55 >>> import dtoolkit.geoaccessor 56 >>> import pandas as pd 57 >>> s = pd.Series( 58 ... pd.Series( 59 ... [ 60 ... "POINT (1 1)", 61 ... "POINT (2 2)", 62 ... "POINT (3 3)", 63 ... ], 64 ... ) 65 ... .from_wkt(drop=True, crs=4326) 66 ... ) 67 >>> s 68 0 POINT (1.00000 1.00000) 69 1 POINT (2.00000 2.00000) 70 2 POINT (3.00000 3.00000) 71 dtype: geometry 72 >>> type(s) 73 <class 'pandas.core.series.Series'> 74 >>> gs = s.to_geoframe() 75 >>> gs 76 geometry 77 0 POINT (1.00000 1.00000) 78 1 POINT (2.00000 2.00000) 79 2 POINT (3.00000 3.00000) 80 >>> type(gs) 81 <class 'geopandas.geodataframe.GeoDataFrame'> 82 """ 83 84 if geometry is not None: 85 # FIXME: https://github.com/geopandas/geopandas/issues/2660 86 if isinstance(geometry, gpd.GeoSeries): 87 geometry = np.asarray(geometry) 88 return gpd.GeoDataFrame(s, geometry=geometry, crs=crs, **kwargs) 89 elif is_geometry_type(s): 90 return gpd.GeoDataFrame(geometry=s, crs=crs, **kwargs) 91 else: 92 return s.to_frame() 93 [end of dtoolkit/geoaccessor/series/to_geoframe.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dtoolkit/geoaccessor/series/to_geoframe.py b/dtoolkit/geoaccessor/series/to_geoframe.py --- a/dtoolkit/geoaccessor/series/to_geoframe.py +++ b/dtoolkit/geoaccessor/series/to_geoframe.py @@ -3,7 +3,6 @@ from typing import TYPE_CHECKING import geopandas as gpd -import numpy as np import pandas as pd from geopandas.base import is_geometry_type @@ -84,7 +83,7 @@ if geometry is not None: # FIXME: https://github.com/geopandas/geopandas/issues/2660 if isinstance(geometry, gpd.GeoSeries): - geometry = np.asarray(geometry) + geometry = geometry.set_axis(s.index) return gpd.GeoDataFrame(s, geometry=geometry, crs=crs, **kwargs) elif is_geometry_type(s): return gpd.GeoDataFrame(geometry=s, crs=crs, **kwargs)
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/series/to_geoframe.py b/dtoolkit/geoaccessor/series/to_geoframe.py\n--- a/dtoolkit/geoaccessor/series/to_geoframe.py\n+++ b/dtoolkit/geoaccessor/series/to_geoframe.py\n@@ -3,7 +3,6 @@\n from typing import TYPE_CHECKING\n \n import geopandas as gpd\n-import numpy as np\n import pandas as pd\n from geopandas.base import is_geometry_type\n \n@@ -84,7 +83,7 @@\n if geometry is not None:\n # FIXME: https://github.com/geopandas/geopandas/issues/2660\n if isinstance(geometry, gpd.GeoSeries):\n- geometry = np.asarray(geometry)\n+ geometry = geometry.set_axis(s.index)\n return gpd.GeoDataFrame(s, geometry=geometry, crs=crs, **kwargs)\n elif is_geometry_type(s):\n return gpd.GeoDataFrame(geometry=s, crs=crs, **kwargs)\n", "issue": "BUG: CRS is missing\nhttps://github.com/Zeroto521/my-data-toolkit/blob/93aaefa0193c1822af21c60d9edbbc9acff68815/dtoolkit/geoaccessor/series/to_geoframe.py#L86-L87\r\n\r\n`np.asarray` will remove CRS information from GeoSeries.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom geopandas.base import is_geometry_type\n\nfrom dtoolkit.accessor.register import register_series_method\n\nif TYPE_CHECKING:\n from pyproj import CRS\n\n\n@register_series_method\ndef to_geoframe(\n s: pd.Series,\n /,\n geometry: gpd.GeoSeries = None,\n crs: CRS | str | int = None,\n **kwargs,\n) -> gpd.GeoDataFrame | pd.DataFrame:\n \"\"\"\n Transform an array of shapely scalars :class:`~pandas.Series` to\n a :class:`~geopandas.GeoDataFrame`.\n\n Parameters\n ----------\n geometry : GeoSeries, optional\n It will be prior set as 'geometry' column on GeoDataFrame. If the input\n is a GeoSeries, its index will be ignored.\n\n crs : CRS, str, int, optional\n Coordinate Reference System of the geometry objects. Can be anything\n accepted by :meth:`~pyproj.crs.CRS.from_user_input`, such as an authority\n string (eg \"EPSG:4326\" / 4326) or a WKT string.\n\n **kwargs\n See the documentation for :class:`~geopandas.GeoDataFrame` and for complete\n details on the keyword arguments.\n\n Returns\n -------\n DataFrame or GeoDataFrame\n GeoDataFrame if the data is an array of shapely scalars or ``geometry`` is set.\n\n See Also\n --------\n dtoolkit.geoaccessor.series.to_geoseries\n dtoolkit.geoaccessor.dataframe.to_geoframe\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import pandas as pd\n >>> s = pd.Series(\n ... pd.Series(\n ... [\n ... \"POINT (1 1)\",\n ... \"POINT (2 2)\",\n ... \"POINT (3 3)\",\n ... ],\n ... )\n ... .from_wkt(drop=True, crs=4326)\n ... )\n >>> s\n 0 POINT (1.00000 1.00000)\n 1 POINT (2.00000 2.00000)\n 2 POINT (3.00000 3.00000)\n dtype: geometry\n >>> type(s)\n <class 'pandas.core.series.Series'>\n >>> gs = s.to_geoframe()\n >>> gs\n geometry\n 0 POINT (1.00000 1.00000)\n 1 POINT (2.00000 2.00000)\n 2 POINT (3.00000 3.00000)\n >>> type(gs)\n <class 'geopandas.geodataframe.GeoDataFrame'>\n \"\"\"\n\n if geometry is not None:\n # FIXME: https://github.com/geopandas/geopandas/issues/2660\n if isinstance(geometry, gpd.GeoSeries):\n geometry = np.asarray(geometry)\n return gpd.GeoDataFrame(s, geometry=geometry, crs=crs, **kwargs)\n elif is_geometry_type(s):\n return gpd.GeoDataFrame(geometry=s, crs=crs, **kwargs)\n else:\n return s.to_frame()\n", "path": "dtoolkit/geoaccessor/series/to_geoframe.py"}]}
1,596
231
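A quick illustration of the failure mode and the fix in this record — a sketch assuming geopandas and shapely are installed. `np.asarray` unwraps the GeoSeries into a plain object ndarray, losing `.crs`, whereas `set_axis` re-indexes while staying a GeoSeries:

```python
import geopandas as gpd
import numpy as np
from shapely.geometry import Point

geom = gpd.GeoSeries([Point(1, 1), Point(2, 2)], crs=4326)

as_array = np.asarray(geom)          # plain object ndarray: CRS is gone
realigned = geom.set_axis([10, 11])  # still a GeoSeries: CRS survives

print(getattr(as_array, "crs", None))  # -> None
print(realigned.crs)                   # -> EPSG:4326
```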
gh_patches_debug_36434
rasdani/github-patches
git_diff
team-ocean__veros-70
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow for custom setup folders </issue> <code> [start of veros/cli/veros_copy_setup.py] 1 #!/usr/bin/env python 2 3 import os 4 import shutil 5 import pkg_resources 6 import functools 7 8 import click 9 10 SETUPDIR = pkg_resources.resource_filename('veros', 'setup') 11 SETUPS = sorted([ 12 setup for setup in os.listdir(SETUPDIR) 13 if os.path.isdir(os.path.join(SETUPDIR, setup)) 14 and not setup.startswith('_') 15 ]) 16 IGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/'] 17 18 19 def copy_setup(setup, to=None): 20 """Copy a standard setup to another directory""" 21 if to is None: 22 to = os.path.join(os.getcwd(), setup) 23 24 parent = os.path.dirname(os.path.realpath(to)) 25 26 if not os.path.exists(parent): 27 os.makedirs(parent) 28 29 ignore = shutil.ignore_patterns(*IGNORE_PATTERNS) 30 shutil.copytree( 31 os.path.join(SETUPDIR, setup), to, ignore=ignore 32 ) 33 34 35 @click.command('veros-copy-setup') 36 @click.argument('setup', type=click.Choice(SETUPS), metavar='SETUP') 37 @click.option('--to', type=click.Path(dir_okay=False, file_okay=False), required=False, 38 default=None, help='Target directory (default: copy to current working directory)') 39 @functools.wraps(copy_setup) 40 def cli(*args, **kwargs): 41 copy_setup(*args, **kwargs) 42 43 44 if __name__ == '__main__': 45 cli() 46 [end of veros/cli/veros_copy_setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/veros/cli/veros_copy_setup.py b/veros/cli/veros_copy_setup.py --- a/veros/cli/veros_copy_setup.py +++ b/veros/cli/veros_copy_setup.py @@ -7,35 +7,69 @@ import click -SETUPDIR = pkg_resources.resource_filename('veros', 'setup') -SETUPS = sorted([ - setup for setup in os.listdir(SETUPDIR) - if os.path.isdir(os.path.join(SETUPDIR, setup)) - and not setup.startswith('_') -]) +SETUPDIR_ENVVAR = 'VEROS_SETUP_DIR' IGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/'] +SETUPS = {} + +setup_dirs = [pkg_resources.resource_filename('veros', 'setup')] + +for setup_dir in os.environ.get(SETUPDIR_ENVVAR, '').split(';'): + if os.path.isdir(setup_dir): + setup_dirs.append(setup_dir) + +# populate {setup_name: path} mapping +for setup_dir in setup_dirs: + for setup in os.listdir(setup_dir): + setup_path = os.path.join(setup_dir, setup) + if not os.path.isdir(setup_path): + continue + if setup.startswith(('_', '.')): + continue + SETUPS[setup] = setup_path + +SETUP_NAMES = sorted(SETUPS.keys()) def copy_setup(setup, to=None): - """Copy a standard setup to another directory""" + """Copy a standard setup to another directory. + + Argument must be one of: {setups} + + Example: + + $ veros copy-setup global_4deg --to ~/veros-setups/4deg-lowfric + + Further directories containing setup templates can be added to this command + via the {setup_envvar} environment variable. + """ if to is None: to = os.path.join(os.getcwd(), setup) - parent = os.path.dirname(os.path.realpath(to)) + if os.path.exists(to): + raise RuntimeError('Target directory must not exist') + + to_parent = os.path.dirname(os.path.realpath(to)) - if not os.path.exists(parent): - os.makedirs(parent) + if not os.path.exists(to_parent): + os.makedirs(to_parent) ignore = shutil.ignore_patterns(*IGNORE_PATTERNS) shutil.copytree( - os.path.join(SETUPDIR, setup), to, ignore=ignore + SETUPS[setup], to, ignore=ignore ) +copy_setup.__doc__ = copy_setup.__doc__.format( + setups=', '.join(SETUP_NAMES), setup_envvar=SETUPDIR_ENVVAR +) + + @click.command('veros-copy-setup') [email protected]('setup', type=click.Choice(SETUPS), metavar='SETUP') [email protected]('--to', type=click.Path(dir_okay=False, file_okay=False), required=False, - default=None, help='Target directory (default: copy to current working directory)') [email protected]('setup', type=click.Choice(SETUP_NAMES), metavar='SETUP') [email protected]('--to', required=False, default=None, + type=click.Path(dir_okay=False, file_okay=False, writable=True), + help=('Target directory, must not exist ' + '(default: copy to current working directory)')) @functools.wraps(copy_setup) def cli(*args, **kwargs): copy_setup(*args, **kwargs)
{"golden_diff": "diff --git a/veros/cli/veros_copy_setup.py b/veros/cli/veros_copy_setup.py\n--- a/veros/cli/veros_copy_setup.py\n+++ b/veros/cli/veros_copy_setup.py\n@@ -7,35 +7,69 @@\n \n import click\n \n-SETUPDIR = pkg_resources.resource_filename('veros', 'setup')\n-SETUPS = sorted([\n- setup for setup in os.listdir(SETUPDIR)\n- if os.path.isdir(os.path.join(SETUPDIR, setup))\n- and not setup.startswith('_')\n-])\n+SETUPDIR_ENVVAR = 'VEROS_SETUP_DIR'\n IGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/']\n+SETUPS = {}\n+\n+setup_dirs = [pkg_resources.resource_filename('veros', 'setup')]\n+\n+for setup_dir in os.environ.get(SETUPDIR_ENVVAR, '').split(';'):\n+ if os.path.isdir(setup_dir):\n+ setup_dirs.append(setup_dir)\n+\n+# populate {setup_name: path} mapping\n+for setup_dir in setup_dirs:\n+ for setup in os.listdir(setup_dir):\n+ setup_path = os.path.join(setup_dir, setup)\n+ if not os.path.isdir(setup_path):\n+ continue\n+ if setup.startswith(('_', '.')):\n+ continue\n+ SETUPS[setup] = setup_path\n+\n+SETUP_NAMES = sorted(SETUPS.keys())\n \n \n def copy_setup(setup, to=None):\n- \"\"\"Copy a standard setup to another directory\"\"\"\n+ \"\"\"Copy a standard setup to another directory.\n+\n+ Argument must be one of: {setups}\n+\n+ Example:\n+\n+ $ veros copy-setup global_4deg --to ~/veros-setups/4deg-lowfric\n+\n+ Further directories containing setup templates can be added to this command\n+ via the {setup_envvar} environment variable.\n+ \"\"\"\n if to is None:\n to = os.path.join(os.getcwd(), setup)\n \n- parent = os.path.dirname(os.path.realpath(to))\n+ if os.path.exists(to):\n+ raise RuntimeError('Target directory must not exist')\n+\n+ to_parent = os.path.dirname(os.path.realpath(to))\n \n- if not os.path.exists(parent):\n- os.makedirs(parent)\n+ if not os.path.exists(to_parent):\n+ os.makedirs(to_parent)\n \n ignore = shutil.ignore_patterns(*IGNORE_PATTERNS)\n shutil.copytree(\n- os.path.join(SETUPDIR, setup), to, ignore=ignore\n+ SETUPS[setup], to, ignore=ignore\n )\n \n \n+copy_setup.__doc__ = copy_setup.__doc__.format(\n+ setups=', '.join(SETUP_NAMES), setup_envvar=SETUPDIR_ENVVAR\n+)\n+\n+\n @click.command('veros-copy-setup')\[email protected]('setup', type=click.Choice(SETUPS), metavar='SETUP')\[email protected]('--to', type=click.Path(dir_okay=False, file_okay=False), required=False,\n- default=None, help='Target directory (default: copy to current working directory)')\[email protected]('setup', type=click.Choice(SETUP_NAMES), metavar='SETUP')\[email protected]('--to', required=False, default=None,\n+ type=click.Path(dir_okay=False, file_okay=False, writable=True),\n+ help=('Target directory, must not exist '\n+ '(default: copy to current working directory)'))\n @functools.wraps(copy_setup)\n def cli(*args, **kwargs):\n copy_setup(*args, **kwargs)\n", "issue": "Allow for custom setup folders\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport shutil\nimport pkg_resources\nimport functools\n\nimport click\n\nSETUPDIR = pkg_resources.resource_filename('veros', 'setup')\nSETUPS = sorted([\n setup for setup in os.listdir(SETUPDIR)\n if os.path.isdir(os.path.join(SETUPDIR, setup))\n and not setup.startswith('_')\n])\nIGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/']\n\n\ndef copy_setup(setup, to=None):\n \"\"\"Copy a standard setup to another directory\"\"\"\n if to is None:\n to = os.path.join(os.getcwd(), setup)\n\n parent = os.path.dirname(os.path.realpath(to))\n\n if not os.path.exists(parent):\n os.makedirs(parent)\n\n ignore = 
shutil.ignore_patterns(*IGNORE_PATTERNS)\n shutil.copytree(\n os.path.join(SETUPDIR, setup), to, ignore=ignore\n )\n\n\[email protected]('veros-copy-setup')\[email protected]('setup', type=click.Choice(SETUPS), metavar='SETUP')\[email protected]('--to', type=click.Path(dir_okay=False, file_okay=False), required=False,\n default=None, help='Target directory (default: copy to current working directory)')\[email protected](copy_setup)\ndef cli(*args, **kwargs):\n copy_setup(*args, **kwargs)\n\n\nif __name__ == '__main__':\n cli()\n", "path": "veros/cli/veros_copy_setup.py"}]}
944
787
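The discovery logic added by this record's golden diff, restated as a standalone sketch. The environment-variable name and skip rules mirror the patch; the function wrapper itself is illustrative:

```python
import os

SETUPDIR_ENVVAR = "VEROS_SETUP_DIR"  # same variable name as the patch


def discover_setups(builtin_dir):
    setup_dirs = [builtin_dir]
    for extra in os.environ.get(SETUPDIR_ENVVAR, "").split(";"):
        if os.path.isdir(extra):
            setup_dirs.append(extra)

    setups = {}
    for directory in setup_dirs:
        for name in os.listdir(directory):
            path = os.path.join(directory, name)
            if not os.path.isdir(path):
                continue
            if name.startswith(("_", ".")):
                continue
            setups[name] = path  # later directories shadow earlier ones
    return setups
```

Because the dict is filled in directory order, an entry from a user-supplied `VEROS_SETUP_DIR` path can shadow a built-in template of the same name.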
gh_patches_debug_3767
rasdani/github-patches
git_diff
kartoza__prj.app-321
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> More menu navigation cleanups <img width="834" alt="screen shot 2016-06-22 at 00 00 38" src="https://cloud.githubusercontent.com/assets/178003/16247917/6d784382-380c-11e6-91f6-29423d263b8c.png"> - [ ] Make project icon and name clickable - click should take you to e.g. /en/qgis/ - [ ] Changelogs -> Changelog - [ ] Remove version entries item - [ ] Remove version thumbnails item - [ ] Remove add entry item </issue> <code> [start of django_project/base/templatetags/custom_markup.py] 1 import markdown 2 from django import template 3 from django.template.defaultfilters import stringfilter 4 from django.utils.encoding import force_unicode 5 from django.utils.safestring import mark_safe 6 7 register = template.Library() 8 9 10 @register.filter(name='base_markdown', is_safe=True) 11 @stringfilter 12 def base_markdown(value): 13 extensions = ["nl2br", ] 14 15 return mark_safe(markdown.markdown(force_unicode(value), 16 extensions, 17 safe_mode=True, 18 enable_attributes=False)) 19 20 21 @register.filter(name='is_gif', is_safe=True) 22 @stringfilter 23 def is_gif(value): 24 return value[-4:] == '.gif' 25 [end of django_project/base/templatetags/custom_markup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py --- a/django_project/base/templatetags/custom_markup.py +++ b/django_project/base/templatetags/custom_markup.py @@ -22,3 +22,17 @@ @stringfilter def is_gif(value): return value[-4:] == '.gif' + + [email protected]_tag('button_span.html', takes_context=True) +def show_button_icon(context, value): + + context_icon = { + 'add': 'glyphicon glyphicon-asterisk', + 'update': 'glyphicon glyphicon-pencil', + 'delete': 'glyphicon glyphicon-minus' + } + + return { + 'button_icon': context_icon[value] + }
{"golden_diff": "diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py\n--- a/django_project/base/templatetags/custom_markup.py\n+++ b/django_project/base/templatetags/custom_markup.py\n@@ -22,3 +22,17 @@\n @stringfilter\n def is_gif(value):\n return value[-4:] == '.gif'\n+\n+\[email protected]_tag('button_span.html', takes_context=True)\n+def show_button_icon(context, value):\n+\n+ context_icon = {\n+ 'add': 'glyphicon glyphicon-asterisk',\n+ 'update': 'glyphicon glyphicon-pencil',\n+ 'delete': 'glyphicon glyphicon-minus'\n+ }\n+\n+ return {\n+ 'button_icon': context_icon[value]\n+ }\n", "issue": "More menu navigation cleanups\n<img width=\"834\" alt=\"screen shot 2016-06-22 at 00 00 38\" src=\"https://cloud.githubusercontent.com/assets/178003/16247917/6d784382-380c-11e6-91f6-29423d263b8c.png\">\n- [ ] Make project icon and name clickable - click should take you to e.g. /en/qgis/\n- [ ] Changelogs -> Changelog\n- [ ] Remove version entries item\n- [ ] Remove version thumbnails item\n- [ ] Remove add entry item\n\n", "before_files": [{"content": "import markdown\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.encoding import force_unicode\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='base_markdown', is_safe=True)\n@stringfilter\ndef base_markdown(value):\n extensions = [\"nl2br\", ]\n\n return mark_safe(markdown.markdown(force_unicode(value),\n extensions,\n safe_mode=True,\n enable_attributes=False))\n\n\[email protected](name='is_gif', is_safe=True)\n@stringfilter\ndef is_gif(value):\n return value[-4:] == '.gif'\n", "path": "django_project/base/templatetags/custom_markup.py"}]}
884
180
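This record's diff registers `show_button_icon` as a Django inclusion tag, but `button_span.html` itself is not part of the record. A plausible minimal template and usage, offered only as an assumption:

```python
# Assumed one-line template at templates/button_span.html:
#
#     <span class="{{ button_icon }}"></span>
#
# and assumed usage in a Django template:
#
#     {% load custom_markup %}
#     <button>{% show_button_icon 'update' %} Edit</button>
#
# The tag itself is just a lookup from action name to Bootstrap icon class:
context_icon = {
    "add": "glyphicon glyphicon-asterisk",
    "update": "glyphicon glyphicon-pencil",
    "delete": "glyphicon glyphicon-minus",
}
print(context_icon["update"])  # -> glyphicon glyphicon-pencil
```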
gh_patches_debug_67495
rasdani/github-patches
git_diff
vllm-project__vllm-605
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> RayWorker import error When import `vllm` I got following error ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/code/vllm/vllm/__init__.py", line 4, in <module> from vllm.engine.async_llm_engine import AsyncLLMEngine File "/code/vllm/vllm/engine/async_llm_engine.py", line 7, in <module> from vllm.engine.llm_engine import LLMEngine File "/code/vllm/vllm/engine/llm_engine.py", line 9, in <module> from vllm.engine.ray_utils import initialize_cluster, ray, RayWorker ImportError: cannot import name 'RayWorker' from 'vllm.engine.ray_utils' ``` It seems `ray` requires `pandas` I haven't installed it. </issue> <code> [start of vllm/engine/ray_utils.py] 1 import socket 2 from typing import Optional, Tuple, TYPE_CHECKING 3 4 from vllm.config import ParallelConfig 5 6 try: 7 import ray 8 from ray.air.util.torch_dist import TorchDistributedWorker 9 10 class RayWorker(TorchDistributedWorker): 11 """Ray wrapper for vllm.worker.Worker, allowing Worker to be 12 lazliy initialized after Ray sets CUDA_VISIBLE_DEVICES.""" 13 14 def __init__(self) -> None: 15 self.worker = None 16 17 def init_worker(self, worker_init_fn): 18 self.worker = worker_init_fn() 19 20 def __getattr__(self, name): 21 return getattr(self.worker, name) 22 23 def execute_method(self, method, *args, **kwargs): 24 executor = getattr(self, method) 25 return executor(*args, **kwargs) 26 27 except ImportError: 28 ray = None 29 TorchDistributedWorker = None 30 31 if TYPE_CHECKING: 32 from ray.util.placement_group import PlacementGroup 33 34 35 def get_open_port(): 36 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 37 s.bind(("", 0)) 38 return s.getsockname()[1] 39 40 41 def initialize_cluster( 42 parallel_config: ParallelConfig, 43 engine_use_ray: bool = False, 44 ray_address: Optional[str] = None, 45 ) -> Tuple[str, Optional["PlacementGroup"]]: 46 """Initialize the distributed cluster probably with Ray. 47 48 Args: 49 parallel_config: The configurations for parallel execution. 50 engine_use_ray: Whether to use Ray for async engine. 51 ray_address: The address of the Ray cluster. If None, uses 52 the default Ray cluster address. 53 54 Returns: 55 A tuple of (`distributed_init_method`, `all_stage_devices`). The 56 `distributed_init_method` is the address for initializing the 57 distributed backend. `all_stage_devices` includes device IDs for 58 each worker in each pipeline stage. Each device ID is a tuple of 59 (rank, node resource, device id). 60 """ 61 if parallel_config.worker_use_ray or engine_use_ray: 62 if ray is None: 63 raise ImportError( 64 "Ray is not installed. Please install Ray to use distributed " 65 "serving.") 66 # Connect to a ray cluster. 67 ray.init(address=ray_address, ignore_reinit_error=True) 68 69 if not parallel_config.worker_use_ray: 70 # Initialize cluster locally. 71 port = get_open_port() 72 # We need to setup the distributed init method to make sure 73 # the distributed megatron code (e.g., get world size) works correctly. 74 distributed_init_method = f"tcp://localhost:{port}" 75 return distributed_init_method, None 76 77 current_placement_group = ray.util.get_current_placement_group() 78 if current_placement_group: 79 # We are in a placement group 80 bundles = current_placement_group.bundle_specs 81 # Verify that we can use the placement group.
82 gpu_bundles = 0 83 for bundle in bundles: 84 bundle_gpus = bundle.get("GPU", 0) 85 if bundle_gpus > 1: 86 raise ValueError( 87 "Placement group bundle cannot have more than 1 GPU.") 88 if bundle_gpus: 89 gpu_bundles += 1 90 if parallel_config.world_size > gpu_bundles: 91 raise ValueError( 92 "The number of required GPUs exceeds the total number of " 93 "available GPUs in the placement group.") 94 else: 95 num_gpus_in_cluster = ray.cluster_resources().get("GPU", 0) 96 if parallel_config.world_size > num_gpus_in_cluster: 97 raise ValueError( 98 "The number of required GPUs exceeds the total number of " 99 "available GPUs in the cluster.") 100 # Create a new placement group 101 current_placement_group = ray.util.placement_group([{ 102 "GPU": 1 103 }] * parallel_config.world_size) 104 # Wait until PG is ready - this will block until all 105 # requested resources are available, and will timeout 106 # if they cannot be provisioned. 107 ray.get(current_placement_group.ready(), timeout=1800) 108 109 return None, current_placement_group 110 [end of vllm/engine/ray_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vllm/engine/ray_utils.py b/vllm/engine/ray_utils.py --- a/vllm/engine/ray_utils.py +++ b/vllm/engine/ray_utils.py @@ -27,6 +27,7 @@ except ImportError: ray = None TorchDistributedWorker = None + RayWorker = None # pylint: disable=invalid-name if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup
{"golden_diff": "diff --git a/vllm/engine/ray_utils.py b/vllm/engine/ray_utils.py\n--- a/vllm/engine/ray_utils.py\n+++ b/vllm/engine/ray_utils.py\n@@ -27,6 +27,7 @@\n except ImportError:\n ray = None\n TorchDistributedWorker = None\n+ RayWorker = None # pylint: disable=invalid-name\n \n if TYPE_CHECKING:\n from ray.util.placement_group import PlacementGroup\n", "issue": "RayWorker import error\nWhen import `vllm` I got following error\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/code/vllm/vllm/__init__.py\", line 4, in <module>\r\n from vllm.engine.async_llm_engine import AsyncLLMEngine\r\n File \"/code/vllm/vllm/engine/async_llm_engine.py\", line 7, in <module>\r\n from vllm.engine.llm_engine import LLMEngine\r\n File \"/code/vllm/vllm/engine/llm_engine.py\", line 9, in <module>\r\n from vllm.engine.ray_utils import initialize_cluster, ray, RayWorker\r\nImportError: cannot import name 'RayWorker' from 'vllm.engine.ray_utils' \r\n```\r\nIt seems `ray` requires `pandas` I haven't installed it.\r\n\n", "before_files": [{"content": "import socket\nfrom typing import Optional, Tuple, TYPE_CHECKING\n\nfrom vllm.config import ParallelConfig\n\ntry:\n import ray\n from ray.air.util.torch_dist import TorchDistributedWorker\n\n class RayWorker(TorchDistributedWorker):\n \"\"\"Ray wrapper for vllm.worker.Worker, allowing Worker to be\n lazliy initialized after Ray sets CUDA_VISIBLE_DEVICES.\"\"\"\n\n def __init__(self) -> None:\n self.worker = None\n\n def init_worker(self, worker_init_fn):\n self.worker = worker_init_fn()\n\n def __getattr__(self, name):\n return getattr(self.worker, name)\n\n def execute_method(self, method, *args, **kwargs):\n executor = getattr(self, method)\n return executor(*args, **kwargs)\n\nexcept ImportError:\n ray = None\n TorchDistributedWorker = None\n\nif TYPE_CHECKING:\n from ray.util.placement_group import PlacementGroup\n\n\ndef get_open_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"\", 0))\n return s.getsockname()[1]\n\n\ndef initialize_cluster(\n parallel_config: ParallelConfig,\n engine_use_ray: bool = False,\n ray_address: Optional[str] = None,\n) -> Tuple[str, Optional[\"PlacementGroup\"]]:\n \"\"\"Initialize the distributed cluster probably with Ray.\n\n Args:\n parallel_config: The configurations for parallel execution.\n engine_use_ray: Whether to use Ray for async engine.\n ray_address: The address of the Ray cluster. If None, uses\n the default Ray cluster address.\n\n Returns:\n A tuple of (`distributed_init_method`, `all_stage_devices`). The\n `distributed_init_method` is the address for initializing the\n distributed backend. `all_stage_devices` includes device IDs for\n each worker in each pipeline stage. Each device ID is a tuple of\n (rank, node resource, device id).\n \"\"\"\n if parallel_config.worker_use_ray or engine_use_ray:\n if ray is None:\n raise ImportError(\n \"Ray is not installed. 
Please install Ray to use distributed \"\n \"serving.\")\n # Connect to a ray cluster.\n ray.init(address=ray_address, ignore_reinit_error=True)\n\n if not parallel_config.worker_use_ray:\n # Initialize cluster locally.\n port = get_open_port()\n # We need to setup the distributed init method to make sure\n # the distributed megatron code (e.g., get world size) works correctly.\n distributed_init_method = f\"tcp://localhost:{port}\"\n return distributed_init_method, None\n\n current_placement_group = ray.util.get_current_placement_group()\n if current_placement_group:\n # We are in a placement group\n bundles = current_placement_group.bundle_specs\n # Verify that we can use the placement group.\n gpu_bundles = 0\n for bundle in bundles:\n bundle_gpus = bundle.get(\"GPU\", 0)\n if bundle_gpus > 1:\n raise ValueError(\n \"Placement group bundle cannot have more than 1 GPU.\")\n if bundle_gpus:\n gpu_bundles += 1\n if parallel_config.world_size > gpu_bundles:\n raise ValueError(\n \"The number of required GPUs exceeds the total number of \"\n \"available GPUs in the placement group.\")\n else:\n num_gpus_in_cluster = ray.cluster_resources().get(\"GPU\", 0)\n if parallel_config.world_size > num_gpus_in_cluster:\n raise ValueError(\n \"The number of required GPUs exceeds the total number of \"\n \"available GPUs in the cluster.\")\n # Create a new placement group\n current_placement_group = ray.util.placement_group([{\n \"GPU\": 1\n }] * parallel_config.world_size)\n # Wait until PG is ready - this will block until all\n # requested resources are available, and will timeout\n # if they cannot be provisioned.\n ray.get(current_placement_group.ready(), timeout=1800)\n\n return None, current_placement_group\n", "path": "vllm/engine/ray_utils.py"}]}
1,832
104
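The root cause in this record is a general Python pattern worth spelling out: a name bound only inside a failing `try: import ...` block never comes into existence, so every optional name needs an explicit fallback in the `except` branch. A minimal, runnable sketch (the module and class names are illustrative, not vllm's):

```python
try:
    import some_optional_dependency  # hypothetical module; ImportError is raised here

    class OptionalWorker:  # bound only when the import above succeeds
        pass

except ImportError:
    some_optional_dependency = None
    # Without the next line the module never defines the attribute, and
    # `from this_module import OptionalWorker` raises ImportError itself.
    OptionalWorker = None
```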
gh_patches_debug_14593
rasdani/github-patches
git_diff
PlasmaPy__PlasmaPy-703
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create classes to represent ionization state distributions My plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help making access to the ionization states more straightfoward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability. Eventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352. This will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway. </issue> <code> [start of plasmapy/classes/sources/plasma3d.py] 1 """ 2 Defines the core Plasma class used by PlasmaPy to represent plasma properties. 3 """ 4 5 import numpy as np 6 import astropy.units as u 7 import itertools 8 9 from plasmapy.formulary.magnetostatics import MagnetoStatics 10 11 from astropy.constants import mu0 12 13 from plasmapy.classes import GenericPlasma 14 15 __all__ = [ 16 "Plasma3D" 17 ] 18 19 20 class Plasma3D(GenericPlasma): 21 """ 22 Core class for describing and calculating plasma parameters with 23 spatial dimensions. 24 25 Attributes 26 ---------- 27 x : `astropy.units.Quantity` 28 x-coordinates within the plasma domain. Equal to the 29 `domain_x` input parameter. 30 y : `astropy.units.Quantity` 31 y-coordinates within the plasma domain. Equal to the 32 `domain_y` input parameter. 33 z : `astropy.units.Quantity` 34 z-coordinates within the plasma domain. Equal to the 35 `domain_z` input parameter. 36 grid : `astropy.units.Quantity` 37 (3, x, y, z) array containing the values of each coordinate at 38 every point in the domain. 39 domain_shape : tuple 40 Shape of the plasma domain. 41 density : `astropy.units.Quantity` 42 (x, y, z) array of mass density at every point in the domain. 43 momentum : `astropy.units.Quantity` 44 (3, x, y, z) array of the momentum vector at every point in 45 the domain. 46 pressure : `astropy.units.Quantity` 47 (x, y, z) array of pressure at every point in the domain. 48 magnetic_field : `astropy.units.Quantity` 49 (3, x, y, z) array of the magnetic field vector at every point 50 in the domain. 51 52 Parameters 53 ---------- 54 domain_x : `astropy.units.Quantity` 55 1D array of x-coordinates for the plasma domain. Must have 56 units convertable to length. 57 domain_y : `astropy.units.Quantity` 58 1D array of y-coordinates for the plasma domain. Must have 59 units convertable to length. 60 domain_z : `astropy.units.Quantity` 61 1D array of z-coordinates for the plasma domain. Must have 62 units convertable to length.
63 64 """ 65 @u.quantity_input(domain_x=u.m, domain_y=u.m, domain_z=u.m) 66 def __init__(self, domain_x, domain_y, domain_z): 67 # Define domain sizes 68 self.x = domain_x 69 self.y = domain_y 70 self.z = domain_z 71 72 self.grid = np.array(np.meshgrid(self.x, self.y, self.z, 73 indexing='ij')) 74 self.domain_shape = (len(self.x), len(self.y), len(self.z)) 75 76 # Initiate core plasma variables 77 self.density = np.zeros(self.domain_shape) * u.kg / u.m**3 78 self.momentum = np.zeros((3, *self.domain_shape)) * u.kg / (u.m ** 2 * u.s) 79 self.pressure = np.zeros(self.domain_shape) * u.Pa 80 self.magnetic_field = np.zeros((3, *self.domain_shape)) * u.T 81 self.electric_field = np.zeros((3, *self.domain_shape)) * u.V / u.m 82 83 @property 84 def velocity(self): 85 return self.momentum / self.density 86 87 @property 88 def magnetic_field_strength(self): 89 B = self.magnetic_field 90 return np.sqrt(np.sum(B * B, axis=0)) 91 92 @property 93 def electric_field_strength(self): 94 E = self.electric_field 95 return np.sqrt(np.sum(E * E, axis=0)) 96 97 @property 98 def alfven_speed(self): 99 B = self.magnetic_field 100 rho = self.density 101 return np.sqrt(np.sum(B * B, axis=0) / (mu0 * rho)) 102 103 @classmethod 104 def is_datasource_for(cls, **kwargs): 105 if len(kwargs) == 3: 106 match = all(f'domain_{direction}' in kwargs.keys() for direction in 'xyz') 107 else: 108 match = False 109 return match 110 111 def add_magnetostatic(self, *mstats: MagnetoStatics): 112 # for each MagnetoStatic argument 113 for mstat in mstats: 114 # loop over 3D-index (ix,iy,iz) 115 for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]): 116 # get coordinate 117 p = self.grid[(slice(None),)+point_index] # function as [:, *index] 118 # calculate magnetic field at this point and add back 119 self.magnetic_field[(slice(None),)+point_index] += mstat.magnetic_field(p) 120 [end of plasmapy/classes/sources/plasma3d.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plasmapy/classes/sources/plasma3d.py b/plasmapy/classes/sources/plasma3d.py --- a/plasmapy/classes/sources/plasma3d.py +++ b/plasmapy/classes/sources/plasma3d.py @@ -110,9 +110,10 @@ def add_magnetostatic(self, *mstats: MagnetoStatics): # for each MagnetoStatic argument + prod = itertools.product(*[list(range(n)) for n in self.domain_shape]) for mstat in mstats: # loop over 3D-index (ix,iy,iz) - for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]): + for point_index in prod: # get coordinate p = self.grid[(slice(None),)+point_index] # function as [:, *index] # calculate magnetic field at this point and add back
{"golden_diff": "diff --git a/plasmapy/classes/sources/plasma3d.py b/plasmapy/classes/sources/plasma3d.py\n--- a/plasmapy/classes/sources/plasma3d.py\n+++ b/plasmapy/classes/sources/plasma3d.py\n@@ -110,9 +110,10 @@\n \n def add_magnetostatic(self, *mstats: MagnetoStatics):\n # for each MagnetoStatic argument\n+ prod = itertools.product(*[list(range(n)) for n in self.domain_shape])\n for mstat in mstats:\n # loop over 3D-index (ix,iy,iz)\n- for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]):\n+ for point_index in prod:\n # get coordinate\n p = self.grid[(slice(None),)+point_index] # function as [:, *index]\n # calculate magnetic field at this point and add back\n", "issue": "Create classes to represent ionization state distributions\nMy plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help making access to the ionization states more straightfoward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability. \r\n\r\nEventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352.\r\n\r\nThis will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nDefines the core Plasma class used by PlasmaPy to represent plasma properties.\n\"\"\"\n\nimport numpy as np\nimport astropy.units as u\nimport itertools\n\nfrom plasmapy.formulary.magnetostatics import MagnetoStatics\n\nfrom astropy.constants import mu0\n\nfrom plasmapy.classes import GenericPlasma\n\n__all__ = [\n \"Plasma3D\"\n]\n\n\nclass Plasma3D(GenericPlasma):\n \"\"\"\n Core class for describing and calculating plasma parameters with\n spatial dimensions.\n\n Attributes\n ----------\n x : `astropy.units.Quantity`\n x-coordinates within the plasma domain. Equal to the\n `domain_x` input parameter.\n y : `astropy.units.Quantity`\n y-coordinates within the plasma domain. Equal to the\n `domain_y` input parameter.\n z : `astropy.units.Quantity`\n z-coordinates within the plasma domain. Equal to the\n `domain_z` input parameter.\n grid : `astropy.units.Quantity`\n (3, x, y, z) array containing the values of each coordinate at\n every point in the domain.\n domain_shape : tuple\n Shape of the plasma domain.\n density : `astropy.units.Quantity`\n (x, y, z) array of mass density at every point in the domain.\n momentum : `astropy.units.Quantity`\n (3, x, y, z) array of the momentum vector at every point in\n the domain.\n pressure : `astropy.units.Quantity`\n (x, y, z) array of pressure at every point in the domain.\n magnetic_field : `astropy.units.Quantity`\n (3, x, y, z) array of the magnetic field vector at every point\n in the domain.\n\n Parameters\n ----------\n domain_x : `astropy.units.Quantity`\n 1D array of x-coordinates for the plasma domain. Must have\n units convertable to length.\n domain_y : `astropy.units.Quantity`\n 1D array of y-coordinates for the plasma domain. Must have\n units convertable to length.\n domain_z : `astropy.units.Quantity`\n 1D array of z-coordinates for the plasma domain. 
Must have\n units convertable to length.\n\n \"\"\"\n @u.quantity_input(domain_x=u.m, domain_y=u.m, domain_z=u.m)\n def __init__(self, domain_x, domain_y, domain_z):\n # Define domain sizes\n self.x = domain_x\n self.y = domain_y\n self.z = domain_z\n\n self.grid = np.array(np.meshgrid(self.x, self.y, self.z,\n indexing='ij'))\n self.domain_shape = (len(self.x), len(self.y), len(self.z))\n\n # Initiate core plasma variables\n self.density = np.zeros(self.domain_shape) * u.kg / u.m**3\n self.momentum = np.zeros((3, *self.domain_shape)) * u.kg / (u.m ** 2 * u.s)\n self.pressure = np.zeros(self.domain_shape) * u.Pa\n self.magnetic_field = np.zeros((3, *self.domain_shape)) * u.T\n self.electric_field = np.zeros((3, *self.domain_shape)) * u.V / u.m\n\n @property\n def velocity(self):\n return self.momentum / self.density\n\n @property\n def magnetic_field_strength(self):\n B = self.magnetic_field\n return np.sqrt(np.sum(B * B, axis=0))\n\n @property\n def electric_field_strength(self):\n E = self.electric_field\n return np.sqrt(np.sum(E * E, axis=0))\n\n @property\n def alfven_speed(self):\n B = self.magnetic_field\n rho = self.density\n return np.sqrt(np.sum(B * B, axis=0) / (mu0 * rho))\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if len(kwargs) == 3:\n match = all(f'domain_{direction}' in kwargs.keys() for direction in 'xyz')\n else:\n match = False\n return match\n\n def add_magnetostatic(self, *mstats: MagnetoStatics):\n # for each MagnetoStatic argument\n for mstat in mstats:\n # loop over 3D-index (ix,iy,iz)\n for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]):\n # get coordinate\n p = self.grid[(slice(None),)+point_index] # function as [:, *index]\n # calculate magnetic field at this point and add back\n self.magnetic_field[(slice(None),)+point_index] += mstat.magnetic_field(p)\n", "path": "plasmapy/classes/sources/plasma3d.py"}]}
2,033
209
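One caveat worth noting when reading this record's golden diff: `itertools.product` returns a single-use iterator, so hoisting `prod` out of the `for mstat` loop means only the first magnetostatic element iterates over any grid points. A two-line demonstration of the general behaviour:

```python
import itertools

prod = itertools.product(range(2), range(2))
for tag in ("first pass", "second pass"):
    print(tag, list(prod))
# first pass [(0, 0), (0, 1), (1, 0), (1, 1)]
# second pass []   <- the iterator is already exhausted
```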
gh_patches_debug_1994
rasdani/github-patches
git_diff
frappe__hrms-1526
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Organizational Chart: Total connections includes employees left ### Information about bug <img width="329" alt="Screenshot 2024-03-08 at 11 20 37 AM" src="https://github.com/frappe/hrms/assets/20027965/b88248f8-502e-41fa-ba1a-87c0cd43165a"> The current system displays a total count of connections for each employee, including those who are no longer with the company. However, when viewing the connections, only active employees are shown. **Expected Output:** The count now reflects only active employees, ensuring consistency between the number displayed and the individuals visible upon selecting any employee. ### Module HR ### Version ERPNext: v14.x.x-develop () (develop) Frappe Framework: v15.x.x-develop () (develop) Frappe HR: v16.0.0-dev (develop) ### Installation method manual install ### Relevant log output / Stack trace / Full Error Message. _No response_ ### Code of Conduct - [x] I agree to follow this project's Code of Conduct </issue> <code> [start of hrms/hr/page/organizational_chart/organizational_chart.py] 1 import frappe 2 from frappe.query_builder.functions import Count 3 4 5 @frappe.whitelist() 6 def get_children(parent=None, company=None, exclude_node=None): 7 filters = [["status", "=", "Active"]] 8 if company and company != "All Companies": 9 filters.append(["company", "=", company]) 10 11 if parent and company and parent != company: 12 filters.append(["reports_to", "=", parent]) 13 else: 14 filters.append(["reports_to", "=", ""]) 15 16 if exclude_node: 17 filters.append(["name", "!=", exclude_node]) 18 19 employees = frappe.get_all( 20 "Employee", 21 fields=[ 22 "employee_name as name", 23 "name as id", 24 "lft", 25 "rgt", 26 "reports_to", 27 "image", 28 "designation as title", 29 ], 30 filters=filters, 31 order_by="name", 32 ) 33 34 for employee in employees: 35 employee.connections = get_connections(employee.id, employee.lft, employee.rgt) 36 employee.expandable = bool(employee.connections) 37 38 return employees 39 40 41 def get_connections(employee: str, lft: int, rgt: int) -> int: 42 Employee = frappe.qb.DocType("Employee") 43 query = ( 44 frappe.qb.from_(Employee) 45 .select(Count(Employee.name)) 46 .where((Employee.lft > lft) & (Employee.rgt < rgt)) 47 ).run() 48 49 return query[0][0] 50 [end of hrms/hr/page/organizational_chart/organizational_chart.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hrms/hr/page/organizational_chart/organizational_chart.py b/hrms/hr/page/organizational_chart/organizational_chart.py --- a/hrms/hr/page/organizational_chart/organizational_chart.py +++ b/hrms/hr/page/organizational_chart/organizational_chart.py @@ -43,7 +43,7 @@ query = ( frappe.qb.from_(Employee) .select(Count(Employee.name)) - .where((Employee.lft > lft) & (Employee.rgt < rgt)) + .where((Employee.lft > lft) & (Employee.rgt < rgt) & (Employee.status == "Active")) ).run() return query[0][0]
{"golden_diff": "diff --git a/hrms/hr/page/organizational_chart/organizational_chart.py b/hrms/hr/page/organizational_chart/organizational_chart.py\n--- a/hrms/hr/page/organizational_chart/organizational_chart.py\n+++ b/hrms/hr/page/organizational_chart/organizational_chart.py\n@@ -43,7 +43,7 @@\n \tquery = (\n \t\tfrappe.qb.from_(Employee)\n \t\t.select(Count(Employee.name))\n-\t\t.where((Employee.lft > lft) & (Employee.rgt < rgt))\n+\t\t.where((Employee.lft > lft) & (Employee.rgt < rgt) & (Employee.status == \"Active\"))\n \t).run()\n \n \treturn query[0][0]\n", "issue": "Organizational Chart: Total connections includes employees left\n### Information about bug\n\n<img width=\"329\" alt=\"Screenshot 2024-03-08 at 11 20 37\u202fAM\" src=\"https://github.com/frappe/hrms/assets/20027965/b88248f8-502e-41fa-ba1a-87c0cd43165a\">\r\n\r\nThe current system displays a total count of connections for each employee, including those who are no longer with the company. However, when viewing the connections, only active employees are shown.\r\n\r\n**Expected Output:**\r\nThe count now reflects only active employees, ensuring consistency between the number displayed and the individuals visible upon selecting any employee.\n\n### Module\n\nHR\n\n### Version\n\nERPNext: v14.x.x-develop () (develop)\r\n\r\nFrappe Framework: v15.x.x-develop () (develop)\r\n\r\nFrappe HR: v16.0.0-dev (develop)\n\n### Installation method\n\nmanual install\n\n### Relevant log output / Stack trace / Full Error Message.\n\n_No response_\n\n### Code of Conduct\n\n- [x] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "import frappe\nfrom frappe.query_builder.functions import Count\n\n\[email protected]()\ndef get_children(parent=None, company=None, exclude_node=None):\n\tfilters = [[\"status\", \"=\", \"Active\"]]\n\tif company and company != \"All Companies\":\n\t\tfilters.append([\"company\", \"=\", company])\n\n\tif parent and company and parent != company:\n\t\tfilters.append([\"reports_to\", \"=\", parent])\n\telse:\n\t\tfilters.append([\"reports_to\", \"=\", \"\"])\n\n\tif exclude_node:\n\t\tfilters.append([\"name\", \"!=\", exclude_node])\n\n\temployees = frappe.get_all(\n\t\t\"Employee\",\n\t\tfields=[\n\t\t\t\"employee_name as name\",\n\t\t\t\"name as id\",\n\t\t\t\"lft\",\n\t\t\t\"rgt\",\n\t\t\t\"reports_to\",\n\t\t\t\"image\",\n\t\t\t\"designation as title\",\n\t\t],\n\t\tfilters=filters,\n\t\torder_by=\"name\",\n\t)\n\n\tfor employee in employees:\n\t\temployee.connections = get_connections(employee.id, employee.lft, employee.rgt)\n\t\temployee.expandable = bool(employee.connections)\n\n\treturn employees\n\n\ndef get_connections(employee: str, lft: int, rgt: int) -> int:\n\tEmployee = frappe.qb.DocType(\"Employee\")\n\tquery = (\n\t\tfrappe.qb.from_(Employee)\n\t\t.select(Count(Employee.name))\n\t\t.where((Employee.lft > lft) & (Employee.rgt < rgt))\n\t).run()\n\n\treturn query[0][0]\n", "path": "hrms/hr/page/organizational_chart/organizational_chart.py"}]}
1235
163
gh_patches_debug_11872
rasdani/github-patches
git_diff
googleapis__python-bigquery-465
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bound the maximum supported Python version Related: #462. The BigQuery library does not support Python 3.9 yet, as the `bqstorage` and `pandas` extras are blocked by the `pyarrow` dependency. Pyarrow [added](https://issues.apache.org/jira/browse/ARROW-10224) the Python 3.9 support, but the change has not been released yet (awaiting `pyarrow==3.0.0`). We need to reflect this in `setup.py`'s ``python_requires`. </issue> <code> [start of setup.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = "google-cloud-bigquery" 24 description = "Google BigQuery API client library" 25 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = "Development Status :: 5 - Production/Stable" 31 dependencies = [ 32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev", 33 "proto-plus >= 1.10.0", 34 "google-cloud-core >= 1.4.1, < 2.0dev", 35 "google-resumable-media >= 0.6.0, < 2.0dev", 36 "six >=1.13.0,< 2.0.0dev", 37 "protobuf >= 3.12.0", 38 ] 39 extras = { 40 "bqstorage": [ 41 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev", 42 # Due to an issue in pip's dependency resolver, the `grpc` extra is not 43 # installed, even though `google-cloud-bigquery-storage` specifies it 44 # as `google-api-core[grpc]`. We thus need to explicitly specify it here. 45 # See: https://github.com/googleapis/python-bigquery/issues/83 The 46 # grpc.Channel.close() method isn't added until 1.32.0. 47 # https://github.com/grpc/grpc/pull/15254 48 "grpcio >= 1.32.0, < 2.0dev", 49 "pyarrow >= 1.0.0, < 3.0dev", 50 ], 51 "pandas": [ 52 "pandas>=0.23.0", 53 # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword. 54 "pyarrow >= 1.0.0, < 3.0dev", 55 ], 56 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"], 57 "opentelemetry": [ 58 "opentelemetry-api==0.11b0", 59 "opentelemetry-sdk==0.11b0", 60 "opentelemetry-instrumentation==0.11b0", 61 ], 62 } 63 64 all_extras = [] 65 66 for extra in extras: 67 all_extras.extend(extras[extra]) 68 69 extras["all"] = all_extras 70 71 # Setup boilerplate below this line. 72 73 package_root = os.path.abspath(os.path.dirname(__file__)) 74 75 readme_filename = os.path.join(package_root, "README.rst") 76 with io.open(readme_filename, encoding="utf-8") as readme_file: 77 readme = readme_file.read() 78 79 version = {} 80 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp: 81 exec(fp.read(), version) 82 version = version["__version__"] 83 84 # Only include packages under the 'google' namespace. Do not include tests, 85 # benchmarks, etc. 86 packages = [ 87 package 88 for package in setuptools.PEP420PackageFinder.find() 89 if package.startswith("google") 90 ] 91 92 # Determine which namespaces are needed. 
93 namespaces = ["google"] 94 if "google.cloud" in packages: 95 namespaces.append("google.cloud") 96 97 98 setuptools.setup( 99 name=name, 100 version=version, 101 description=description, 102 long_description=readme, 103 author="Google LLC", 104 author_email="[email protected]", 105 license="Apache 2.0", 106 url="https://github.com/googleapis/python-bigquery", 107 classifiers=[ 108 release_status, 109 "Intended Audience :: Developers", 110 "License :: OSI Approved :: Apache Software License", 111 "Programming Language :: Python", 112 "Programming Language :: Python :: 3", 113 "Programming Language :: Python :: 3.6", 114 "Programming Language :: Python :: 3.7", 115 "Programming Language :: Python :: 3.8", 116 "Operating System :: OS Independent", 117 "Topic :: Internet", 118 ], 119 platforms="Posix; MacOS X; Windows", 120 packages=packages, 121 namespace_packages=namespaces, 122 install_requires=dependencies, 123 extras_require=extras, 124 python_requires=">=3.6", 125 include_package_data=True, 126 zip_safe=False, 127 ) 128 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -33,7 +33,6 @@ "proto-plus >= 1.10.0", "google-cloud-core >= 1.4.1, < 2.0dev", "google-resumable-media >= 0.6.0, < 2.0dev", - "six >=1.13.0,< 2.0.0dev", "protobuf >= 3.12.0", ] extras = { @@ -121,7 +120,7 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=3.6", + python_requires=">=3.6, <3.9", include_package_data=True, zip_safe=False, )
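A minimal sketch of the package metadata after this diff, trimmed to the fields the patch touches — the placeholder version and the omitted classifiers/extras are illustrative, not the real file:

```python
import setuptools

setuptools.setup(
    name="google-cloud-bigquery",
    version="0.0.0",  # placeholder; the real value is read from version.py
    install_requires=[
        "google-api-core[grpc] >= 1.23.0, < 2.0.0dev",
        "proto-plus >= 1.10.0",
        "google-cloud-core >= 1.4.1, < 2.0dev",
        "google-resumable-media >= 0.6.0, < 2.0dev",
        "protobuf >= 3.12.0",  # the six pin is dropped by the patch
    ],
    # pip now refuses Python 3.9, where the bqstorage/pandas extras cannot
    # resolve: pyarrow only gains 3.9 wheels with its 3.0.0 release.
    python_requires=">=3.6, <3.9",
)
```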
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,6 @@\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n- \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n ]\n extras = {\n@@ -121,7 +120,7 @@\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n- python_requires=\">=3.6\",\n+ python_requires=\">=3.6, <3.9\",\n include_package_data=True,\n zip_safe=False,\n )\n", "issue": "Bound the maximum supported Python version\nRelated: #462.\r\n\r\nThe BigQuery library does not support Python 3.9 yet, as the `bqstorage` and `pandas` extras are blocked by the `pyarrow` dependency. Pyarrow [added](https://issues.apache.org/jira/browse/ARROW-10224) the Python 3.9 support, but the change has not been released yet (awaiting `pyarrow==3.0.0`).\r\n\r\nWe need to reflect this in `setup.py`'s ``python_requires`.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]}
2010
194
gh_patches_debug_10629
rasdani/github-patches
git_diff
jumpserver__jumpserver-138
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> change(version num) change jumpserver version from 0.3.0 -> 0.3.1 </issue> <code> [start of jperm/models.py] 1 import datetime 2 3 from django.db import models 4 from jasset.models import Asset, AssetGroup 5 from juser.models import User, UserGroup 6 7 8 class PermLog(models.Model): 9 datetime = models.DateTimeField(auto_now_add=True) 10 action = models.CharField(max_length=100, null=True, blank=True, default='') 11 results = models.CharField(max_length=1000, null=True, blank=True, default='') 12 is_success = models.BooleanField(default=False) 13 is_finish = models.BooleanField(default=False) 14 15 16 class PermSudo(models.Model): 17 name = models.CharField(max_length=100, unique=True) 18 date_added = models.DateTimeField(auto_now=True) 19 commands = models.TextField() 20 comment = models.CharField(max_length=100, null=True, blank=True, default='') 21 22 def __unicode__(self): 23 return self.name 24 25 26 class PermRole(models.Model): 27 name = models.CharField(max_length=100, unique=True) 28 comment = models.CharField(max_length=100, null=True, blank=True, default='') 29 password = models.CharField(max_length=100) 30 key_path = models.CharField(max_length=100) 31 date_added = models.DateTimeField(auto_now=True) 32 sudo = models.ManyToManyField(PermSudo, related_name='perm_role') 33 34 def __unicode__(self): 35 return self.name 36 37 38 class PermRule(models.Model): 39 date_added = models.DateTimeField(auto_now=True) 40 name = models.CharField(max_length=100, unique=True) 41 comment = models.CharField(max_length=100) 42 asset = models.ManyToManyField(Asset, related_name='perm_rule') 43 asset_group = models.ManyToManyField(AssetGroup, related_name='perm_rule') 44 user = models.ManyToManyField(User, related_name='perm_rule') 45 user_group = models.ManyToManyField(UserGroup, related_name='perm_rule') 46 role = models.ManyToManyField(PermRole, related_name='perm_rule') 47 48 def __unicode__(self): 49 return self.name 50 51 52 class PermPush(models.Model): 53 asset = models.ForeignKey(Asset, related_name='perm_push') 54 role = models.ForeignKey(PermRole, related_name='perm_push') 55 is_public_key = models.BooleanField(default=False) 56 is_password = models.BooleanField(default=False) 57 success = models.BooleanField(default=False) 58 result = models.TextField(default='') 59 date_added = models.DateTimeField(auto_now=True) 60 61 [end of jperm/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/jperm/models.py b/jperm/models.py --- a/jperm/models.py +++ b/jperm/models.py @@ -26,7 +26,7 @@ class PermRole(models.Model): name = models.CharField(max_length=100, unique=True) comment = models.CharField(max_length=100, null=True, blank=True, default='') - password = models.CharField(max_length=100) + password = models.CharField(max_length=128) key_path = models.CharField(max_length=100) date_added = models.DateTimeField(auto_now=True) sudo = models.ManyToManyField(PermSudo, related_name='perm_role')
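The patched model then reads as below; the Django import is added for context, `PermSudo` is the model defined earlier in the same `jperm/models.py`, and the rationale comment is an inference — the quoted issue itself only mentions a version bump:

```python
from django.db import models


class PermRole(models.Model):
    name = models.CharField(max_length=100, unique=True)
    comment = models.CharField(max_length=100, null=True, blank=True, default='')
    # Widened from 100 to 128 characters, presumably so longer (hashed or
    # encrypted) role passwords are no longer truncated on save.
    password = models.CharField(max_length=128)
    key_path = models.CharField(max_length=100)
    date_added = models.DateTimeField(auto_now=True)
    sudo = models.ManyToManyField(PermSudo, related_name='perm_role')

    def __unicode__(self):
        return self.name
```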
{"golden_diff": "diff --git a/jperm/models.py b/jperm/models.py\n--- a/jperm/models.py\n+++ b/jperm/models.py\n@@ -26,7 +26,7 @@\n class PermRole(models.Model):\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n- password = models.CharField(max_length=100)\n+ password = models.CharField(max_length=128)\n key_path = models.CharField(max_length=100)\n date_added = models.DateTimeField(auto_now=True)\n sudo = models.ManyToManyField(PermSudo, related_name='perm_role')\n", "issue": "change(version num) change jumpserver version\nfrom 0.3.0 -> 0.3.1\n\n", "before_files": [{"content": "import datetime\n\nfrom django.db import models\nfrom jasset.models import Asset, AssetGroup\nfrom juser.models import User, UserGroup\n\n\nclass PermLog(models.Model):\n datetime = models.DateTimeField(auto_now_add=True)\n action = models.CharField(max_length=100, null=True, blank=True, default='')\n results = models.CharField(max_length=1000, null=True, blank=True, default='')\n is_success = models.BooleanField(default=False)\n is_finish = models.BooleanField(default=False)\n\n\nclass PermSudo(models.Model):\n name = models.CharField(max_length=100, unique=True)\n date_added = models.DateTimeField(auto_now=True)\n commands = models.TextField()\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermRole(models.Model):\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n password = models.CharField(max_length=100)\n key_path = models.CharField(max_length=100)\n date_added = models.DateTimeField(auto_now=True)\n sudo = models.ManyToManyField(PermSudo, related_name='perm_role')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermRule(models.Model):\n date_added = models.DateTimeField(auto_now=True)\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100)\n asset = models.ManyToManyField(Asset, related_name='perm_rule')\n asset_group = models.ManyToManyField(AssetGroup, related_name='perm_rule')\n user = models.ManyToManyField(User, related_name='perm_rule')\n user_group = models.ManyToManyField(UserGroup, related_name='perm_rule')\n role = models.ManyToManyField(PermRole, related_name='perm_rule')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermPush(models.Model):\n asset = models.ForeignKey(Asset, related_name='perm_push')\n role = models.ForeignKey(PermRole, related_name='perm_push')\n is_public_key = models.BooleanField(default=False)\n is_password = models.BooleanField(default=False)\n success = models.BooleanField(default=False)\n result = models.TextField(default='')\n date_added = models.DateTimeField(auto_now=True)\n\n", "path": "jperm/models.py"}]}
1171
147
gh_patches_debug_9067
rasdani/github-patches
git_diff
nautobot__nautobot-5190
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for an OS Model to define target OS image per device ### Proposed Functionality Support a model to define OSs that should be used by each device and ensure there is also a relationship between vendor and platform. ### Use Case As a user, I should be able to define the intended OS or list of supported OSs per device. I should preferably be able to run search queries within the UI using greater than and less than (<>) operators to verify if my devices. ### Database Changes Yes, will require a new database model(s), as well as updates to Vendor and Platform models ### External Dependencies Maybe - need to explore external library(ies) for parsing idiosyncratic vendor OS version strings. </issue> <code> [start of nautobot/dcim/api/urls.py] 1 from nautobot.core.api.routers import OrderedDefaultRouter 2 3 from . import views 4 5 router = OrderedDefaultRouter() 6 router.APIRootView = views.DCIMRootView 7 8 # Locations 9 router.register("location-types", views.LocationTypeViewSet) 10 router.register("locations", views.LocationViewSet) 11 12 # Racks 13 router.register("rack-groups", views.RackGroupViewSet) 14 router.register("racks", views.RackViewSet) 15 router.register("rack-reservations", views.RackReservationViewSet) 16 17 # Device types 18 router.register("hardware-families", views.HardwareFamilyViewSet) 19 router.register("manufacturers", views.ManufacturerViewSet) 20 router.register("device-types", views.DeviceTypeViewSet) 21 22 # Device type components 23 router.register("console-port-templates", views.ConsolePortTemplateViewSet) 24 router.register("console-server-port-templates", views.ConsoleServerPortTemplateViewSet) 25 router.register("power-port-templates", views.PowerPortTemplateViewSet) 26 router.register("power-outlet-templates", views.PowerOutletTemplateViewSet) 27 router.register("interface-templates", views.InterfaceTemplateViewSet) 28 router.register("front-port-templates", views.FrontPortTemplateViewSet) 29 router.register("rear-port-templates", views.RearPortTemplateViewSet) 30 router.register("device-bay-templates", views.DeviceBayTemplateViewSet) 31 32 # Devices 33 router.register("platforms", views.PlatformViewSet) 34 router.register("devices", views.DeviceViewSet) 35 36 # Device components 37 router.register("console-ports", views.ConsolePortViewSet) 38 router.register("console-server-ports", views.ConsoleServerPortViewSet) 39 router.register("power-ports", views.PowerPortViewSet) 40 router.register("power-outlets", views.PowerOutletViewSet) 41 router.register("interfaces", views.InterfaceViewSet) 42 router.register("front-ports", views.FrontPortViewSet) 43 router.register("rear-ports", views.RearPortViewSet) 44 router.register("device-bays", views.DeviceBayViewSet) 45 router.register("inventory-items", views.InventoryItemViewSet) 46 47 # Connections 48 router.register("console-connections", views.ConsoleConnectionViewSet, basename="consoleconnections") 49 router.register("power-connections", views.PowerConnectionViewSet, basename="powerconnections") 50 router.register( 51 "interface-connections", 52 views.InterfaceConnectionViewSet, 53 basename="interfaceconnections", 54 ) 55 56 # Cables 57 router.register("cables", views.CableViewSet) 58 59 # Interface Redundancy Group 60 router.register("interface-redundancy-groups", views.InterfaceRedundancyGroupViewSet) 61 router.register("interface-redundancy-group-associations", 
views.InterfaceRedundancyGroupAssociationViewSet) 62 63 # Virtual chassis 64 router.register("virtual-chassis", views.VirtualChassisViewSet) 65 66 # Power 67 router.register("power-panels", views.PowerPanelViewSet) 68 router.register("power-feeds", views.PowerFeedViewSet) 69 70 # Device Redundancy Group 71 router.register("device-redundancy-groups", views.DeviceRedundancyGroupViewSet) 72 73 # Software images 74 router.register("software-images", views.SoftwareImageViewSet) 75 router.register("software-versions", views.SoftwareVersionViewSet) 76 router.register("device-type-to-software-image", views.DeviceTypeToSoftwareImageViewSet) 77 78 # Miscellaneous 79 router.register("connected-device", views.ConnectedDeviceViewSet, basename="connected-device") 80 81 app_name = "dcim-api" 82 urlpatterns = router.urls 83 [end of nautobot/dcim/api/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nautobot/dcim/api/urls.py b/nautobot/dcim/api/urls.py --- a/nautobot/dcim/api/urls.py +++ b/nautobot/dcim/api/urls.py @@ -73,7 +73,7 @@ # Software images router.register("software-images", views.SoftwareImageViewSet) router.register("software-versions", views.SoftwareVersionViewSet) -router.register("device-type-to-software-image", views.DeviceTypeToSoftwareImageViewSet) +router.register("device-types-to-software-images", views.DeviceTypeToSoftwareImageViewSet) # Miscellaneous router.register("connected-device", views.ConnectedDeviceViewSet, basename="connected-device")
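In context, the patched registration block reads as follows; `router` and `views` are the module-level objects from `nautobot/dcim/api/urls.py` shown in the prompt, so this is an excerpt of the patched file rather than a standalone script:

```python
# Software images — route pluralized to match the naming style of the
# neighbouring endpoints ("software-images", "software-versions", ...).
router.register("software-images", views.SoftwareImageViewSet)
router.register("software-versions", views.SoftwareVersionViewSet)
router.register("device-types-to-software-images", views.DeviceTypeToSoftwareImageViewSet)
```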
{"golden_diff": "diff --git a/nautobot/dcim/api/urls.py b/nautobot/dcim/api/urls.py\n--- a/nautobot/dcim/api/urls.py\n+++ b/nautobot/dcim/api/urls.py\n@@ -73,7 +73,7 @@\n # Software images\n router.register(\"software-images\", views.SoftwareImageViewSet)\n router.register(\"software-versions\", views.SoftwareVersionViewSet)\n-router.register(\"device-type-to-software-image\", views.DeviceTypeToSoftwareImageViewSet)\n+router.register(\"device-types-to-software-images\", views.DeviceTypeToSoftwareImageViewSet)\n \n # Miscellaneous\n router.register(\"connected-device\", views.ConnectedDeviceViewSet, basename=\"connected-device\")\n", "issue": "Add support for an OS Model to define target OS image per device\n### Proposed Functionality\r\nSupport a model to define OSs that should be used by each device and ensure there is also a relationship between vendor and platform.\r\n\r\n### Use Case\r\nAs a user, I should be able to define the intended OS or list of supported OSs per device. I should preferably be able to run search queries within the UI using greater than and less than (<>) operators to verify if my devices.\r\n\r\n### Database Changes\r\nYes, will require a new database model(s), as well as updates to Vendor and Platform models\r\n\r\n### External Dependencies\r\nMaybe - need to explore external library(ies) for parsing idiosyncratic vendor OS version strings.\r\n\n", "before_files": [{"content": "from nautobot.core.api.routers import OrderedDefaultRouter\n\nfrom . import views\n\nrouter = OrderedDefaultRouter()\nrouter.APIRootView = views.DCIMRootView\n\n# Locations\nrouter.register(\"location-types\", views.LocationTypeViewSet)\nrouter.register(\"locations\", views.LocationViewSet)\n\n# Racks\nrouter.register(\"rack-groups\", views.RackGroupViewSet)\nrouter.register(\"racks\", views.RackViewSet)\nrouter.register(\"rack-reservations\", views.RackReservationViewSet)\n\n# Device types\nrouter.register(\"hardware-families\", views.HardwareFamilyViewSet)\nrouter.register(\"manufacturers\", views.ManufacturerViewSet)\nrouter.register(\"device-types\", views.DeviceTypeViewSet)\n\n# Device type components\nrouter.register(\"console-port-templates\", views.ConsolePortTemplateViewSet)\nrouter.register(\"console-server-port-templates\", views.ConsoleServerPortTemplateViewSet)\nrouter.register(\"power-port-templates\", views.PowerPortTemplateViewSet)\nrouter.register(\"power-outlet-templates\", views.PowerOutletTemplateViewSet)\nrouter.register(\"interface-templates\", views.InterfaceTemplateViewSet)\nrouter.register(\"front-port-templates\", views.FrontPortTemplateViewSet)\nrouter.register(\"rear-port-templates\", views.RearPortTemplateViewSet)\nrouter.register(\"device-bay-templates\", views.DeviceBayTemplateViewSet)\n\n# Devices\nrouter.register(\"platforms\", views.PlatformViewSet)\nrouter.register(\"devices\", views.DeviceViewSet)\n\n# Device components\nrouter.register(\"console-ports\", views.ConsolePortViewSet)\nrouter.register(\"console-server-ports\", views.ConsoleServerPortViewSet)\nrouter.register(\"power-ports\", views.PowerPortViewSet)\nrouter.register(\"power-outlets\", views.PowerOutletViewSet)\nrouter.register(\"interfaces\", views.InterfaceViewSet)\nrouter.register(\"front-ports\", views.FrontPortViewSet)\nrouter.register(\"rear-ports\", views.RearPortViewSet)\nrouter.register(\"device-bays\", views.DeviceBayViewSet)\nrouter.register(\"inventory-items\", views.InventoryItemViewSet)\n\n# Connections\nrouter.register(\"console-connections\", views.ConsoleConnectionViewSet, 
basename=\"consoleconnections\")\nrouter.register(\"power-connections\", views.PowerConnectionViewSet, basename=\"powerconnections\")\nrouter.register(\n \"interface-connections\",\n views.InterfaceConnectionViewSet,\n basename=\"interfaceconnections\",\n)\n\n# Cables\nrouter.register(\"cables\", views.CableViewSet)\n\n# Interface Redundancy Group\nrouter.register(\"interface-redundancy-groups\", views.InterfaceRedundancyGroupViewSet)\nrouter.register(\"interface-redundancy-group-associations\", views.InterfaceRedundancyGroupAssociationViewSet)\n\n# Virtual chassis\nrouter.register(\"virtual-chassis\", views.VirtualChassisViewSet)\n\n# Power\nrouter.register(\"power-panels\", views.PowerPanelViewSet)\nrouter.register(\"power-feeds\", views.PowerFeedViewSet)\n\n# Device Redundancy Group\nrouter.register(\"device-redundancy-groups\", views.DeviceRedundancyGroupViewSet)\n\n# Software images\nrouter.register(\"software-images\", views.SoftwareImageViewSet)\nrouter.register(\"software-versions\", views.SoftwareVersionViewSet)\nrouter.register(\"device-type-to-software-image\", views.DeviceTypeToSoftwareImageViewSet)\n\n# Miscellaneous\nrouter.register(\"connected-device\", views.ConnectedDeviceViewSet, basename=\"connected-device\")\n\napp_name = \"dcim-api\"\nurlpatterns = router.urls\n", "path": "nautobot/dcim/api/urls.py"}]}
1531
148
gh_patches_debug_1490
rasdani/github-patches
git_diff
coala__coala-bears-2811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> HgCommitBear: Make asciinema </issue> <code> [start of bears/vcs/mercurial/HgCommitBear.py] 1 import os 2 import shutil 3 4 from bears.vcs.CommitBear import _CommitBear 5 from coala_utils.ContextManagers import change_directory 6 from coalib.misc.Shell import run_shell_command 7 8 9 class HgCommitBear(_CommitBear): 10 LANGUAGES = {'Hg'} 11 CAN_DETECT = {'Formatting'} 12 13 @classmethod 14 def check_prerequisites(cls): 15 if shutil.which('hg') is None: 16 return 'hg is not installed.' 17 else: 18 return True 19 20 def get_remotes(): 21 remotes, _ = run_shell_command('hg paths') 22 return remotes 23 24 def get_head_commit(self): 25 with change_directory(self.get_config_dir() or os.getcwd()): 26 return run_shell_command('hg log -l 1 --template "{desc}"') 27 [end of bears/vcs/mercurial/HgCommitBear.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bears/vcs/mercurial/HgCommitBear.py b/bears/vcs/mercurial/HgCommitBear.py --- a/bears/vcs/mercurial/HgCommitBear.py +++ b/bears/vcs/mercurial/HgCommitBear.py @@ -9,6 +9,7 @@ class HgCommitBear(_CommitBear): LANGUAGES = {'Hg'} CAN_DETECT = {'Formatting'} + ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN' @classmethod def check_prerequisites(cls):
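After the patch the class header carries the demo link alongside the other bear metadata; `_CommitBear` is imported in the original module, and the comment on how the URL is consumed is an assumption — the diff itself only adds the attribute:

```python
class HgCommitBear(_CommitBear):
    LANGUAGES = {'Hg'}
    CAN_DETECT = {'Formatting'}
    # Asciinema recording demonstrating the bear; presumably surfaced by
    # coala's bear-metadata tooling (documentation and bear listings).
    ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN'
```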
{"golden_diff": "diff --git a/bears/vcs/mercurial/HgCommitBear.py b/bears/vcs/mercurial/HgCommitBear.py\n--- a/bears/vcs/mercurial/HgCommitBear.py\n+++ b/bears/vcs/mercurial/HgCommitBear.py\n@@ -9,6 +9,7 @@\n class HgCommitBear(_CommitBear):\n LANGUAGES = {'Hg'}\n CAN_DETECT = {'Formatting'}\n+ ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN'\n \n @classmethod\n def check_prerequisites(cls):\n", "issue": "HgCommitBear: Make asciinema\n\n", "before_files": [{"content": "import os\nimport shutil\n\nfrom bears.vcs.CommitBear import _CommitBear\nfrom coala_utils.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\n\n\nclass HgCommitBear(_CommitBear):\n LANGUAGES = {'Hg'}\n CAN_DETECT = {'Formatting'}\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which('hg') is None:\n return 'hg is not installed.'\n else:\n return True\n\n def get_remotes():\n remotes, _ = run_shell_command('hg paths')\n return remotes\n\n def get_head_commit(self):\n with change_directory(self.get_config_dir() or os.getcwd()):\n return run_shell_command('hg log -l 1 --template \"{desc}\"')\n", "path": "bears/vcs/mercurial/HgCommitBear.py"}]}
773
147
gh_patches_debug_26289
rasdani/github-patches
git_diff
great-expectations__great_expectations-4506
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` </issue> <code> [start of examples/expectations/regex_based_column_map_expectation_template.py] 1 """ 2 This is a template for creating custom RegexBasedColumnMapExpectations. 3 For detailed instructions on how to use it, please see: 4 https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations 5 """ 6 7 from typing import Dict, Optional 8 9 from great_expectations.core.expectation_configuration import ExpectationConfiguration 10 from great_expectations.exceptions.exceptions import ( 11 InvalidExpectationConfigurationError, 12 ) 13 from great_expectations.expectations.regex_based_column_map_expectation import ( 14 RegexBasedColumnMapExpectation, 15 RegexColumnMapMetricProvider, 16 ) 17 18 19 # <snippet> 20 # This class defines the Expectation itself 21 class ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation): 22 """TODO: Add a docstring here""" 23 24 # These values will be used to configure the metric created by your expectation 25 regex_snake_name = "regex_name" 26 regex_camel_name = "RegexName" 27 regex = "regex pattern" 28 semantic_type_name_plural = None 29 30 # These examples will be shown in the public gallery. 31 # They will also be executed as unit tests for your Expectation. 32 examples = [] 33 34 def validate_configuration(self, configuration: Optional[ExpectationConfiguration]): 35 """ 36 Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that 37 necessary configuration arguments have been provided for the validation of the expectation. 38 39 Args: 40 configuration (OPTIONAL[ExpectationConfiguration]): \ 41 An optional Expectation Configuration entry that will be used to configure the expectation 42 Returns: 43 True if the configuration has been validated successfully. Otherwise, raises an exception 44 """ 45 46 super().validate_configuration(configuration) 47 if configuration is None: 48 configuration = self.configuration 49 50 # # Check other things in configuration.kwargs and raise Exceptions if needed 51 # try: 52 # assert ( 53 # ... 54 # ), "message" 55 # assert ( 56 # ... 57 # ), "message" 58 # except AssertionError as e: 59 # raise InvalidExpectationConfigurationError(str(e)) 60 61 return True 62 63 # Here your regex is used to create a custom metric for this expectation 64 map_metric = RegexBasedColumnMapExpectation.register_metric( 65 regex_snake_name=regex_snake_name, 66 regex_camel_name=regex_camel_name, 67 regex_=regex, 68 ) 69 70 # This object contains metadata for display in the public Gallery 71 library_metadata = { 72 "tags": [], # Tags for this Expectation in the Gallery 73 "contributors": [ # Github handles for all contributors to this Expectation. 74 "@your_name_here", # Don't forget to add your github handle here! 75 ], 76 } 77 78 79 # </snippet> 80 if __name__ == "__main__": 81 ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist() 82 [end of examples/expectations/regex_based_column_map_expectation_template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/expectations/regex_based_column_map_expectation_template.py b/examples/expectations/regex_based_column_map_expectation_template.py --- a/examples/expectations/regex_based_column_map_expectation_template.py +++ b/examples/expectations/regex_based_column_map_expectation_template.py @@ -31,35 +31,6 @@ # They will also be executed as unit tests for your Expectation. examples = [] - def validate_configuration(self, configuration: Optional[ExpectationConfiguration]): - """ - Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that - necessary configuration arguments have been provided for the validation of the expectation. - - Args: - configuration (OPTIONAL[ExpectationConfiguration]): \ - An optional Expectation Configuration entry that will be used to configure the expectation - Returns: - True if the configuration has been validated successfully. Otherwise, raises an exception - """ - - super().validate_configuration(configuration) - if configuration is None: - configuration = self.configuration - - # # Check other things in configuration.kwargs and raise Exceptions if needed - # try: - # assert ( - # ... - # ), "message" - # assert ( - # ... - # ), "message" - # except AssertionError as e: - # raise InvalidExpectationConfigurationError(str(e)) - - return True - # Here your regex is used to create a custom metric for this expectation map_metric = RegexBasedColumnMapExpectation.register_metric( regex_snake_name=regex_snake_name,
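With the commented-out stub removed, the template class shrinks to the body below (assembled from the file in the prompt and abridged — the unchanged `library_metadata` block is omitted); configuration checking now falls through to `RegexBasedColumnMapExpectation`, which is imported in the original module:

```python
class ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):
    """TODO: Add a docstring here"""

    # Values used to configure the metric created by the expectation.
    regex_snake_name = "regex_name"
    regex_camel_name = "RegexName"
    regex = "regex pattern"
    semantic_type_name_plural = None

    # Shown in the public gallery and executed as unit tests.
    examples = []

    # The regex builds a custom metric; validate_configuration() is no
    # longer overridden here.
    map_metric = RegexBasedColumnMapExpectation.register_metric(
        regex_snake_name=regex_snake_name,
        regex_camel_name=regex_camel_name,
        regex_=regex,
    )
```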
{"golden_diff": "diff --git a/examples/expectations/regex_based_column_map_expectation_template.py b/examples/expectations/regex_based_column_map_expectation_template.py\n--- a/examples/expectations/regex_based_column_map_expectation_template.py\n+++ b/examples/expectations/regex_based_column_map_expectation_template.py\n@@ -31,35 +31,6 @@\n # They will also be executed as unit tests for your Expectation.\n examples = []\n \n- def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):\n- \"\"\"\n- Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n- necessary configuration arguments have been provided for the validation of the expectation.\n-\n- Args:\n- configuration (OPTIONAL[ExpectationConfiguration]): \\\n- An optional Expectation Configuration entry that will be used to configure the expectation\n- Returns:\n- True if the configuration has been validated successfully. Otherwise, raises an exception\n- \"\"\"\n-\n- super().validate_configuration(configuration)\n- if configuration is None:\n- configuration = self.configuration\n-\n- # # Check other things in configuration.kwargs and raise Exceptions if needed\n- # try:\n- # assert (\n- # ...\n- # ), \"message\"\n- # assert (\n- # ...\n- # ), \"message\"\n- # except AssertionError as e:\n- # raise InvalidExpectationConfigurationError(str(e))\n-\n- return True\n-\n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n regex_snake_name=regex_snake_name,\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "\"\"\"\nThis is a template for creating custom RegexBasedColumnMapExpectations.\nFor detailed instructions on how to use it, please see:\n https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations\n\"\"\"\n\nfrom typing import Dict, Optional\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.exceptions.exceptions import (\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.expectations.regex_based_column_map_expectation import (\n RegexBasedColumnMapExpectation,\n RegexColumnMapMetricProvider,\n)\n\n\n# <snippet>\n# This class defines the Expectation itself\nclass ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):\n \"\"\"TODO: Add a docstring here\"\"\"\n\n # These values will be used to configure the metric created by your expectation\n regex_snake_name = \"regex_name\"\n regex_camel_name = \"RegexName\"\n regex = \"regex pattern\"\n semantic_type_name_plural = None\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = []\n\n def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):\n \"\"\"\n Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n necessary configuration arguments have been provided for the validation of the expectation.\n\n Args:\n configuration (OPTIONAL[ExpectationConfiguration]): \\\n An optional Expectation Configuration entry that will be used to configure the expectation\n Returns:\n True if the configuration has been validated successfully. 
Otherwise, raises an exception\n \"\"\"\n\n super().validate_configuration(configuration)\n if configuration is None:\n configuration = self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))\n\n return True\n\n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n regex_snake_name=regex_snake_name,\n regex_camel_name=regex_camel_name,\n regex_=regex,\n )\n\n # This object contains metadata for display in the public Gallery\n library_metadata = {\n \"tags\": [], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@your_name_here\", # Don't forget to add your github handle here!\n ],\n }\n\n\n# </snippet>\nif __name__ == \"__main__\":\n ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist()\n", "path": "examples/expectations/regex_based_column_map_expectation_template.py"}]}
1357
366
gh_patches_debug_51093
rasdani/github-patches
git_diff
ivy-llc__ivy-16195
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> argmax </issue> <code> [start of ivy/functional/frontends/paddle/tensor/tensor.py] 1 # local 2 import ivy 3 import ivy.functional.frontends.paddle as paddle_frontend 4 from ivy.functional.frontends.paddle.func_wrapper import ( 5 _to_ivy_array, 6 ) 7 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes 8 9 10 class Tensor: 11 def __init__(self, array, dtype=None, place="cpu", stop_gradient=True): 12 self._ivy_array = ( 13 ivy.array(array, dtype=dtype, device=place) 14 if not isinstance(array, ivy.Array) 15 else array 16 ) 17 self._dtype = dtype 18 self._place = place 19 self._stop_gradient = stop_gradient 20 21 def __repr__(self): 22 return ( 23 str(self._ivy_array.__repr__()) 24 .replace("ivy.array", "ivy.frontends.paddle.Tensor") 25 .replace("dev", "place") 26 ) 27 28 # Properties # 29 # ---------- # 30 31 @property 32 def ivy_array(self): 33 return self._ivy_array 34 35 @property 36 def place(self): 37 return self.ivy_array.device 38 39 @property 40 def dtype(self): 41 return self._ivy_array.dtype 42 43 @property 44 def shape(self): 45 return self._ivy_array.shape 46 47 @property 48 def ndim(self): 49 return self.dim() 50 51 # Setters # 52 # --------# 53 54 @ivy_array.setter 55 def ivy_array(self, array): 56 self._ivy_array = ( 57 ivy.array(array) if not isinstance(array, ivy.Array) else array 58 ) 59 60 # Special Methods # 61 # -------------------# 62 63 def __getitem__(self, item): 64 ivy_args = ivy.nested_map([self, item], _to_ivy_array) 65 ret = ivy.get_item(*ivy_args) 66 return paddle_frontend.Tensor(ret) 67 68 def __setitem__(self, item, value): 69 item, value = ivy.nested_map([item, value], _to_ivy_array) 70 self.ivy_array[item] = value 71 72 def __iter__(self): 73 if self.ndim == 0: 74 raise TypeError("iteration over a 0-d tensor not supported") 75 for i in range(self.shape[0]): 76 yield self[i] 77 78 # Instance Methods # 79 # ---------------- # 80 81 def reshape(self, *args, shape=None): 82 if args and shape: 83 raise TypeError("reshape() got multiple values for argument 'shape'") 84 if shape is not None: 85 return paddle_frontend.reshape(self._ivy_array, shape) 86 if args: 87 if isinstance(args[0], (tuple, list)): 88 shape = args[0] 89 return paddle_frontend.reshape(self._ivy_array, shape) 90 else: 91 return paddle_frontend.reshape(self._ivy_array, args) 92 return paddle_frontend.reshape(self._ivy_array) 93 94 def dim(self): 95 return self.ivy_array.ndim 96 97 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 98 def abs(self): 99 return paddle_frontend.abs(self) 100 101 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 102 def asinh(self, name=None): 103 return ivy.asinh(self._ivy_array) 104 105 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 106 def asin(self, name=None): 107 return ivy.asin(self._ivy_array) 108 109 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 110 def log(self, name=None): 111 return ivy.log(self._ivy_array) 112 113 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") 114 def sin(self, name=None): 115 return ivy.sin(self._ivy_array) 116 117 [end of ivy/functional/frontends/paddle/tensor/tensor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py --- a/ivy/functional/frontends/paddle/tensor/tensor.py +++ b/ivy/functional/frontends/paddle/tensor/tensor.py @@ -114,3 +114,6 @@ def sin(self, name=None): return ivy.sin(self._ivy_array) + @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") + def argmax(self, axis=None, keepdim=False, dtype=None, name=None): + return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)
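The added method, verbatim from the diff, shown with a hedged usage note; it lives on the frontend `Tensor` class, where `ivy` and `with_unsupported_dtypes` are already imported, and Paddle's `keepdim` spelling is mapped onto ivy's `keepdims`:

```python
@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
def argmax(self, axis=None, keepdim=False, dtype=None, name=None):
    return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)


# Illustrative use (assuming the frontend Tensor wraps a 2-D float array):
# t = Tensor([[1.0, 5.0], [3.0, 2.0]])
# t.argmax(axis=1)  # -> indices of the per-row maxima, i.e. [1, 0]
```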
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py\n--- a/ivy/functional/frontends/paddle/tensor/tensor.py\n+++ b/ivy/functional/frontends/paddle/tensor/tensor.py\n@@ -114,3 +114,6 @@\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n \r\n+ @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n+ def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n+ return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\n", "issue": "argmax\n\n", "before_files": [{"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n _to_ivy_array,\r\n)\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return 
ivy.asin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n\r\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}]}
1670
176
gh_patches_debug_10275
rasdani/github-patches
git_diff
mindsdb__lightwood-524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `sktime` missing dependency In requirements.txt `sktime[arima] >= 0.5.0` bat `(mindsdb) C:\Users\User>pip install sktime[arima]>=0.5.0 WARNING: sktime 0.7.0 does not provide the extra 'arima'` install `(mindsdb) C:\Users\User>pip install sktime[arima]==0.5.0` `sktime` missing dependency In requirements.txt `sktime[arima] >= 0.5.0` bat `(mindsdb) C:\Users\User>pip install sktime[arima]>=0.5.0 WARNING: sktime 0.7.0 does not provide the extra 'arima'` install `(mindsdb) C:\Users\User>pip install sktime[arima]==0.5.0` </issue> <code> [start of lightwood/model/sktime.py] 1 import numpy as np 2 import pandas as pd 3 from typing import Dict, List, Union 4 from sktime.forecasting.arima import AutoARIMA 5 6 from lightwood.api import dtype 7 from lightwood.helpers.log import log 8 from lightwood.model.base import BaseModel 9 from lightwood.encoder.time_series.helpers.common import get_group_matches 10 from lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs 11 12 13 class SkTime(BaseModel): 14 forecaster: str 15 n_ts_predictions: int 16 target: str 17 supports_proba: bool 18 19 def __init__( 20 self, stop_after: int, target: str, dtype_dict: Dict[str, str], 21 n_ts_predictions: int, ts_analysis: Dict): 22 super().__init__(stop_after) 23 self.target = target 24 dtype_dict[target] = dtype.float 25 self.model_class = AutoARIMA 26 self.models = {} 27 self.n_ts_predictions = n_ts_predictions 28 self.ts_analysis = ts_analysis 29 self.forecasting_horizon = np.arange(1, self.n_ts_predictions) 30 self.cutoff_index = {} # marks index at which training data stops and forecasting window starts 31 self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by 32 self.supports_proba = False 33 self.stable = True 34 35 def fit(self, ds_arr: List[EncodedDs]) -> None: 36 log.info('Started fitting sktime forecaster for array prediction') 37 38 all_folds = ConcatedEncodedDs(ds_arr) 39 df = all_folds.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis["tss"].order_by[0]}') 40 data = {'data': df[self.target], 41 'group_info': {gcol: df[gcol].tolist() 42 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}} 43 44 for group in self.ts_analysis['group_combinations']: 45 self.models[group] = self.model_class() 46 47 if self.grouped_by == ['__default']: 48 series_idxs = data['data'].index 49 series_data = data['data'].values 50 else: 51 series_idxs, series_data = get_group_matches(data, group) 52 53 if series_data.size > 0: 54 series = pd.Series(series_data.squeeze(), index=series_idxs) 55 series = series.sort_index(ascending=True) 56 series = series.reset_index(drop=True) 57 try: 58 self.models[group].fit(series) 59 except ValueError: 60 self.models[group] = self.model_class(deseasonalize=False) 61 self.models[group].fit(series) 62 63 self.cutoff_index[group] = len(series) 64 65 if self.grouped_by == ['__default']: 66 break 67 68 def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], predict_proba: bool = False) -> pd.DataFrame: 69 if predict_proba: 70 log.warning('This model does not output probability estimates') 71 72 length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds) 73 ydf = pd.DataFrame(0, # zero-filled 74 index=np.arange(length), 75 columns=['prediction'], 76 dtype=object) 77 78 data = {'data': ds.data_frame[self.target].reset_index(drop=True), 79 'group_info': {gcol: 
ds.data_frame[gcol].tolist() 80 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}} 81 82 # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder 83 84 for group in self.ts_analysis['group_combinations']: 85 86 if self.grouped_by == ['__default']: 87 series_idxs = data['data'].index 88 series_data = data['data'].values 89 else: 90 series_idxs, series_data = get_group_matches(data, group) 91 92 if series_data.size > 0: 93 series = pd.Series(series_data.squeeze(), index=series_idxs) 94 series = series.sort_index(ascending=True) 95 series = series.reset_index(drop=True) 96 97 for idx, _ in enumerate(series.iteritems()): 98 ydf['prediction'].iloc[series_idxs[idx]] = self.models[group].predict( 99 np.arange(idx, # +cutoff 100 idx + self.n_ts_predictions)).tolist() # +cutoff 101 102 if self.grouped_by == ['__default']: 103 break 104 105 return ydf[['prediction']] 106 [end of lightwood/model/sktime.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lightwood/model/sktime.py b/lightwood/model/sktime.py
--- a/lightwood/model/sktime.py
+++ b/lightwood/model/sktime.py
@@ -42,7 +42,8 @@
                                for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
 
         for group in self.ts_analysis['group_combinations']:
-            self.models[group] = self.model_class()
+            # many warnings might be thrown inside of statsmodels during stepwise procedure
+            self.models[group] = self.model_class(suppress_warnings=True)
 
             if self.grouped_by == ['__default']:
                 series_idxs = data['data'].index
{"golden_diff": "diff --git a/lightwood/model/sktime.py b/lightwood/model/sktime.py\n--- a/lightwood/model/sktime.py\n+++ b/lightwood/model/sktime.py\n@@ -42,7 +42,8 @@\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n \n for group in self.ts_analysis['group_combinations']:\n- self.models[group] = self.model_class()\n+ # many warnings might be thrown inside of statsmodels during stepwise procedure\n+ self.models[group] = self.model_class(suppress_warnings=True)\n \n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n", "issue": "`sktime` missing dependency\nIn requirements.txt\r\n`sktime[arima] >= 0.5.0`\r\nbat \r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]>=0.5.0\r\nWARNING: sktime 0.7.0 does not provide the extra 'arima'`\r\ninstall\r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]==0.5.0`\n`sktime` missing dependency\nIn requirements.txt\r\n`sktime[arima] >= 0.5.0`\r\nbat \r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]>=0.5.0\r\nWARNING: sktime 0.7.0 does not provide the extra 'arima'`\r\ninstall\r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]==0.5.0`\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom typing import Dict, List, Union\nfrom sktime.forecasting.arima import AutoARIMA\n\nfrom lightwood.api import dtype\nfrom lightwood.helpers.log import log\nfrom lightwood.model.base import BaseModel\nfrom lightwood.encoder.time_series.helpers.common import get_group_matches\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass SkTime(BaseModel):\n forecaster: str\n n_ts_predictions: int\n target: str\n supports_proba: bool\n\n def __init__(\n self, stop_after: int, target: str, dtype_dict: Dict[str, str],\n n_ts_predictions: int, ts_analysis: Dict):\n super().__init__(stop_after)\n self.target = target\n dtype_dict[target] = dtype.float\n self.model_class = AutoARIMA\n self.models = {}\n self.n_ts_predictions = n_ts_predictions\n self.ts_analysis = ts_analysis\n self.forecasting_horizon = np.arange(1, self.n_ts_predictions)\n self.cutoff_index = {} # marks index at which training data stops and forecasting window starts\n self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by\n self.supports_proba = False\n self.stable = True\n\n def fit(self, ds_arr: List[EncodedDs]) -> None:\n log.info('Started fitting sktime forecaster for array prediction')\n\n all_folds = ConcatedEncodedDs(ds_arr)\n df = all_folds.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis[\"tss\"].order_by[0]}')\n data = {'data': df[self.target],\n 'group_info': {gcol: df[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n for group in self.ts_analysis['group_combinations']:\n self.models[group] = self.model_class()\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n try:\n self.models[group].fit(series)\n except ValueError:\n self.models[group] = self.model_class(deseasonalize=False)\n self.models[group].fit(series)\n\n self.cutoff_index[group] = len(series)\n\n if self.grouped_by == ['__default']:\n break\n\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], predict_proba: bool = 
False) -> pd.DataFrame:\n if predict_proba:\n log.warning('This model does not output probability estimates')\n\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=['prediction'],\n dtype=object)\n\n data = {'data': ds.data_frame[self.target].reset_index(drop=True),\n 'group_info': {gcol: ds.data_frame[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder\n\n for group in self.ts_analysis['group_combinations']:\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n\n for idx, _ in enumerate(series.iteritems()):\n ydf['prediction'].iloc[series_idxs[idx]] = self.models[group].predict(\n np.arange(idx, # +cutoff\n idx + self.n_ts_predictions)).tolist() # +cutoff\n\n if self.grouped_by == ['__default']:\n break\n\n return ydf[['prediction']]\n", "path": "lightwood/model/sktime.py"}]}
1,914
155
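A note on the fix in this record: sktime's `AutoARIMA` wraps pmdarima's stepwise order search, which can flood the logs with statsmodels convergence warnings, and passing `suppress_warnings=True` mutes them at the source rather than filtering them downstream. A minimal standalone sketch of the same call, assuming the flag is forwarded to pmdarima as the golden diff implies (the synthetic series is purely illustrative):

```python
import numpy as np
import pandas as pd
from sktime.forecasting.arima import AutoARIMA

# Toy series standing in for the per-group series lightwood builds in fit().
y = pd.Series(np.sin(np.linspace(0, 20, 120)))

# suppress_warnings=True mutes the ConvergenceWarnings emitted
# during the stepwise (p, d, q) search; the fitted model is unchanged.
forecaster = AutoARIMA(suppress_warnings=True)
forecaster.fit(y)
print(forecaster.predict(fh=np.arange(1, 6)))  # 5-step-ahead forecast
```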
gh_patches_debug_26823
rasdani/github-patches
git_diff
ydataai__ydata-profiling-67
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add badges for "highly skewed", "zeros" I enjoy the badges on the report and am happy to submit a PR for this if it isn't being worked on elsewhere. </issue> <code> [start of pandas_profiling/templates.py] 1 # coding=UTF-8 2 3 '''This file contains all templates used for generating the HTML profile report''' 4 5 from jinja2 import Environment, PackageLoader 6 7 # Initializing Jinja 8 pl = PackageLoader('pandas_profiling', 'templates') 9 jinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl) 10 11 # Mapping between template name and file 12 templates = {'freq_table_row': 'freq_table_row.html', 13 'mini_freq_table_row': 'mini_freq_table_row.html', 14 'freq_table': 'freq_table.html', 15 'mini_freq_table': 'mini_freq_table.html', 16 'row_num': 'row_num.html', 17 'row_date': 'row_date.html', 18 'row_cat': 'row_cat.html', 19 'row_bool': 'row_bool.html', 20 'row_corr': 'row_corr.html', 21 'row_recoded': 'row_recoded.html', 22 'row_const': 'row_const.html', 23 'row_unique': 'row_unique.html', 24 'overview': 'overview.html', 25 'sample': 'sample.html', 26 'base': 'base.html', 27 'wrapper': 'wrapper.html' 28 } 29 30 # Mapping between row type and var type 31 var_type = {'NUM': 'Numeric', 32 'DATE': 'Date', 33 'CAT': 'Categorical', 34 'UNIQUE': 'Categorical, Unique', 35 'BOOL': 'Boolean', 36 'CONST': 'Constant', 37 'CORR': 'Highly correlated', 38 'RECODED': 'Recoded' 39 } 40 41 42 def template(template_name): 43 """Return a jinja template ready for rendering. If needed, global variables are initialized. 44 45 Parameters 46 ---------- 47 template_name: str, the name of the template as defined in the templates mapping 48 49 Returns 50 ------- 51 The Jinja template ready for rendering 52 """ 53 globals = None 54 if template_name.startswith('row_'): 55 # This is a row template setting global variable 56 globals = dict() 57 globals['vartype'] = var_type[template_name.split('_')[1].upper()] 58 return jinja2_env.get_template(templates[template_name], globals=globals) 59 60 61 # mapping between row type and template name 62 row_templates_dict = {'NUM': template('row_num'), 63 'DATE': template('row_date'), 64 'DISCRETE': template('row_num'), 65 'CAT': template('row_cat'), 66 'BOOL': template('row_bool'), 67 'UNIQUE': template('row_unique'), 68 'CONST': template('row_const'), 69 'CORR': template('row_corr'), 70 'RECODED': template('row_recoded') 71 } 72 73 # The number of column to use in the display of the frequency table according to the category 74 mini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3} 75 76 messages = dict() 77 messages['CONST'] = u'{0[varname]} has constant value {0[mode]} <span class="label label-primary">Rejected</span>' 78 messages['CORR'] = u'{0[varname]} is highly correlated with {0[correlation_var]} (ρ = {0[correlation]}) <span class="label label-primary">Rejected</span>' 79 messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class="label label-primary">Rejected</span>' 80 messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class="label label-warning">Warning</span>' 81 messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class="label label-warning">Warning</span>' 82 messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]})' 83 messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class="label 
label-default">Missing</span>' 84 messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class="label label-default">Infinite</span>' 85 messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros' 86 87 message_row = u'<li>{message}</li>' 88 [end of pandas_profiling/templates.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pandas_profiling/templates.py b/pandas_profiling/templates.py
--- a/pandas_profiling/templates.py
+++ b/pandas_profiling/templates.py
@@ -79,9 +79,9 @@
 messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class="label label-primary">Rejected</span>'
 messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class="label label-warning">Warning</span>'
 messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class="label label-warning">Warning</span>'
-messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]})'
+messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]}) <span class="label label-info">Skewed</span>'
 messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class="label label-default">Missing</span>'
 messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class="label label-default">Infinite</span>'
-messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'
+messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros <span class="label label-info">Zeros</span>'
 
 message_row = u'<li>{message}</li>'
{"golden_diff": "diff --git a/pandas_profiling/templates.py b/pandas_profiling/templates.py\n--- a/pandas_profiling/templates.py\n+++ b/pandas_profiling/templates.py\n@@ -79,9 +79,9 @@\n messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class=\"label label-primary\">Rejected</span>'\n messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class=\"label label-warning\">Warning</span>'\n messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class=\"label label-warning\">Warning</span>'\n-messages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]})'\n+messages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]}) <span class=\"label label-info\">Skewed</span>'\n messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class=\"label label-default\">Missing</span>'\n messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class=\"label label-default\">Infinite</span>'\n-messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'\n+messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros <span class=\"label label-info\">Zeros</span>'\n \n message_row = u'<li>{message}</li>'\n", "issue": "Add badges for \"highly skewed\", \"zeros\"\nI enjoy the badges on the report and am happy to submit a PR for this if it isn't being worked on elsewhere.\n", "before_files": [{"content": "# coding=UTF-8\n\n'''This file contains all templates used for generating the HTML profile report'''\n\nfrom jinja2 import Environment, PackageLoader\n\n# Initializing Jinja\npl = PackageLoader('pandas_profiling', 'templates')\njinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl)\n\n# Mapping between template name and file\ntemplates = {'freq_table_row': 'freq_table_row.html',\n 'mini_freq_table_row': 'mini_freq_table_row.html',\n 'freq_table': 'freq_table.html',\n 'mini_freq_table': 'mini_freq_table.html',\n 'row_num': 'row_num.html',\n 'row_date': 'row_date.html',\n 'row_cat': 'row_cat.html',\n 'row_bool': 'row_bool.html',\n 'row_corr': 'row_corr.html',\n 'row_recoded': 'row_recoded.html',\n 'row_const': 'row_const.html',\n 'row_unique': 'row_unique.html',\n 'overview': 'overview.html',\n 'sample': 'sample.html',\n 'base': 'base.html',\n 'wrapper': 'wrapper.html'\n }\n\n# Mapping between row type and var type\nvar_type = {'NUM': 'Numeric',\n 'DATE': 'Date',\n 'CAT': 'Categorical',\n 'UNIQUE': 'Categorical, Unique',\n 'BOOL': 'Boolean',\n 'CONST': 'Constant',\n 'CORR': 'Highly correlated',\n 'RECODED': 'Recoded'\n }\n\n\ndef template(template_name):\n \"\"\"Return a jinja template ready for rendering. 
If needed, global variables are initialized.\n\n Parameters\n ----------\n template_name: str, the name of the template as defined in the templates mapping\n\n Returns\n -------\n The Jinja template ready for rendering\n \"\"\"\n globals = None\n if template_name.startswith('row_'):\n # This is a row template setting global variable\n globals = dict()\n globals['vartype'] = var_type[template_name.split('_')[1].upper()]\n return jinja2_env.get_template(templates[template_name], globals=globals)\n\n\n# mapping between row type and template name\nrow_templates_dict = {'NUM': template('row_num'),\n 'DATE': template('row_date'),\n 'DISCRETE': template('row_num'),\n 'CAT': template('row_cat'),\n 'BOOL': template('row_bool'),\n 'UNIQUE': template('row_unique'),\n 'CONST': template('row_const'),\n 'CORR': template('row_corr'),\n 'RECODED': template('row_recoded')\n }\n\n# The number of column to use in the display of the frequency table according to the category\nmini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3}\n\nmessages = dict()\nmessages['CONST'] = u'{0[varname]} has constant value {0[mode]} <span class=\"label label-primary\">Rejected</span>'\nmessages['CORR'] = u'{0[varname]} is highly correlated with {0[correlation_var]} (\u03c1 = {0[correlation]}) <span class=\"label label-primary\">Rejected</span>'\nmessages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class=\"label label-primary\">Rejected</span>'\nmessages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class=\"label label-warning\">Warning</span>'\nmessages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class=\"label label-warning\">Warning</span>'\nmessages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]})'\nmessages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class=\"label label-default\">Missing</span>'\nmessages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class=\"label label-default\">Infinite</span>'\nmessages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'\n\nmessage_row = u'<li>{message}</li>'\n", "path": "pandas_profiling/templates.py"}]}
1,671
378
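Regarding the template strings this patch extends: they mix `{varname}` with `{0[skewness]}`-style lookups, which suggests they are rendered with `str.format`, receiving the per-variable stats mapping positionally and the column name as a keyword. A small sketch of that rendering, with invented stats values:

```python
# Rendering one of the patched templates; the stats below are made up.
stats = {"skewness": 12.7, "n_zeros": 48, "p_zeros": "9.6%"}

skewness_msg = (u"{varname} is highly skewed (\u03b31 = {0[skewness]}) "
                u'<span class="label label-info">Skewed</span>')
print(skewness_msg.format(stats, varname="price"))
# -> price is highly skewed (γ1 = 12.7) <span class="label label-info">Skewed</span>
```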
gh_patches_debug_29181
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> project_list in container add project form contains all projects @CarolingerSeilchenspringer at the moment all projects (not only the ones belonging to the organisation) can be added to a container. Should that stay that way? </issue> <code> [start of meinberlin/apps/projectcontainers/forms.py] 1 from django.db.models import Q 2 from django.utils.translation import ugettext_lazy as _ 3 4 from adhocracy4.dashboard.forms import ProjectCreateForm 5 from adhocracy4.dashboard.forms import ProjectDashboardForm 6 from meinberlin.apps.contrib.widgets import Select2MultipleWidget 7 8 from . import models 9 10 LABELS = { 11 'name': _('Title of your container'), 12 'description': _('Short description of your container'), 13 'tile_image': _('Logo'), 14 } 15 16 HELP_TEXTS = { 17 'name': _('This title will appear on the ' 18 'teaser card and on top of the container ' 19 'detail page. It should be max. 120 characters long'), 20 'description': _('This short description will appear on ' 21 'the header of the container and in the teaser. ' 22 'It should briefly state the goal of the ' 23 'projects in max. 250 chars.'), 24 'tile_image': _( 25 'The image will be shown in the container tile.' 26 ), 27 } 28 29 30 class ContainerCreateForm(ProjectCreateForm): 31 32 class Meta: 33 model = models.ProjectContainer 34 fields = ['name', 'description', 35 'tile_image', 'tile_image_copyright'] 36 labels = LABELS 37 help_texts = HELP_TEXTS 38 39 40 class ContainerBasicForm(ProjectDashboardForm): 41 42 class Meta: 43 model = models.ProjectContainer 44 fields = ['name', 'description', 'tile_image', 45 'tile_image_copyright', 'is_archived'] 46 required_for_project_publish = ['name', 'description'] 47 labels = LABELS 48 help_texts = HELP_TEXTS 49 50 51 class ContainerInformationForm(ProjectDashboardForm): 52 53 class Meta: 54 model = models.ProjectContainer 55 fields = ['information'] 56 required_for_project_publish = ['information'] 57 labels = { 58 'information': _('Description of your container'), 59 } 60 61 62 class ContainerProjectsForm(ProjectDashboardForm): 63 64 def __init__(self, *args, **kwargs): 65 super().__init__(*args, **kwargs) 66 67 # We tried to find a good balance between a short list and 68 # all necessary projects. The details may change over time. 69 # Projects that are already selected should remain in the queryset. 70 self.fields['projects'].queryset = self.fields['projects'].queryset \ 71 .filter(projectcontainer=None)\ 72 .filter(Q(containers=self.instance) | 73 (Q(containers=None) & 74 Q(is_archived=False) & 75 Q(is_public=True)))\ 76 .order_by('name') 77 78 class Meta: 79 model = models.ProjectContainer 80 fields = ['projects'] 81 required_for_project_publish = ['projects'] 82 widgets = { 83 'projects': Select2MultipleWidget, 84 } 85 [end of meinberlin/apps/projectcontainers/forms.py] [start of meinberlin/apps/projectcontainers/views.py] 1 from django.utils.translation import ugettext_lazy as _ 2 from django.views import generic 3 4 from adhocracy4.dashboard import mixins as dashboard_mixins 5 from adhocracy4.dashboard.blueprints import ProjectBlueprint 6 from adhocracy4.dashboard.components.forms.views import \ 7 ProjectComponentFormView 8 from adhocracy4.dashboard.views import ProjectCreateView 9 10 from . import forms 11 from . 
import models 12 13 14 class ContainerCreateView(ProjectCreateView): 15 model = models.ProjectContainer 16 slug_url_kwarg = 'project_slug' 17 form_class = forms.ContainerCreateForm 18 template_name = 'meinberlin_projectcontainers/container_create_form.html' 19 success_message = _('Container successfully created.') 20 21 blueprint = ProjectBlueprint( 22 title=_('Container'), 23 description=_( 24 'A container contains multiple projects.' 25 ), 26 content=[], 27 image='', 28 settings_model=None, 29 ) 30 31 32 class ContainerBasicFormView(ProjectComponentFormView): 33 model = models.ProjectContainer 34 35 @property 36 def project(self): 37 project = super().project 38 return project.projectcontainer 39 40 def get_object(self, queryset=None): 41 return self.project 42 43 44 class ContainerProjectsView(ProjectComponentFormView): 45 model = models.ProjectContainer 46 47 @property 48 def project(self): 49 project = super().project 50 return project.projectcontainer 51 52 def get_object(self, queryset=None): 53 return self.project 54 55 56 class ContainerListView(dashboard_mixins.DashboardBaseMixin, 57 generic.ListView): 58 model = models.ProjectContainer 59 paginate_by = 12 60 template_name = 'meinberlin_projectcontainers/container_list.html' 61 permission_required = 'a4projects.add_project' 62 menu_item = 'project' 63 64 def get_queryset(self): 65 return super().get_queryset().filter( 66 organisation=self.organisation 67 ) 68 69 def get_permission_object(self): 70 return self.organisation 71 [end of meinberlin/apps/projectcontainers/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/projectcontainers/forms.py b/meinberlin/apps/projectcontainers/forms.py
--- a/meinberlin/apps/projectcontainers/forms.py
+++ b/meinberlin/apps/projectcontainers/forms.py
@@ -62,12 +62,20 @@
 class ContainerProjectsForm(ProjectDashboardForm):
 
     def __init__(self, *args, **kwargs):
+        self.user = kwargs.pop('user')
+        self.organisation = kwargs.pop('organisation')
         super().__init__(*args, **kwargs)
 
-        # We tried to find a good balance between a short list and
-        # all necessary projects. The details may change over time.
-        # Projects that are already selected should remain in the queryset.
-        self.fields['projects'].queryset = self.fields['projects'].queryset \
+        projects = self.fields['projects']\
+            .queryset.filter(organisation=self.organisation)
+        if not self.organisation.has_initiator(self.user):
+            user_groups = self.user.groups.all()
+            org_groups = self.organisation.groups.all()
+            shared_groups = user_groups & org_groups
+            group = shared_groups.distinct().first()
+            projects = projects.filter(group=group)
+
+        self.fields['projects'].queryset = projects \
             .filter(projectcontainer=None)\
             .filter(Q(containers=self.instance) |
                     (Q(containers=None) &
diff --git a/meinberlin/apps/projectcontainers/views.py b/meinberlin/apps/projectcontainers/views.py
--- a/meinberlin/apps/projectcontainers/views.py
+++ b/meinberlin/apps/projectcontainers/views.py
@@ -52,6 +52,12 @@
     def get_object(self, queryset=None):
         return self.project
 
+    def get_form_kwargs(self):
+        kwargs = super().get_form_kwargs()
+        kwargs['user'] = self.request.user
+        kwargs['organisation'] = self.project.organisation
+        return kwargs
+
 
 class ContainerListView(dashboard_mixins.DashboardBaseMixin,
                         generic.ListView):
{"golden_diff": "diff --git a/meinberlin/apps/projectcontainers/forms.py b/meinberlin/apps/projectcontainers/forms.py\n--- a/meinberlin/apps/projectcontainers/forms.py\n+++ b/meinberlin/apps/projectcontainers/forms.py\n@@ -62,12 +62,20 @@\n class ContainerProjectsForm(ProjectDashboardForm):\n \n def __init__(self, *args, **kwargs):\n+ self.user = kwargs.pop('user')\n+ self.organisation = kwargs.pop('organisation')\n super().__init__(*args, **kwargs)\n \n- # We tried to find a good balance between a short list and\n- # all necessary projects. The details may change over time.\n- # Projects that are already selected should remain in the queryset.\n- self.fields['projects'].queryset = self.fields['projects'].queryset \\\n+ projects = self.fields['projects']\\\n+ .queryset.filter(organisation=self.organisation)\n+ if not self.organisation.has_initiator(self.user):\n+ user_groups = self.user.groups.all()\n+ org_groups = self.organisation.groups.all()\n+ shared_groups = user_groups & org_groups\n+ group = shared_groups.distinct().first()\n+ projects = projects.filter(group=group)\n+\n+ self.fields['projects'].queryset = projects \\\n .filter(projectcontainer=None)\\\n .filter(Q(containers=self.instance) |\n (Q(containers=None) &\ndiff --git a/meinberlin/apps/projectcontainers/views.py b/meinberlin/apps/projectcontainers/views.py\n--- a/meinberlin/apps/projectcontainers/views.py\n+++ b/meinberlin/apps/projectcontainers/views.py\n@@ -52,6 +52,12 @@\n def get_object(self, queryset=None):\n return self.project\n \n+ def get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs['user'] = self.request.user\n+ kwargs['organisation'] = self.project.organisation\n+ return kwargs\n+\n \n class ContainerListView(dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n", "issue": "project_list in container add project form contains all projects\n@CarolingerSeilchenspringer at the moment all projects (not only the ones belonging to the organisation) can be added to a container. Should that stay that way?\r\n\n", "before_files": [{"content": "from django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectCreateForm\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom meinberlin.apps.contrib.widgets import Select2MultipleWidget\n\nfrom . import models\n\nLABELS = {\n 'name': _('Title of your container'),\n 'description': _('Short description of your container'),\n 'tile_image': _('Logo'),\n}\n\nHELP_TEXTS = {\n 'name': _('This title will appear on the '\n 'teaser card and on top of the container '\n 'detail page. It should be max. 120 characters long'),\n 'description': _('This short description will appear on '\n 'the header of the container and in the teaser. '\n 'It should briefly state the goal of the '\n 'projects in max. 
250 chars.'),\n 'tile_image': _(\n 'The image will be shown in the container tile.'\n ),\n}\n\n\nclass ContainerCreateForm(ProjectCreateForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description',\n 'tile_image', 'tile_image_copyright']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerBasicForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived']\n required_for_project_publish = ['name', 'description']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerInformationForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['information']\n required_for_project_publish = ['information']\n labels = {\n 'information': _('Description of your container'),\n }\n\n\nclass ContainerProjectsForm(ProjectDashboardForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # We tried to find a good balance between a short list and\n # all necessary projects. The details may change over time.\n # Projects that are already selected should remain in the queryset.\n self.fields['projects'].queryset = self.fields['projects'].queryset \\\n .filter(projectcontainer=None)\\\n .filter(Q(containers=self.instance) |\n (Q(containers=None) &\n Q(is_archived=False) &\n Q(is_public=True)))\\\n .order_by('name')\n\n class Meta:\n model = models.ProjectContainer\n fields = ['projects']\n required_for_project_publish = ['projects']\n widgets = {\n 'projects': Select2MultipleWidget,\n }\n", "path": "meinberlin/apps/projectcontainers/forms.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom adhocracy4.dashboard.components.forms.views import \\\n ProjectComponentFormView\nfrom adhocracy4.dashboard.views import ProjectCreateView\n\nfrom . import forms\nfrom . 
import models\n\n\nclass ContainerCreateView(ProjectCreateView):\n model = models.ProjectContainer\n slug_url_kwarg = 'project_slug'\n form_class = forms.ContainerCreateForm\n template_name = 'meinberlin_projectcontainers/container_create_form.html'\n success_message = _('Container successfully created.')\n\n blueprint = ProjectBlueprint(\n title=_('Container'),\n description=_(\n 'A container contains multiple projects.'\n ),\n content=[],\n image='',\n settings_model=None,\n )\n\n\nclass ContainerBasicFormView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ContainerProjectsView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ContainerListView(dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n model = models.ProjectContainer\n paginate_by = 12\n template_name = 'meinberlin_projectcontainers/container_list.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n\n def get_queryset(self):\n return super().get_queryset().filter(\n organisation=self.organisation\n )\n\n def get_permission_object(self):\n return self.organisation\n", "path": "meinberlin/apps/projectcontainers/views.py"}]}
1,886
444
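The core of this record's fix is the narrowing logic: initiators of the organisation still see every organisation project, while other users are limited to projects of a group they share with the organisation. Django querysets support `&` as an SQL intersection, which is what `user_groups & org_groups` leans on. A rough standalone sketch of that selection; the model and method names follow the diff, while `selectable_projects` itself is a hypothetical helper:

```python
def selectable_projects(user, organisation, project_qs):
    """Hypothetical helper mirroring ContainerProjectsForm.__init__ above."""
    projects = project_qs.filter(organisation=organisation)
    if not organisation.has_initiator(user):
        # '&' intersects the two group querysets at the SQL level.
        shared_groups = user.groups.all() & organisation.groups.all()
        group = shared_groups.distinct().first()
        projects = projects.filter(group=group)
    return projects
```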
gh_patches_debug_6613
rasdani/github-patches
git_diff
CTFd__CTFd-2515
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Getting dynamic challenges by ID does not return decay function When getting a dynamic challenge from `GET /api/v1/challenges/<challenge-id>`, the challenge does not return its decay function. This seems to be caused by [this](https://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L60-L90) function which does not include `challenge.function`. This seems be have been missed out when implementing different decay functions for dynamic challenges. This does not have any impact on normal usage of CTFd, but I see no reason not to include the function in the response. </issue> <code> [start of CTFd/plugins/dynamic_challenges/__init__.py] 1 from flask import Blueprint 2 3 from CTFd.models import Challenges, db 4 from CTFd.plugins import register_plugin_assets_directory 5 from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge 6 from CTFd.plugins.dynamic_challenges.decay import DECAY_FUNCTIONS, logarithmic 7 from CTFd.plugins.migrations import upgrade 8 9 10 class DynamicChallenge(Challenges): 11 __mapper_args__ = {"polymorphic_identity": "dynamic"} 12 id = db.Column( 13 db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True 14 ) 15 initial = db.Column(db.Integer, default=0) 16 minimum = db.Column(db.Integer, default=0) 17 decay = db.Column(db.Integer, default=0) 18 function = db.Column(db.String(32), default="logarithmic") 19 20 def __init__(self, *args, **kwargs): 21 super(DynamicChallenge, self).__init__(**kwargs) 22 self.value = kwargs["initial"] 23 24 25 class DynamicValueChallenge(BaseChallenge): 26 id = "dynamic" # Unique identifier used to register challenges 27 name = "dynamic" # Name of a challenge type 28 templates = ( 29 { # Handlebars templates used for each aspect of challenge editing & viewing 30 "create": "/plugins/dynamic_challenges/assets/create.html", 31 "update": "/plugins/dynamic_challenges/assets/update.html", 32 "view": "/plugins/dynamic_challenges/assets/view.html", 33 } 34 ) 35 scripts = { # Scripts that are loaded when a template is loaded 36 "create": "/plugins/dynamic_challenges/assets/create.js", 37 "update": "/plugins/dynamic_challenges/assets/update.js", 38 "view": "/plugins/dynamic_challenges/assets/view.js", 39 } 40 # Route at which files are accessible. This must be registered using register_plugin_assets_directory() 41 route = "/plugins/dynamic_challenges/assets/" 42 # Blueprint used to access the static_folder directory. 43 blueprint = Blueprint( 44 "dynamic_challenges", 45 __name__, 46 template_folder="templates", 47 static_folder="assets", 48 ) 49 challenge_model = DynamicChallenge 50 51 @classmethod 52 def calculate_value(cls, challenge): 53 f = DECAY_FUNCTIONS.get(challenge.function, logarithmic) 54 value = f(challenge) 55 56 challenge.value = value 57 db.session.commit() 58 return challenge 59 60 @classmethod 61 def read(cls, challenge): 62 """ 63 This method is in used to access the data of a challenge in a format processable by the front end. 
64 65 :param challenge: 66 :return: Challenge object, data dictionary to be returned to the user 67 """ 68 challenge = DynamicChallenge.query.filter_by(id=challenge.id).first() 69 data = { 70 "id": challenge.id, 71 "name": challenge.name, 72 "value": challenge.value, 73 "initial": challenge.initial, 74 "decay": challenge.decay, 75 "minimum": challenge.minimum, 76 "description": challenge.description, 77 "connection_info": challenge.connection_info, 78 "next_id": challenge.next_id, 79 "category": challenge.category, 80 "state": challenge.state, 81 "max_attempts": challenge.max_attempts, 82 "type": challenge.type, 83 "type_data": { 84 "id": cls.id, 85 "name": cls.name, 86 "templates": cls.templates, 87 "scripts": cls.scripts, 88 }, 89 } 90 return data 91 92 @classmethod 93 def update(cls, challenge, request): 94 """ 95 This method is used to update the information associated with a challenge. This should be kept strictly to the 96 Challenges table and any child tables. 97 98 :param challenge: 99 :param request: 100 :return: 101 """ 102 data = request.form or request.get_json() 103 104 for attr, value in data.items(): 105 # We need to set these to floats so that the next operations don't operate on strings 106 if attr in ("initial", "minimum", "decay"): 107 value = float(value) 108 setattr(challenge, attr, value) 109 110 return DynamicValueChallenge.calculate_value(challenge) 111 112 @classmethod 113 def solve(cls, user, team, challenge, request): 114 super().solve(user, team, challenge, request) 115 116 DynamicValueChallenge.calculate_value(challenge) 117 118 119 def load(app): 120 upgrade(plugin_name="dynamic_challenges") 121 CHALLENGE_CLASSES["dynamic"] = DynamicValueChallenge 122 register_plugin_assets_directory( 123 app, base_path="/plugins/dynamic_challenges/assets/" 124 ) 125 [end of CTFd/plugins/dynamic_challenges/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py
--- a/CTFd/plugins/dynamic_challenges/__init__.py
+++ b/CTFd/plugins/dynamic_challenges/__init__.py
@@ -73,6 +73,7 @@
             "initial": challenge.initial,
             "decay": challenge.decay,
             "minimum": challenge.minimum,
+            "function": challenge.function,
             "description": challenge.description,
             "connection_info": challenge.connection_info,
             "next_id": challenge.next_id,
{"golden_diff": "diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py\n--- a/CTFd/plugins/dynamic_challenges/__init__.py\n+++ b/CTFd/plugins/dynamic_challenges/__init__.py\n@@ -73,6 +73,7 @@\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n+ \"function\": challenge.function,\n \"description\": challenge.description,\n \"connection_info\": challenge.connection_info,\n \"next_id\": challenge.next_id,\n", "issue": "Getting dynamic challenges by ID does not return decay function\nWhen getting a dynamic challenge from `GET /api/v1/challenges/<challenge-id>`, the challenge does not return its decay function.\r\n\r\nThis seems to be caused by [this](https://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L60-L90) function which does not include `challenge.function`.\r\n\r\nThis seems be have been missed out when implementing different decay functions for dynamic challenges. This does not have any impact on normal usage of CTFd, but I see no reason not to include the function in the response.\r\n\n", "before_files": [{"content": "from flask import Blueprint\n\nfrom CTFd.models import Challenges, db\nfrom CTFd.plugins import register_plugin_assets_directory\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge\nfrom CTFd.plugins.dynamic_challenges.decay import DECAY_FUNCTIONS, logarithmic\nfrom CTFd.plugins.migrations import upgrade\n\n\nclass DynamicChallenge(Challenges):\n __mapper_args__ = {\"polymorphic_identity\": \"dynamic\"}\n id = db.Column(\n db.Integer, db.ForeignKey(\"challenges.id\", ondelete=\"CASCADE\"), primary_key=True\n )\n initial = db.Column(db.Integer, default=0)\n minimum = db.Column(db.Integer, default=0)\n decay = db.Column(db.Integer, default=0)\n function = db.Column(db.String(32), default=\"logarithmic\")\n\n def __init__(self, *args, **kwargs):\n super(DynamicChallenge, self).__init__(**kwargs)\n self.value = kwargs[\"initial\"]\n\n\nclass DynamicValueChallenge(BaseChallenge):\n id = \"dynamic\" # Unique identifier used to register challenges\n name = \"dynamic\" # Name of a challenge type\n templates = (\n { # Handlebars templates used for each aspect of challenge editing & viewing\n \"create\": \"/plugins/dynamic_challenges/assets/create.html\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.html\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.html\",\n }\n )\n scripts = { # Scripts that are loaded when a template is loaded\n \"create\": \"/plugins/dynamic_challenges/assets/create.js\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.js\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.js\",\n }\n # Route at which files are accessible. 
This must be registered using register_plugin_assets_directory()\n route = \"/plugins/dynamic_challenges/assets/\"\n # Blueprint used to access the static_folder directory.\n blueprint = Blueprint(\n \"dynamic_challenges\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"assets\",\n )\n challenge_model = DynamicChallenge\n\n @classmethod\n def calculate_value(cls, challenge):\n f = DECAY_FUNCTIONS.get(challenge.function, logarithmic)\n value = f(challenge)\n\n challenge.value = value\n db.session.commit()\n return challenge\n\n @classmethod\n def read(cls, challenge):\n \"\"\"\n This method is in used to access the data of a challenge in a format processable by the front end.\n\n :param challenge:\n :return: Challenge object, data dictionary to be returned to the user\n \"\"\"\n challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = {\n \"id\": challenge.id,\n \"name\": challenge.name,\n \"value\": challenge.value,\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n \"description\": challenge.description,\n \"connection_info\": challenge.connection_info,\n \"next_id\": challenge.next_id,\n \"category\": challenge.category,\n \"state\": challenge.state,\n \"max_attempts\": challenge.max_attempts,\n \"type\": challenge.type,\n \"type_data\": {\n \"id\": cls.id,\n \"name\": cls.name,\n \"templates\": cls.templates,\n \"scripts\": cls.scripts,\n },\n }\n return data\n\n @classmethod\n def update(cls, challenge, request):\n \"\"\"\n This method is used to update the information associated with a challenge. This should be kept strictly to the\n Challenges table and any child tables.\n\n :param challenge:\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n\n for attr, value in data.items():\n # We need to set these to floats so that the next operations don't operate on strings\n if attr in (\"initial\", \"minimum\", \"decay\"):\n value = float(value)\n setattr(challenge, attr, value)\n\n return DynamicValueChallenge.calculate_value(challenge)\n\n @classmethod\n def solve(cls, user, team, challenge, request):\n super().solve(user, team, challenge, request)\n\n DynamicValueChallenge.calculate_value(challenge)\n\n\ndef load(app):\n upgrade(plugin_name=\"dynamic_challenges\")\n CHALLENGE_CLASSES[\"dynamic\"] = DynamicValueChallenge\n register_plugin_assets_directory(\n app, base_path=\"/plugins/dynamic_challenges/assets/\"\n )\n", "path": "CTFd/plugins/dynamic_challenges/__init__.py"}]}
1,908
128
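The one-line fix works because `read()` assembles the API payload by hand, so every column on the model — here the `function` field introduced with the decay-function feature — has to be mirrored into the dict explicitly; nothing serializes the model automatically. After the patch, a response body would plausibly contain something like the following (all values invented):

```python
# Illustrative shape of GET /api/v1/challenges/<id> data after the fix.
challenge_payload = {
    "id": 1,
    "type": "dynamic",
    "value": 472,
    "initial": 500,
    "decay": 25,
    "minimum": 100,
    "function": "logarithmic",  # previously missing from read()
}
```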
gh_patches_debug_9313
rasdani/github-patches
git_diff
facebookresearch__hydra-1364
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Ax-Plugin] Add support for Python 3.9 Python 3.9 support pending on scikit 2.4.0 release. Relevant comment: https://github.com/scikit-learn/scikit-learn/issues/18621#issuecomment-733078676 Related to #1062 </issue> <code> [start of plugins/hydra_ax_sweeper/setup.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 # type: ignore 3 from setuptools import find_namespace_packages, setup 4 5 with open("README.md", "r") as fh: 6 LONG_DESC = fh.read() 7 setup( 8 name="hydra-ax-sweeper", 9 version="1.1.0rc1", 10 author="Omry Yadan, Shagun Sodhani", 11 author_email="[email protected], [email protected]", 12 description="Hydra Ax Sweeper plugin", 13 long_description=LONG_DESC, 14 long_description_content_type="text/markdown", 15 url="https://github.com/facebookresearch/hydra/", 16 packages=find_namespace_packages(include=["hydra_plugins.*"]), 17 classifiers=[ 18 "License :: OSI Approved :: MIT License", 19 "Programming Language :: Python :: 3.7", 20 "Programming Language :: Python :: 3.8", 21 # "Programming Language :: Python :: 3.9", 22 "Operating System :: POSIX :: Linux", 23 "Operating System :: MacOS", 24 "Development Status :: 4 - Beta", 25 ], 26 install_requires=["hydra-core>=1.0.0", "ax-platform>=0.1.13"], 27 include_package_data=True, 28 ) 29 [end of plugins/hydra_ax_sweeper/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py
--- a/plugins/hydra_ax_sweeper/setup.py
+++ b/plugins/hydra_ax_sweeper/setup.py
@@ -18,7 +18,7 @@
             "License :: OSI Approved :: MIT License",
             "Programming Language :: Python :: 3.7",
             "Programming Language :: Python :: 3.8",
-            # "Programming Language :: Python :: 3.9",
+            "Programming Language :: Python :: 3.9",
             "Operating System :: POSIX :: Linux",
             "Operating System :: MacOS",
             "Development Status :: 4 - Beta",
{"golden_diff": "diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py\n--- a/plugins/hydra_ax_sweeper/setup.py\n+++ b/plugins/hydra_ax_sweeper/setup.py\n@@ -18,7 +18,7 @@\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n- # \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n", "issue": "[Ax-Plugin] Add support for Python 3.9\nPython 3.9 support pending on scikit 2.4.0 release. Relevant comment: https://github.com/scikit-learn/scikit-learn/issues/18621#issuecomment-733078676\r\n\r\nRelated to #1062 \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n # \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\"hydra-core>=1.0.0\", \"ax-platform>=0.1.13\"],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}]}
943
152
gh_patches_debug_34844
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-375
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [ENH] Naming conventions I wanted to discuss naming conventions for the various functions and arguments for consistency. `expand_column` has a parameter `column` whereas `add_column` had `col_name`. Also, is `_column` necessary in each function? Would it be ok to just have an `add()` or `transform()` method? In general I'm more on the side of more verbose function names, but just wanted to throw the idea out there. Similarly, when following the format of `limit_column_characters` functions like `change_type` should probably be named `change_column_type`. I'm not married to any of this (except the function arguments, those should be consistent) but wanted to get peoples' ideas. </issue> <code> [start of janitor/io.py] 1 import os 2 from glob import glob 3 4 import pandas as pd 5 6 7 def read_csvs(filespath: str, seperate_df: bool = False, **kwargs): 8 """ 9 :param filespath: The filepath pattern matching the CSVs files. 10 Accepts regular expressions, with or without csv extension. 11 :param seperate_df: If False (default) returns a single Dataframe 12 with the concatenation of the csv files. 13 If True, returns a dictionary of seperate dataframes 14 for each CSV file. 15 :param kwargs: Keyword arguments to pass into the 16 original pandas `read_csv`. 17 """ 18 # Sanitize input 19 assert filespath is not None 20 assert len(filespath) != 0 21 22 # Read the csv files 23 dfs = { 24 os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath) 25 } 26 # Check if dataframes have been read 27 if len(dfs) == 0: 28 raise ValueError("No CSV files to read with the given filespath") 29 # Concatenate the dataframes if requested (default) 30 col_names = list(dfs.values())[0].columns 31 if not seperate_df: 32 # If columns do not match raise an error 33 for df in dfs.values(): 34 if not all(df.columns == col_names): 35 raise ValueError( 36 "Columns in input CSV files do not match." 37 "Files cannot be concatenated" 38 ) 39 return pd.concat(list(dfs.values()), ignore_index=True, sort=False) 40 else: 41 return dfs 42 [end of janitor/io.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/janitor/io.py b/janitor/io.py --- a/janitor/io.py +++ b/janitor/io.py @@ -1,34 +1,49 @@ import os from glob import glob +from typing import Iterable, Union import pandas as pd +from .utils import deprecated_alias -def read_csvs(filespath: str, seperate_df: bool = False, **kwargs): + +@deprecated_alias(seperate_df="separate_df", filespath="files_path") +def read_csvs( + files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs +) -> Union[pd.DataFrame, dict]: """ - :param filespath: The filepath pattern matching the CSVs files. + :param files_path: The filepath pattern matching the CSVs files. Accepts regular expressions, with or without csv extension. - :param seperate_df: If False (default) returns a single Dataframe + Also accepts iterable of file paths. + :param separate_df: If False (default) returns a single Dataframe with the concatenation of the csv files. - If True, returns a dictionary of seperate dataframes + If True, returns a dictionary of separate dataframes for each CSV file. :param kwargs: Keyword arguments to pass into the original pandas `read_csv`. """ # Sanitize input - assert filespath is not None - assert len(filespath) != 0 + assert files_path is not None + assert len(files_path) != 0 # Read the csv files - dfs = { - os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath) - } + # String to file/folder or file pattern provided + if isinstance(files_path, str): + dfs = { + os.path.basename(f): pd.read_csv(f, **kwargs) + for f in glob(files_path) + } + # Iterable of file paths provided + else: + dfs = { + os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path + } # Check if dataframes have been read if len(dfs) == 0: - raise ValueError("No CSV files to read with the given filespath") + raise ValueError("No CSV files to read with the given `files_path`") # Concatenate the dataframes if requested (default) col_names = list(dfs.values())[0].columns - if not seperate_df: + if not separate_df: # If columns do not match raise an error for df in dfs.values(): if not all(df.columns == col_names):
{"golden_diff": "diff --git a/janitor/io.py b/janitor/io.py\n--- a/janitor/io.py\n+++ b/janitor/io.py\n@@ -1,34 +1,49 @@\n import os\n from glob import glob\n+from typing import Iterable, Union\n \n import pandas as pd\n \n+from .utils import deprecated_alias\n \n-def read_csvs(filespath: str, seperate_df: bool = False, **kwargs):\n+\n+@deprecated_alias(seperate_df=\"separate_df\", filespath=\"files_path\")\n+def read_csvs(\n+ files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs\n+) -> Union[pd.DataFrame, dict]:\n \"\"\"\n- :param filespath: The filepath pattern matching the CSVs files.\n+ :param files_path: The filepath pattern matching the CSVs files.\n Accepts regular expressions, with or without csv extension.\n- :param seperate_df: If False (default) returns a single Dataframe\n+ Also accepts iterable of file paths.\n+ :param separate_df: If False (default) returns a single Dataframe\n with the concatenation of the csv files.\n- If True, returns a dictionary of seperate dataframes\n+ If True, returns a dictionary of separate dataframes\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n \"\"\"\n # Sanitize input\n- assert filespath is not None\n- assert len(filespath) != 0\n+ assert files_path is not None\n+ assert len(files_path) != 0\n \n # Read the csv files\n- dfs = {\n- os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)\n- }\n+ # String to file/folder or file pattern provided\n+ if isinstance(files_path, str):\n+ dfs = {\n+ os.path.basename(f): pd.read_csv(f, **kwargs)\n+ for f in glob(files_path)\n+ }\n+ # Iterable of file paths provided\n+ else:\n+ dfs = {\n+ os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path\n+ }\n # Check if dataframes have been read\n if len(dfs) == 0:\n- raise ValueError(\"No CSV files to read with the given filespath\")\n+ raise ValueError(\"No CSV files to read with the given `files_path`\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs.values())[0].columns\n- if not seperate_df:\n+ if not separate_df:\n # If columns do not match raise an error\n for df in dfs.values():\n if not all(df.columns == col_names):\n", "issue": "[ENH] Naming conventions\nI wanted to discuss naming conventions for the various functions and arguments for consistency. `expand_column` has a parameter `column` whereas `add_column` had `col_name`.\r\n\r\nAlso, is `_column` necessary in each function? Would it be ok to just have an `add()` or `transform()` method? 
In general I'm more on the side of more verbose function names, but just wanted to throw the idea out there.\r\n\r\nSimilarly, when following the format of `limit_column_characters` functions like `change_type` should probably be named `change_column_type`.\r\n\r\nI'm not married to any of this (except the function arguments, those should be consistent) but wanted to get peoples' ideas.\n", "before_files": [{"content": "import os\nfrom glob import glob\n\nimport pandas as pd\n\n\ndef read_csvs(filespath: str, seperate_df: bool = False, **kwargs):\n \"\"\"\n :param filespath: The filepath pattern matching the CSVs files.\n Accepts regular expressions, with or without csv extension.\n :param seperate_df: If False (default) returns a single Dataframe\n with the concatenation of the csv files.\n If True, returns a dictionary of seperate dataframes\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n \"\"\"\n # Sanitize input\n assert filespath is not None\n assert len(filespath) != 0\n\n # Read the csv files\n dfs = {\n os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)\n }\n # Check if dataframes have been read\n if len(dfs) == 0:\n raise ValueError(\"No CSV files to read with the given filespath\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs.values())[0].columns\n if not seperate_df:\n # If columns do not match raise an error\n for df in dfs.values():\n if not all(df.columns == col_names):\n raise ValueError(\n \"Columns in input CSV files do not match.\"\n \"Files cannot be concatenated\"\n )\n return pd.concat(list(dfs.values()), ignore_index=True, sort=False)\n else:\n return dfs\n", "path": "janitor/io.py"}]}
1,089
602
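Note on the record above: the golden diff imports `deprecated_alias` from `janitor.utils` without showing its body. Below is a minimal sketch of how such a keyword-alias decorator can work; the actual pyjanitor helper may differ in naming, warning category, and error handling, so treat everything here as an assumption rather than the library's implementation.

```python
import functools
import warnings


def deprecated_alias(**aliases):
    """Map deprecated keyword names onto their replacements, with a warning."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for old, new in aliases.items():
                if old in kwargs:
                    if new in kwargs:
                        raise TypeError(
                            f"{func.__name__} got both {old!r} and {new!r}"
                        )
                    warnings.warn(
                        f"{old!r} is deprecated; use {new!r}", DeprecationWarning
                    )
                    kwargs[new] = kwargs.pop(old)
            return func(*args, **kwargs)
        return wrapper
    return decorator


@deprecated_alias(seperate_df="separate_df", filespath="files_path")
def read_csvs(files_path, separate_df=False):
    return files_path, separate_df


# Old spelling still works, but now emits a DeprecationWarning:
read_csvs(filespath="data/*.csv")
```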
gh_patches_debug_25000
rasdani/github-patches
git_diff
adfinis__timed-backend-938
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bug: remaining effort on initial report creation is buggy ## Scenario Create a report and add an estimated remaining effort to it. ## Expected behavior It updates the remaining effort of the certain task in the statistics view. ## Actual behavior After submitting the report the remaining effort does **not** update in stats view. But after editing the rem. effort and saving the report again in the timesheet, it will update the values in the stats view. The attributes are fine in both requests to the backend. I suspect that a the `POST` and `PATCH` requests have different handling of the remaining effort attribute. Maybe the `POST` does not properly trigger the update? ![image](https://user-images.githubusercontent.com/10029904/219358043-268b4856-1b2e-430a-b4f0-e9891d1f799e.png) </issue> <code> [start of timed/tracking/signals.py] 1 from django.db.models import Sum 2 from django.db.models.signals import pre_save 3 from django.dispatch import receiver 4 5 from timed.tracking.models import Report 6 7 8 @receiver(pre_save, sender=Report) 9 def update_rejected_on_reports(sender, instance, **kwargs): 10 """Unreject report when the task changes.""" 11 # Check if the report is being created or updated 12 if instance.pk and instance.rejected: 13 report = Report.objects.get(id=instance.id) 14 if report.task_id != instance.task_id: 15 instance.rejected = False 16 17 18 @receiver(pre_save, sender=Report) 19 def update_most_recent_remaining_effort(sender, instance, **kwargs): 20 """Update remaining effort on task, if remaining effort tracking is active. 21 22 Update most_recent_remaining_effort on task and total_remaining_effort on project 23 only if remaining effort on report has changed. 24 Any other change on report should not trigger this signal. 25 """ 26 if kwargs.get("raw", False): # pragma: no cover 27 return 28 29 if not instance.pk: 30 return 31 if instance.task.project.remaining_effort_tracking is not True: 32 return 33 34 if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort: 35 task = instance.task 36 task.most_recent_remaining_effort = instance.remaining_effort 37 task.save() 38 39 project = task.project 40 total_remaining_effort = ( 41 project.tasks.all() 42 .aggregate(sum_remaining=Sum("most_recent_remaining_effort")) 43 .get("sum_remaining") 44 ) 45 project.total_remaining_effort = total_remaining_effort 46 project.save() 47 [end of timed/tracking/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/timed/tracking/signals.py b/timed/tracking/signals.py --- a/timed/tracking/signals.py +++ b/timed/tracking/signals.py @@ -26,21 +26,30 @@ if kwargs.get("raw", False): # pragma: no cover return - if not instance.pk: - return if instance.task.project.remaining_effort_tracking is not True: return + # update most_recent_remaining_effort and total_remaining_effort on report creation + if not instance.pk: + update_remaining_effort(instance) + return + + # check if remaining effort has changed on report update if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort: - task = instance.task - task.most_recent_remaining_effort = instance.remaining_effort - task.save() - - project = task.project - total_remaining_effort = ( - project.tasks.all() - .aggregate(sum_remaining=Sum("most_recent_remaining_effort")) - .get("sum_remaining") - ) - project.total_remaining_effort = total_remaining_effort - project.save() + update_remaining_effort(instance) + + +def update_remaining_effort(report): + task = report.task + project = task.project + + task.most_recent_remaining_effort = report.remaining_effort + task.save() + + total_remaining_effort = ( + task.project.tasks.all() + .aggregate(sum_remaining=Sum("most_recent_remaining_effort")) + .get("sum_remaining") + ) + project.total_remaining_effort = total_remaining_effort + project.save()
{"golden_diff": "diff --git a/timed/tracking/signals.py b/timed/tracking/signals.py\n--- a/timed/tracking/signals.py\n+++ b/timed/tracking/signals.py\n@@ -26,21 +26,30 @@\n if kwargs.get(\"raw\", False): # pragma: no cover\n return\n \n- if not instance.pk:\n- return\n if instance.task.project.remaining_effort_tracking is not True:\n return\n \n+ # update most_recent_remaining_effort and total_remaining_effort on report creation\n+ if not instance.pk:\n+ update_remaining_effort(instance)\n+ return\n+\n+ # check if remaining effort has changed on report update\n if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:\n- task = instance.task\n- task.most_recent_remaining_effort = instance.remaining_effort\n- task.save()\n-\n- project = task.project\n- total_remaining_effort = (\n- project.tasks.all()\n- .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n- .get(\"sum_remaining\")\n- )\n- project.total_remaining_effort = total_remaining_effort\n- project.save()\n+ update_remaining_effort(instance)\n+\n+\n+def update_remaining_effort(report):\n+ task = report.task\n+ project = task.project\n+\n+ task.most_recent_remaining_effort = report.remaining_effort\n+ task.save()\n+\n+ total_remaining_effort = (\n+ task.project.tasks.all()\n+ .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n+ .get(\"sum_remaining\")\n+ )\n+ project.total_remaining_effort = total_remaining_effort\n+ project.save()\n", "issue": "bug: remaining effort on initial report creation is buggy\n## Scenario\r\nCreate a report and add an estimated remaining effort to it.\r\n\r\n## Expected behavior\r\nIt updates the remaining effort of the certain task in the statistics view.\r\n\r\n## Actual behavior\r\nAfter submitting the report the remaining effort does **not** update in stats view. But after editing the rem. effort and saving the report again in the timesheet, it will update the values in the stats view.\r\n\r\nThe attributes are fine in both requests to the backend. I suspect that a the `POST` and `PATCH` requests have different handling of the remaining effort attribute. 
Maybe the `POST` does not properly trigger the update?\r\n![image](https://user-images.githubusercontent.com/10029904/219358043-268b4856-1b2e-430a-b4f0-e9891d1f799e.png)\r\n\n", "before_files": [{"content": "from django.db.models import Sum\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\n\nfrom timed.tracking.models import Report\n\n\n@receiver(pre_save, sender=Report)\ndef update_rejected_on_reports(sender, instance, **kwargs):\n \"\"\"Unreject report when the task changes.\"\"\"\n # Check if the report is being created or updated\n if instance.pk and instance.rejected:\n report = Report.objects.get(id=instance.id)\n if report.task_id != instance.task_id:\n instance.rejected = False\n\n\n@receiver(pre_save, sender=Report)\ndef update_most_recent_remaining_effort(sender, instance, **kwargs):\n \"\"\"Update remaining effort on task, if remaining effort tracking is active.\n\n Update most_recent_remaining_effort on task and total_remaining_effort on project\n only if remaining effort on report has changed.\n Any other change on report should not trigger this signal.\n \"\"\"\n if kwargs.get(\"raw\", False): # pragma: no cover\n return\n\n if not instance.pk:\n return\n if instance.task.project.remaining_effort_tracking is not True:\n return\n\n if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:\n task = instance.task\n task.most_recent_remaining_effort = instance.remaining_effort\n task.save()\n\n project = task.project\n total_remaining_effort = (\n project.tasks.all()\n .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n .get(\"sum_remaining\")\n )\n project.total_remaining_effort = total_remaining_effort\n project.save()\n", "path": "timed/tracking/signals.py"}]}
1,169
377
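The fix in this record works because the `pre_save` handler previously bailed out whenever `instance.pk` was unset, which is exactly the state a freshly created (`POST`) report is in. A rough regression-test sketch for that behavior follows; the `TaskFactory`/`ReportFactory` fixtures and the duration value are assumptions and may not match the project's actual test utilities.

```python
import datetime

import pytest

# Assumed factory imports; the real timed-backend test suite may differ.
from timed.tracking.factories import ReportFactory, TaskFactory


@pytest.mark.django_db
def test_remaining_effort_updates_on_creation():
    task = TaskFactory(project__remaining_effort_tracking=True)

    # A freshly created report (the POST case) should already propagate.
    ReportFactory(task=task, remaining_effort=datetime.timedelta(hours=2))

    task.refresh_from_db()
    assert task.most_recent_remaining_effort == datetime.timedelta(hours=2)
```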
gh_patches_debug_964
rasdani/github-patches
git_diff
mars-project__mars-1631
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] `Cannot find serializable class` raised when calling set_label When calling DataFrame.columns=xxxx in distributed environment, a KeyError is raised with message `Cannot find serializable class for type_id 1517314310`. </issue> <code> [start of mars/dataframe/indexing/__init__.py] 1 # Copyright 1999-2020 Alibaba Group Holding Ltd. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 def _install(): 17 from pandas.util import cache_readonly 18 from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE 19 from .at import at 20 from .getitem import dataframe_getitem, series_getitem 21 from .iat import iat 22 from .iloc import iloc, head, tail 23 from .insert import df_insert 24 from .loc import loc 25 from .rename import df_rename, series_rename, index_rename 26 from .reset_index import df_reset_index, series_reset_index 27 from .set_index import set_index 28 from .setitem import dataframe_setitem 29 from .reindex import reindex 30 from .where import mask, where 31 32 for cls in DATAFRAME_TYPE + SERIES_TYPE: 33 setattr(cls, 'iloc', cache_readonly(iloc)) 34 setattr(cls, 'loc', cache_readonly(loc)) 35 setattr(cls, 'iat', cache_readonly(iat)) 36 setattr(cls, 'at', cache_readonly(at)) 37 setattr(cls, 'reindex', reindex) 38 setattr(cls, 'head', head) 39 setattr(cls, 'tail', tail) 40 setattr(cls, 'mask', mask) 41 setattr(cls, 'where', where) 42 43 for cls in DATAFRAME_TYPE: 44 setattr(cls, 'set_index', set_index) 45 setattr(cls, '__getitem__', dataframe_getitem) 46 setattr(cls, '__setitem__', dataframe_setitem) 47 setattr(cls, 'insert', df_insert) 48 setattr(cls, 'reset_index', df_reset_index) 49 setattr(cls, 'rename', df_rename) 50 51 for cls in SERIES_TYPE: 52 setattr(cls, '__getitem__', series_getitem) 53 setattr(cls, 'reset_index', series_reset_index) 54 setattr(cls, 'rename', series_rename) 55 56 for cls in INDEX_TYPE: 57 setattr(cls, 'rename', index_rename) 58 59 60 _install() 61 del _install 62 [end of mars/dataframe/indexing/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mars/dataframe/indexing/__init__.py b/mars/dataframe/indexing/__init__.py --- a/mars/dataframe/indexing/__init__.py +++ b/mars/dataframe/indexing/__init__.py @@ -56,6 +56,10 @@ for cls in INDEX_TYPE: setattr(cls, 'rename', index_rename) + # make sure operand is registered + from .set_label import DataFrameSetLabel + del DataFrameSetLabel + _install() del _install
{"golden_diff": "diff --git a/mars/dataframe/indexing/__init__.py b/mars/dataframe/indexing/__init__.py\n--- a/mars/dataframe/indexing/__init__.py\n+++ b/mars/dataframe/indexing/__init__.py\n@@ -56,6 +56,10 @@\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n \n+ # make sure operand is registered\n+ from .set_label import DataFrameSetLabel\n+ del DataFrameSetLabel\n+\n \n _install()\n del _install\n", "issue": "[BUG] `Cannot find serializable class` raised when calling set_label\nWhen calling DataFrame.columns=xxxx in distributed environment, a KeyError is raised with message `Cannot find serializable class for type_id 1517314310`.\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef _install():\n from pandas.util import cache_readonly\n from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE\n from .at import at\n from .getitem import dataframe_getitem, series_getitem\n from .iat import iat\n from .iloc import iloc, head, tail\n from .insert import df_insert\n from .loc import loc\n from .rename import df_rename, series_rename, index_rename\n from .reset_index import df_reset_index, series_reset_index\n from .set_index import set_index\n from .setitem import dataframe_setitem\n from .reindex import reindex\n from .where import mask, where\n\n for cls in DATAFRAME_TYPE + SERIES_TYPE:\n setattr(cls, 'iloc', cache_readonly(iloc))\n setattr(cls, 'loc', cache_readonly(loc))\n setattr(cls, 'iat', cache_readonly(iat))\n setattr(cls, 'at', cache_readonly(at))\n setattr(cls, 'reindex', reindex)\n setattr(cls, 'head', head)\n setattr(cls, 'tail', tail)\n setattr(cls, 'mask', mask)\n setattr(cls, 'where', where)\n\n for cls in DATAFRAME_TYPE:\n setattr(cls, 'set_index', set_index)\n setattr(cls, '__getitem__', dataframe_getitem)\n setattr(cls, '__setitem__', dataframe_setitem)\n setattr(cls, 'insert', df_insert)\n setattr(cls, 'reset_index', df_reset_index)\n setattr(cls, 'rename', df_rename)\n\n for cls in SERIES_TYPE:\n setattr(cls, '__getitem__', series_getitem)\n setattr(cls, 'reset_index', series_reset_index)\n setattr(cls, 'rename', series_rename)\n\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n\n\n_install()\ndel _install\n", "path": "mars/dataframe/indexing/__init__.py"}]}
1,251
120
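Context for the one-line fix above: Mars resolves serialized operands through a registry keyed by `type_id`, populated as a side effect of importing the module that defines each class, so if nothing imports `set_label`, a worker deserializing that operand cannot look it up. A simplified sketch of that registration pattern (not Mars's actual code, just the general mechanism):

```python
_registry = {}


class SerializableMeta(type):
    """Register every subclass by its type_id at class-creation time."""
    def __new__(mcs, name, bases, namespace):
        cls = super().__new__(mcs, name, bases, namespace)
        type_id = namespace.get("_type_id")
        if type_id is not None:
            _registry[type_id] = cls
        return cls


class Serializable(metaclass=SerializableMeta):
    _type_id = None


def lookup(type_id):
    try:
        return _registry[type_id]
    except KeyError:
        raise KeyError(f"Cannot find serializable class for type_id {type_id}")


class DataFrameSetLabel(Serializable):
    _type_id = 1517314310  # value taken from the error message in the record


# Merely defining (i.e. importing) the class makes lookup succeed:
assert lookup(1517314310) is DataFrameSetLabel
```

This is why the golden diff only needs `from .set_label import DataFrameSetLabel` followed by `del DataFrameSetLabel`: the import triggers registration and the name itself is not needed afterwards.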
gh_patches_debug_18021
rasdani/github-patches
git_diff
dj-stripe__dj-stripe-348
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Django 1.10 support https://github.com/kavdev/dj-stripe/blob/master/djstripe/__init__.py#L19 `'1.10'` is less than `'1.7.x'` </issue> <code> [start of djstripe/__init__.py] 1 from __future__ import unicode_literals 2 import warnings 3 4 from django import get_version as get_django_version 5 6 __title__ = "dj-stripe" 7 __summary__ = "Django + Stripe Made Easy" 8 __uri__ = "https://github.com/kavdev/dj-stripe/" 9 10 __version__ = "0.9.0.dev0" 11 12 __author__ = "Alexander Kavanaugh" 13 __email__ = "[email protected]" 14 15 __license__ = "BSD" 16 __license__ = "License :: OSI Approved :: BSD License" 17 __copyright__ = "Copyright 2016 Alexander Kavanaugh" 18 19 if get_django_version() <= '1.7.x': 20 msg = "dj-stripe deprecation notice: Django 1.7 and lower are no longer\n" \ 21 "supported. Please upgrade to Django 1.8 or higher.\n" \ 22 "Reference: https://github.com/kavdev/dj-stripe/issues/275" 23 warnings.warn(msg) 24 [end of djstripe/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/djstripe/__init__.py b/djstripe/__init__.py --- a/djstripe/__init__.py +++ b/djstripe/__init__.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals import warnings -from django import get_version as get_django_version +from django import VERSION as django_version __title__ = "dj-stripe" __summary__ = "Django + Stripe Made Easy" @@ -16,7 +16,7 @@ __license__ = "License :: OSI Approved :: BSD License" __copyright__ = "Copyright 2016 Alexander Kavanaugh" -if get_django_version() <= '1.7.x': +if django_version < (1, 8): msg = "dj-stripe deprecation notice: Django 1.7 and lower are no longer\n" \ "supported. Please upgrade to Django 1.8 or higher.\n" \ "Reference: https://github.com/kavdev/dj-stripe/issues/275"
{"golden_diff": "diff --git a/djstripe/__init__.py b/djstripe/__init__.py\n--- a/djstripe/__init__.py\n+++ b/djstripe/__init__.py\n@@ -1,7 +1,7 @@\n from __future__ import unicode_literals\n import warnings\n \n-from django import get_version as get_django_version\n+from django import VERSION as django_version\n \n __title__ = \"dj-stripe\"\n __summary__ = \"Django + Stripe Made Easy\"\n@@ -16,7 +16,7 @@\n __license__ = \"License :: OSI Approved :: BSD License\"\n __copyright__ = \"Copyright 2016 Alexander Kavanaugh\"\n \n-if get_django_version() <= '1.7.x':\n+if django_version < (1, 8):\n msg = \"dj-stripe deprecation notice: Django 1.7 and lower are no longer\\n\" \\\n \"supported. Please upgrade to Django 1.8 or higher.\\n\" \\\n \"Reference: https://github.com/kavdev/dj-stripe/issues/275\"\n", "issue": "Django 1.10 support\nhttps://github.com/kavdev/dj-stripe/blob/master/djstripe/__init__.py#L19\n\n`'1.10'` is less than `'1.7.x'`\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport warnings\n\nfrom django import get_version as get_django_version\n\n__title__ = \"dj-stripe\"\n__summary__ = \"Django + Stripe Made Easy\"\n__uri__ = \"https://github.com/kavdev/dj-stripe/\"\n\n__version__ = \"0.9.0.dev0\"\n\n__author__ = \"Alexander Kavanaugh\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD\"\n__license__ = \"License :: OSI Approved :: BSD License\"\n__copyright__ = \"Copyright 2016 Alexander Kavanaugh\"\n\nif get_django_version() <= '1.7.x':\n msg = \"dj-stripe deprecation notice: Django 1.7 and lower are no longer\\n\" \\\n \"supported. Please upgrade to Django 1.8 or higher.\\n\" \\\n \"Reference: https://github.com/kavdev/dj-stripe/issues/275\"\n warnings.warn(msg)\n", "path": "djstripe/__init__.py"}]}
844
230
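The dj-stripe bug above is a plain lexicographic string comparison: `'1.10' <= '1.7.x'` is true because `'1'` sorts before `'7'` character by character, while Django's `VERSION` tuple compares element-wise as integers. A quick self-contained illustration:

```python
# String comparison is character-by-character, so "1.10" sorts before "1.7.x".
assert "1.10" <= "1.7.x"

# Tuples compare numerically, which is what the fix relies on.
django_110 = (1, 10, 0)
assert not django_110 < (1, 8)  # 10 is not less than 8

legacy = (1, 7, 11)
assert legacy < (1, 8)  # the deprecation warning still fires for 1.7
```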
gh_patches_debug_4821
rasdani/github-patches
git_diff
numpy__numpy-13306
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Indicate attempted allocation size when constructing an array gives a MemoryError I've seen multiple questions now of the form "why does this give a MemoryError" when the reason is the user ended up allocating a bigger array than expected. It would be helpful to include the shape and dtype in the error message. </issue> <code> [start of numpy/core/_exceptions.py] 1 """ 2 Various richly-typed exceptions, that also help us deal with string formatting 3 in python where it's easier. 4 5 By putting the formatting in `__str__`, we also avoid paying the cost for 6 users who silence the exceptions. 7 """ 8 from numpy.core.overrides import set_module 9 10 def _unpack_tuple(tup): 11 if len(tup) == 1: 12 return tup[0] 13 else: 14 return tup 15 16 17 def _display_as_base(cls): 18 """ 19 A decorator that makes an exception class look like its base. 20 21 We use this to hide subclasses that are implementation details - the user 22 should catch the base type, which is what the traceback will show them. 23 24 Classes decorated with this decorator are subject to removal without a 25 deprecation warning. 26 """ 27 assert issubclass(cls, Exception) 28 cls.__name__ = cls.__base__.__name__ 29 cls.__qualname__ = cls.__base__.__qualname__ 30 return cls 31 32 33 class UFuncTypeError(TypeError): 34 """ Base class for all ufunc exceptions """ 35 def __init__(self, ufunc): 36 self.ufunc = ufunc 37 38 39 @_display_as_base 40 class _UFuncNoLoopError(UFuncTypeError): 41 """ Thrown when a ufunc loop cannot be found """ 42 def __init__(self, ufunc, dtypes): 43 super().__init__(ufunc) 44 self.dtypes = tuple(dtypes) 45 46 def __str__(self): 47 return ( 48 "ufunc {!r} did not contain a loop with signature matching types " 49 "{!r} -> {!r}" 50 ).format( 51 self.ufunc.__name__, 52 _unpack_tuple(self.dtypes[:self.ufunc.nin]), 53 _unpack_tuple(self.dtypes[self.ufunc.nin:]) 54 ) 55 56 57 @_display_as_base 58 class _UFuncCastingError(UFuncTypeError): 59 def __init__(self, ufunc, casting, from_, to): 60 super().__init__(ufunc) 61 self.casting = casting 62 self.from_ = from_ 63 self.to = to 64 65 66 @_display_as_base 67 class _UFuncInputCastingError(_UFuncCastingError): 68 """ Thrown when a ufunc input cannot be casted """ 69 def __init__(self, ufunc, casting, from_, to, i): 70 super().__init__(ufunc, casting, from_, to) 71 self.in_i = i 72 73 def __str__(self): 74 # only show the number if more than one input exists 75 i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else "" 76 return ( 77 "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting " 78 "rule {!r}" 79 ).format( 80 self.ufunc.__name__, i_str, self.from_, self.to, self.casting 81 ) 82 83 84 @_display_as_base 85 class _UFuncOutputCastingError(_UFuncCastingError): 86 """ Thrown when a ufunc output cannot be casted """ 87 def __init__(self, ufunc, casting, from_, to, i): 88 super().__init__(ufunc, casting, from_, to) 89 self.out_i = i 90 91 def __str__(self): 92 # only show the number if more than one output exists 93 i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else "" 94 return ( 95 "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting " 96 "rule {!r}" 97 ).format( 98 self.ufunc.__name__, i_str, self.from_, self.to, self.casting 99 ) 100 101 102 # Exception used in shares_memory() 103 @set_module('numpy') 104 class TooHardError(RuntimeError): 105 pass 106 107 108 @set_module('numpy') 109 class AxisError(ValueError, IndexError): 
110 """ Axis supplied was invalid. """ 111 def __init__(self, axis, ndim=None, msg_prefix=None): 112 # single-argument form just delegates to base class 113 if ndim is None and msg_prefix is None: 114 msg = axis 115 116 # do the string formatting here, to save work in the C code 117 else: 118 msg = ("axis {} is out of bounds for array of dimension {}" 119 .format(axis, ndim)) 120 if msg_prefix is not None: 121 msg = "{}: {}".format(msg_prefix, msg) 122 123 super(AxisError, self).__init__(msg) 124 [end of numpy/core/_exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py --- a/numpy/core/_exceptions.py +++ b/numpy/core/_exceptions.py @@ -121,3 +121,15 @@ msg = "{}: {}".format(msg_prefix, msg) super(AxisError, self).__init__(msg) + + +@_display_as_base +class _ArrayMemoryError(MemoryError): + """ Thrown when an array cannot be allocated""" + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + def __str__(self): + return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype) +
{"golden_diff": "diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py\n--- a/numpy/core/_exceptions.py\n+++ b/numpy/core/_exceptions.py\n@@ -121,3 +121,15 @@\n msg = \"{}: {}\".format(msg_prefix, msg)\n \n super(AxisError, self).__init__(msg)\n+\n+\n+@_display_as_base\n+class _ArrayMemoryError(MemoryError):\n+ \"\"\" Thrown when an array cannot be allocated\"\"\"\n+ def __init__(self, shape, dtype):\n+ self.shape = shape\n+ self.dtype = dtype\n+\n+ def __str__(self):\n+ return \"Unable to allocate array with shape {} and data type {}\".format(self.shape, self.dtype)\n+\n", "issue": "Indicate attempted allocation size when constructing an array gives a MemoryError\nI've seen multiple questions now of the form \"why does this give a MemoryError\" when the reason is the user ended up allocating a bigger array than expected.\r\n\r\nIt would be helpful to include the shape and dtype in the error message.\n", "before_files": [{"content": "\"\"\"\nVarious richly-typed exceptions, that also help us deal with string formatting\nin python where it's easier.\n\nBy putting the formatting in `__str__`, we also avoid paying the cost for\nusers who silence the exceptions.\n\"\"\"\nfrom numpy.core.overrides import set_module\n\ndef _unpack_tuple(tup):\n if len(tup) == 1:\n return tup[0]\n else:\n return tup\n\n\ndef _display_as_base(cls):\n \"\"\"\n A decorator that makes an exception class look like its base.\n\n We use this to hide subclasses that are implementation details - the user\n should catch the base type, which is what the traceback will show them.\n\n Classes decorated with this decorator are subject to removal without a\n deprecation warning.\n \"\"\"\n assert issubclass(cls, Exception)\n cls.__name__ = cls.__base__.__name__\n cls.__qualname__ = cls.__base__.__qualname__\n return cls\n\n\nclass UFuncTypeError(TypeError):\n \"\"\" Base class for all ufunc exceptions \"\"\"\n def __init__(self, ufunc):\n self.ufunc = ufunc\n\n\n@_display_as_base\nclass _UFuncNoLoopError(UFuncTypeError):\n \"\"\" Thrown when a ufunc loop cannot be found \"\"\"\n def __init__(self, ufunc, dtypes):\n super().__init__(ufunc)\n self.dtypes = tuple(dtypes)\n\n def __str__(self):\n return (\n \"ufunc {!r} did not contain a loop with signature matching types \"\n \"{!r} -> {!r}\"\n ).format(\n self.ufunc.__name__,\n _unpack_tuple(self.dtypes[:self.ufunc.nin]),\n _unpack_tuple(self.dtypes[self.ufunc.nin:])\n )\n\n\n@_display_as_base\nclass _UFuncCastingError(UFuncTypeError):\n def __init__(self, ufunc, casting, from_, to):\n super().__init__(ufunc)\n self.casting = casting\n self.from_ = from_\n self.to = to\n\n\n@_display_as_base\nclass _UFuncInputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc input cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.in_i = i\n\n def __str__(self):\n # only show the number if more than one input exists\n i_str = \"{} \".format(self.in_i) if self.ufunc.nin != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n@_display_as_base\nclass _UFuncOutputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc output cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.out_i = i\n\n def __str__(self):\n # only show the number if more than one output exists\n i_str = \"{} 
\".format(self.out_i) if self.ufunc.nout != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n# Exception used in shares_memory()\n@set_module('numpy')\nclass TooHardError(RuntimeError):\n pass\n\n\n@set_module('numpy')\nclass AxisError(ValueError, IndexError):\n \"\"\" Axis supplied was invalid. \"\"\"\n def __init__(self, axis, ndim=None, msg_prefix=None):\n # single-argument form just delegates to base class\n if ndim is None and msg_prefix is None:\n msg = axis\n\n # do the string formatting here, to save work in the C code\n else:\n msg = (\"axis {} is out of bounds for array of dimension {}\"\n .format(axis, ndim))\n if msg_prefix is not None:\n msg = \"{}: {}\".format(msg_prefix, msg)\n\n super(AxisError, self).__init__(msg)\n", "path": "numpy/core/_exceptions.py"}]}
1,824
166
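For the numpy record, the new exception is raised from the C-level allocation path, which the diff does not show. The sketch below illustrates the intended user-facing behavior at the Python level; the `allocate` helper and the raising site are hypothetical, and only the class name comes from the diff itself.

```python
import numpy as np

# Name per the golden diff; it is a private class, so this import is an
# assumption about where it ends up living.
from numpy.core._exceptions import _ArrayMemoryError


def allocate(shape, dtype):
    """Hypothetical wrapper that converts a bare MemoryError to the richer one."""
    try:
        return np.empty(shape, dtype=dtype)
    except MemoryError:
        raise _ArrayMemoryError(shape, np.dtype(dtype))


# A user hitting the limit would then see something like:
#   MemoryError: Unable to allocate array with shape
#   (100000, 100000, 100) and data type float64
```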
gh_patches_debug_12594
rasdani/github-patches
git_diff
optuna__optuna-1600
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update Kubernetes example of "simple" to avoid potential errors https://github.com/optuna/optuna/blob/a5a55a6354b3c60dd7e3a08adcbc3818e90599f0/examples/kubernetes/simple/sklearn_distributed.py#L45-L54 Like https://github.com/optuna/optuna/pull/1536, we should create a study before running the script. </issue> <code> [start of examples/kubernetes/simple/sklearn_distributed.py] 1 """ 2 Optuna distributed optimization example that optimizes 3 an sklearn classifier for the Iris dataset on Kubernetes. 4 5 This example's code is mostly the same as the sklearn_simple.py example, except for two things: 6 7 1 - It gives a name to the study and sets load_if_exists to True 8 in order to avoid errors when the code is run from multiple workers. 9 10 2 - It sets the storage address to the postgres pod deployed with the workers. 11 12 """ 13 import os 14 15 import sklearn.datasets 16 import sklearn.ensemble 17 import sklearn.model_selection 18 import sklearn.svm 19 20 import optuna 21 22 23 # FYI: Objective functions can take additional arguments 24 # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args). 25 def objective(trial): 26 iris = sklearn.datasets.load_iris() 27 x, y = iris.data, iris.target 28 29 classifier_name = trial.suggest_categorical("classifier", ["SVC", "RandomForest"]) 30 if classifier_name == "SVC": 31 svc_c = trial.suggest_float("svc_c", 1e-10, 1e10, log=True) 32 classifier_obj = sklearn.svm.SVC(C=svc_c, gamma="auto") 33 else: 34 rf_max_depth = trial.suggest_int("rf_max_depth", 2, 32, log=True) 35 classifier_obj = sklearn.ensemble.RandomForestClassifier( 36 max_depth=rf_max_depth, n_estimators=10 37 ) 38 39 score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3) 40 accuracy = score.mean() 41 return accuracy 42 43 44 if __name__ == "__main__": 45 study = optuna.create_study( 46 direction="maximize", 47 study_name="kubernetes", 48 storage="postgresql://{}:{}@postgres:5432/{}".format( 49 os.environ["POSTGRES_USER"], 50 os.environ["POSTGRES_PASSWORD"], 51 os.environ["POSTGRES_DB"], 52 ), 53 load_if_exists=True, 54 ) 55 study.optimize(objective, n_trials=20) 56 print(study.best_trial) 57 [end of examples/kubernetes/simple/sklearn_distributed.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/kubernetes/simple/sklearn_distributed.py b/examples/kubernetes/simple/sklearn_distributed.py --- a/examples/kubernetes/simple/sklearn_distributed.py +++ b/examples/kubernetes/simple/sklearn_distributed.py @@ -42,15 +42,13 @@ if __name__ == "__main__": - study = optuna.create_study( - direction="maximize", + study = optuna.load_study( study_name="kubernetes", storage="postgresql://{}:{}@postgres:5432/{}".format( os.environ["POSTGRES_USER"], os.environ["POSTGRES_PASSWORD"], os.environ["POSTGRES_DB"], ), - load_if_exists=True, ) study.optimize(objective, n_trials=20) print(study.best_trial)
{"golden_diff": "diff --git a/examples/kubernetes/simple/sklearn_distributed.py b/examples/kubernetes/simple/sklearn_distributed.py\n--- a/examples/kubernetes/simple/sklearn_distributed.py\n+++ b/examples/kubernetes/simple/sklearn_distributed.py\n@@ -42,15 +42,13 @@\n \n \n if __name__ == \"__main__\":\n- study = optuna.create_study(\n- direction=\"maximize\",\n+ study = optuna.load_study(\n study_name=\"kubernetes\",\n storage=\"postgresql://{}:{}@postgres:5432/{}\".format(\n os.environ[\"POSTGRES_USER\"],\n os.environ[\"POSTGRES_PASSWORD\"],\n os.environ[\"POSTGRES_DB\"],\n ),\n- load_if_exists=True,\n )\n study.optimize(objective, n_trials=20)\n print(study.best_trial)\n", "issue": "Update Kubernetes example of \"simple\" to avoid potential errors\nhttps://github.com/optuna/optuna/blob/a5a55a6354b3c60dd7e3a08adcbc3818e90599f0/examples/kubernetes/simple/sklearn_distributed.py#L45-L54\r\n\r\nLike https://github.com/optuna/optuna/pull/1536, we should create a study before running the script.\n", "before_files": [{"content": "\"\"\"\nOptuna distributed optimization example that optimizes\nan sklearn classifier for the Iris dataset on Kubernetes.\n\nThis example's code is mostly the same as the sklearn_simple.py example, except for two things:\n\n1 - It gives a name to the study and sets load_if_exists to True\nin order to avoid errors when the code is run from multiple workers.\n\n2 - It sets the storage address to the postgres pod deployed with the workers.\n\n\"\"\"\nimport os\n\nimport sklearn.datasets\nimport sklearn.ensemble\nimport sklearn.model_selection\nimport sklearn.svm\n\nimport optuna\n\n\n# FYI: Objective functions can take additional arguments\n# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\ndef objective(trial):\n iris = sklearn.datasets.load_iris()\n x, y = iris.data, iris.target\n\n classifier_name = trial.suggest_categorical(\"classifier\", [\"SVC\", \"RandomForest\"])\n if classifier_name == \"SVC\":\n svc_c = trial.suggest_float(\"svc_c\", 1e-10, 1e10, log=True)\n classifier_obj = sklearn.svm.SVC(C=svc_c, gamma=\"auto\")\n else:\n rf_max_depth = trial.suggest_int(\"rf_max_depth\", 2, 32, log=True)\n classifier_obj = sklearn.ensemble.RandomForestClassifier(\n max_depth=rf_max_depth, n_estimators=10\n )\n\n score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3)\n accuracy = score.mean()\n return accuracy\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(\n direction=\"maximize\",\n study_name=\"kubernetes\",\n storage=\"postgresql://{}:{}@postgres:5432/{}\".format(\n os.environ[\"POSTGRES_USER\"],\n os.environ[\"POSTGRES_PASSWORD\"],\n os.environ[\"POSTGRES_DB\"],\n ),\n load_if_exists=True,\n )\n study.optimize(objective, n_trials=20)\n print(study.best_trial)\n", "path": "examples/kubernetes/simple/sklearn_distributed.py"}]}
1,203
176
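The optuna fix switches the worker script from `create_study(..., load_if_exists=True)` to `load_study`, which assumes the study already exists in the Postgres storage. A one-off bootstrap step therefore has to run before the workers start; a minimal sketch of that step is below. The environment variable names are copied from the record, everything else is an assumption about the deployment.

```python
import os

import optuna

optuna.create_study(
    direction="maximize",
    study_name="kubernetes",
    storage="postgresql://{}:{}@postgres:5432/{}".format(
        os.environ["POSTGRES_USER"],
        os.environ["POSTGRES_PASSWORD"],
        os.environ["POSTGRES_DB"],
    ),
    load_if_exists=True,
)
```

Running the `optuna create-study` CLI with `--study-name` and `--storage` should do the same job, e.g. from a Kubernetes init container or Job.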
gh_patches_debug_1285
rasdani/github-patches
git_diff
openai__gym-2633
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug Report] Empty print version warning **Describe the bug** When I import gym, there's an empty line printed. It's because of this line: https://github.com/openai/gym/blob/master/gym/__init__.py#L30 Either it's a bug, because `notice` shouldn't be an empty string, or the check should be `if notice:` which is false for both `None` and `""` (empty string). Currently it's cluttering the logs at best, or masking some other issue. **Code example** ```python import gym ``` **System Info** Describe the characteristic of your environment: Latest gym installed from pip, Ubuntu 20.04, Python 3.9.7 ### Checklist - [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**) </issue> <code> [start of gym/__init__.py] 1 from gym import error 2 from gym.version import VERSION as __version__ 3 4 from gym.core import ( 5 Env, 6 Wrapper, 7 ObservationWrapper, 8 ActionWrapper, 9 RewardWrapper, 10 ) 11 from gym.spaces import Space 12 from gym.envs import make, spec, register 13 from gym import logger 14 from gym import vector 15 from gym import wrappers 16 import os 17 18 19 __all__ = ["Env", "Space", "Wrapper", "make", "spec", "register"] 20 21 os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide" 22 23 try: 24 import gym_notices.notices as notices 25 import sys 26 27 # print version warning if necessary 28 notice = notices.notices.get(__version__) 29 if notice is not None: 30 print(notice, file=sys.stderr) 31 32 except Exception: # nosec 33 pass 34 [end of gym/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gym/__init__.py b/gym/__init__.py --- a/gym/__init__.py +++ b/gym/__init__.py @@ -26,7 +26,7 @@ # print version warning if necessary notice = notices.notices.get(__version__) - if notice is not None: + if notice: print(notice, file=sys.stderr) except Exception: # nosec
{"golden_diff": "diff --git a/gym/__init__.py b/gym/__init__.py\n--- a/gym/__init__.py\n+++ b/gym/__init__.py\n@@ -26,7 +26,7 @@\n \n # print version warning if necessary\n notice = notices.notices.get(__version__)\n- if notice is not None:\n+ if notice:\n print(notice, file=sys.stderr)\n \n except Exception: # nosec\n", "issue": "[Bug Report] Empty print version warning \n**Describe the bug**\r\nWhen I import gym, there's an empty line printed.\r\nIt's because of this line: https://github.com/openai/gym/blob/master/gym/__init__.py#L30\r\n\r\nEither it's a bug, because `notice` shouldn't be an empty string, or the check should be `if notice:` which is false for both `None` and `\"\"` (empty string).\r\n\r\nCurrently it's cluttering the logs at best, or masking some other issue.\r\n\r\n**Code example**\r\n```python\r\nimport gym\r\n```\r\n\r\n**System Info**\r\nDescribe the characteristic of your environment:\r\nLatest gym installed from pip, Ubuntu 20.04, Python 3.9.7\r\n\r\n\r\n### Checklist\r\n\r\n- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "from gym import error\nfrom gym.version import VERSION as __version__\n\nfrom gym.core import (\n Env,\n Wrapper,\n ObservationWrapper,\n ActionWrapper,\n RewardWrapper,\n)\nfrom gym.spaces import Space\nfrom gym.envs import make, spec, register\nfrom gym import logger\nfrom gym import vector\nfrom gym import wrappers\nimport os\n\n\n__all__ = [\"Env\", \"Space\", \"Wrapper\", \"make\", \"spec\", \"register\"]\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\n\ntry:\n import gym_notices.notices as notices\n import sys\n\n # print version warning if necessary\n notice = notices.notices.get(__version__)\n if notice is not None:\n print(notice, file=sys.stderr)\n\nexcept Exception: # nosec\n pass\n", "path": "gym/__init__.py"}]}
967
99
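The gym fix is the usual Python truthiness idiom: `if notice:` skips both `None` and the empty string, whereas `if notice is not None:` only skips `None` and therefore printed the reported blank line. Illustrated:

```python
notices = (None, "", "please upgrade gym")

for notice in notices:
    if notice is not None:
        print(repr(notice))  # prints '' as well: the stray empty line

for notice in notices:
    if notice:
        print(repr(notice))  # prints only the real message
```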
gh_patches_debug_15758
rasdani/github-patches
git_diff
plone__Products.CMFPlone-3133
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mail_password form crashes if user is not found Visiting /mali_password without providing a userid will simply crash. And if mail_password_form is submitted and a user is not found, or the user doesn't have an email configured, it will crash as well. </issue> <code> [start of Products/CMFPlone/browser/login/mail_password.py] 1 # -*- coding: utf-8 -*- 2 from AccessControl import Unauthorized 3 from Products.CMFPlone import PloneMessageFactory as _ 4 from Products.Five import BrowserView 5 from Products.statusmessages.interfaces import IStatusMessage 6 7 8 class MailPasswordView(BrowserView): 9 10 def __call__(self): 11 try: 12 response = self.context.portal_registration.mailPassword( 13 self.request.form.get('userid', ''), 14 self.request, 15 ) 16 except ValueError as e: 17 try: 18 msg = _(str(e)) 19 except Unauthorized: 20 # If we are not allowed to tell the user, what is wrong, he 21 # should get an error message and contact the admins 22 raise e 23 IStatusMessage(self.request).add(msg) 24 self.request.response.redirect( 25 self.context.absolute_url() + 'mail_password_form' 26 ) 27 return response 28 [end of Products/CMFPlone/browser/login/mail_password.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Products/CMFPlone/browser/login/mail_password.py b/Products/CMFPlone/browser/login/mail_password.py --- a/Products/CMFPlone/browser/login/mail_password.py +++ b/Products/CMFPlone/browser/login/mail_password.py @@ -8,6 +8,7 @@ class MailPasswordView(BrowserView): def __call__(self): + response = None try: response = self.context.portal_registration.mailPassword( self.request.form.get('userid', ''), @@ -22,6 +23,6 @@ raise e IStatusMessage(self.request).add(msg) self.request.response.redirect( - self.context.absolute_url() + 'mail_password_form' + self.context.absolute_url() + '/mail_password_form' ) return response
{"golden_diff": "diff --git a/Products/CMFPlone/browser/login/mail_password.py b/Products/CMFPlone/browser/login/mail_password.py\n--- a/Products/CMFPlone/browser/login/mail_password.py\n+++ b/Products/CMFPlone/browser/login/mail_password.py\n@@ -8,6 +8,7 @@\n class MailPasswordView(BrowserView):\n \n def __call__(self):\n+ response = None\n try:\n response = self.context.portal_registration.mailPassword(\n self.request.form.get('userid', ''),\n@@ -22,6 +23,6 @@\n raise e\n IStatusMessage(self.request).add(msg)\n self.request.response.redirect(\n- self.context.absolute_url() + 'mail_password_form'\n+ self.context.absolute_url() + '/mail_password_form'\n )\n return response\n", "issue": "mail_password form crashes if user is not found\nVisiting /mali_password without providing a userid will simply crash. And if mail_password_form is submitted and a user is not found, or the user doesn't have an email configured, it will crash as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import Unauthorized\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.Five import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\n\n\nclass MailPasswordView(BrowserView):\n\n def __call__(self):\n try:\n response = self.context.portal_registration.mailPassword(\n self.request.form.get('userid', ''),\n self.request,\n )\n except ValueError as e:\n try:\n msg = _(str(e))\n except Unauthorized:\n # If we are not allowed to tell the user, what is wrong, he\n # should get an error message and contact the admins\n raise e\n IStatusMessage(self.request).add(msg)\n self.request.response.redirect(\n self.context.absolute_url() + 'mail_password_form'\n )\n return response\n", "path": "Products/CMFPlone/browser/login/mail_password.py"}]}
828
179
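Besides adding the missing slash in the redirect URL, the plone diff initializes `response = None` because an assignment inside `try` leaves the name unbound when the call raises before the assignment completes. A stripped-down repro of that failure mode, with a stand-in `do_lookup` instead of `mailPassword`:

```python
def do_lookup():
    raise ValueError("user not found")


def broken():
    try:
        response = do_lookup()  # raises before `response` is ever bound
    except ValueError:
        pass
    return response  # UnboundLocalError: local variable 'response' referenced


def fixed():
    response = None
    try:
        response = do_lookup()
    except ValueError:
        pass
    return response


fixed()   # returns None
# broken()  # would raise UnboundLocalError
```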
gh_patches_debug_13309
rasdani/github-patches
git_diff
HypothesisWorks__hypothesis-1524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> note should print when verbosity is set to at least verbose We have the `note` function that prints its argument only on the final example, but actually it should probably print it for *every* example when `verbosity >= Verbosity.verbose`, so that people can actually see the tests as they run. </issue> <code> [start of hypothesis-python/src/hypothesis/control.py] 1 # coding=utf-8 2 # 3 # This file is part of Hypothesis, which may be found at 4 # https://github.com/HypothesisWorks/hypothesis-python 5 # 6 # Most of this work is copyright (C) 2013-2018 David R. MacIver 7 # ([email protected]), but it contains contributions by others. See 8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and 9 # consult the git log if you need to determine who owns an individual 10 # contribution. 11 # 12 # This Source Code Form is subject to the terms of the Mozilla Public License, 13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can 14 # obtain one at http://mozilla.org/MPL/2.0/. 15 # 16 # END HEADER 17 18 from __future__ import division, print_function, absolute_import 19 20 import traceback 21 22 from hypothesis.errors import CleanupFailed, InvalidArgument, \ 23 UnsatisfiedAssumption 24 from hypothesis.reporting import report 25 from hypothesis.utils.dynamicvariables import DynamicVariable 26 27 if False: 28 from typing import Any, AnyStr # noqa 29 30 31 def reject(): 32 raise UnsatisfiedAssumption() 33 34 35 def assume(condition): 36 # type: (Any) -> bool 37 """Calling ``assume`` is like an :ref:`assert <python:assert>` that marks 38 the example as bad, rather than failing the test. 39 40 This allows you to specify properties that you *assume* will be 41 true, and let Hypothesis try to avoid similar examples in future. 42 """ 43 if not condition: 44 raise UnsatisfiedAssumption() 45 return True 46 47 48 _current_build_context = DynamicVariable(None) 49 50 51 def current_build_context(): 52 context = _current_build_context.value 53 if context is None: 54 raise InvalidArgument( 55 u'No build context registered') 56 return context 57 58 59 class BuildContext(object): 60 61 def __init__(self, data, is_final=False, close_on_capture=True): 62 self.data = data 63 self.tasks = [] 64 self.is_final = is_final 65 self.close_on_capture = close_on_capture 66 self.close_on_del = False 67 self.notes = [] 68 69 def __enter__(self): 70 self.assign_variable = _current_build_context.with_value(self) 71 self.assign_variable.__enter__() 72 return self 73 74 def __exit__(self, exc_type, exc_value, tb): 75 self.assign_variable.__exit__(exc_type, exc_value, tb) 76 if self.close() and exc_type is None: 77 raise CleanupFailed() 78 79 def local(self): 80 return _current_build_context.with_value(self) 81 82 def close(self): 83 any_failed = False 84 for task in self.tasks: 85 try: 86 task() 87 except BaseException: 88 any_failed = True 89 report(traceback.format_exc()) 90 return any_failed 91 92 93 def cleanup(teardown): 94 """Register a function to be called when the current test has finished 95 executing. Any exceptions thrown in teardown will be printed but not 96 rethrown. 97 98 Inside a test this isn't very interesting, because you can just use 99 a finally block, but note that you can use this inside map, flatmap, 100 etc. in order to e.g. insist that a value is closed at the end. 
101 """ 102 context = _current_build_context.value 103 if context is None: 104 raise InvalidArgument( 105 u'Cannot register cleanup outside of build context') 106 context.tasks.append(teardown) 107 108 109 def note(value): 110 # type: (AnyStr) -> None 111 """Report this value in the final execution.""" 112 context = _current_build_context.value 113 if context is None: 114 raise InvalidArgument( 115 'Cannot make notes outside of a test') 116 context.notes.append(value) 117 if context.is_final: 118 report(value) 119 120 121 def event(value): 122 # type: (AnyStr) -> None 123 """Record an event that occurred this test. Statistics on number of test 124 runs with each event will be reported at the end if you run Hypothesis in 125 statistics reporting mode. 126 127 Events should be strings or convertible to them. 128 """ 129 context = _current_build_context.value 130 if context is None: 131 raise InvalidArgument( 132 'Cannot make record events outside of a test') 133 134 if context.data is not None: 135 context.data.note_event(value) 136 [end of hypothesis-python/src/hypothesis/control.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py --- a/hypothesis-python/src/hypothesis/control.py +++ b/hypothesis-python/src/hypothesis/control.py @@ -19,6 +19,7 @@ import traceback +from hypothesis import Verbosity, settings from hypothesis.errors import CleanupFailed, InvalidArgument, \ UnsatisfiedAssumption from hypothesis.reporting import report @@ -114,7 +115,7 @@ raise InvalidArgument( 'Cannot make notes outside of a test') context.notes.append(value) - if context.is_final: + if context.is_final or settings.default.verbosity >= Verbosity.verbose: report(value)
{"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py\n--- a/hypothesis-python/src/hypothesis/control.py\n+++ b/hypothesis-python/src/hypothesis/control.py\n@@ -19,6 +19,7 @@\n \n import traceback\n \n+from hypothesis import Verbosity, settings\n from hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\n from hypothesis.reporting import report\n@@ -114,7 +115,7 @@\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n- if context.is_final:\n+ if context.is_final or settings.default.verbosity >= Verbosity.verbose:\n report(value)\n", "issue": "note should print when verbosity is set to at least verbose \nWe have the `note` function that prints its argument only on the final example, but actually it should probably print it for *every* example when `verbosity >= Verbosity.verbose`, so that people can actually see the tests as they run. \n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport traceback\n\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\nfrom hypothesis.reporting import report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\nif False:\n from typing import Any, AnyStr # noqa\n\n\ndef reject():\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition):\n # type: (Any) -> bool\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'No build context registered')\n return context\n\n\nclass BuildContext(object):\n\n def __init__(self, data, is_final=False, close_on_capture=True):\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n self.notes = []\n\n def __enter__(self):\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, tb)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def local(self):\n return _current_build_context.with_value(self)\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function 
to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'Cannot register cleanup outside of build context')\n context.tasks.append(teardown)\n\n\ndef note(value):\n # type: (AnyStr) -> None\n \"\"\"Report this value in the final execution.\"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n if context.is_final:\n report(value)\n\n\ndef event(value):\n # type: (AnyStr) -> None\n \"\"\"Record an event that occurred this test. Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make record events outside of a test')\n\n if context.data is not None:\n context.data.note_event(value)\n", "path": "hypothesis-python/src/hypothesis/control.py"}]}
1,847
163
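For context on the record above: the golden diff makes `note` report on every example once the configured verbosity reaches `verbose`, instead of only on the final shrunk example. A minimal, self-contained sketch of that gating pattern (the `Verbosity`, `report`, and `DEFAULT_VERBOSITY` names below are illustrative stand-ins, not the Hypothesis internals):

```python
from enum import IntEnum


class Verbosity(IntEnum):
    quiet = 0
    normal = 1
    verbose = 2
    debug = 3


DEFAULT_VERBOSITY = Verbosity.normal  # stand-in for settings.default.verbosity


def report(value):
    print(value)


def note(value, is_final=False):
    # Report on the final example, or on every example once the
    # configured verbosity is at least `verbose`.
    if is_final or DEFAULT_VERBOSITY >= Verbosity.verbose:
        report(value)
```

With `DEFAULT_VERBOSITY = Verbosity.normal`, `note("x")` stays silent for intermediate examples; raising it to `Verbosity.verbose` makes every call print, which is exactly the behavior change the diff introduces.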
gh_patches_debug_670
rasdani/github-patches
git_diff
pytorch__examples-182
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Division error Training a model for `fast-neural-style` raises a RuntimeError from variable division during input normalization. - python2.7 - torch==0.1.12.post2 - torchvision==0.1.8 ```` Traceback (most recent call last): File "neural_style/neural_style.py", line 226, in <module> main() File "neural_style/neural_style.py", line 220, in main train(args) File "neural_style/neural_style.py", line 65, in train style_v = utils.normalize_batch(style_v) File "/home/paperspace/embro/neural_style/utils.py", line 42, in normalize_batch batch /= Variable(std) File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 793, in __idiv__ return self.div_(other) File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 323, in div_ raise RuntimeError("div_ only supports scalar multiplication") ```` </issue> <code> [start of fast_neural_style/neural_style/utils.py] 1 import torch 2 from PIL import Image 3 from torch.autograd import Variable 4 5 6 def load_image(filename, size=None, scale=None): 7 img = Image.open(filename) 8 if size is not None: 9 img = img.resize((size, size), Image.ANTIALIAS) 10 elif scale is not None: 11 img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS) 12 return img 13 14 15 def save_image(filename, data): 16 img = data.clone().clamp(0, 255).numpy() 17 img = img.transpose(1, 2, 0).astype("uint8") 18 img = Image.fromarray(img) 19 img.save(filename) 20 21 22 def gram_matrix(y): 23 (b, ch, h, w) = y.size() 24 features = y.view(b, ch, w * h) 25 features_t = features.transpose(1, 2) 26 gram = features.bmm(features_t) / (ch * h * w) 27 return gram 28 29 30 def normalize_batch(batch): 31 # normalize using imagenet mean and std 32 mean = batch.data.new(batch.data.size()) 33 std = batch.data.new(batch.data.size()) 34 mean[:, 0, :, :] = 0.485 35 mean[:, 1, :, :] = 0.456 36 mean[:, 2, :, :] = 0.406 37 std[:, 0, :, :] = 0.229 38 std[:, 1, :, :] = 0.224 39 std[:, 2, :, :] = 0.225 40 batch = torch.div(batch, 255.0) 41 batch -= Variable(mean) 42 batch /= Variable(std) 43 return batch 44 [end of fast_neural_style/neural_style/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fast_neural_style/neural_style/utils.py b/fast_neural_style/neural_style/utils.py --- a/fast_neural_style/neural_style/utils.py +++ b/fast_neural_style/neural_style/utils.py @@ -39,5 +39,5 @@ std[:, 2, :, :] = 0.225 batch = torch.div(batch, 255.0) batch -= Variable(mean) - batch /= Variable(std) + batch = batch / Variable(std) return batch
{"golden_diff": "diff --git a/fast_neural_style/neural_style/utils.py b/fast_neural_style/neural_style/utils.py\n--- a/fast_neural_style/neural_style/utils.py\n+++ b/fast_neural_style/neural_style/utils.py\n@@ -39,5 +39,5 @@\n std[:, 2, :, :] = 0.225\n batch = torch.div(batch, 255.0)\n batch -= Variable(mean)\n- batch /= Variable(std)\n+ batch = batch / Variable(std)\n return batch\n", "issue": "Division error\nTraining a model for `fast-neural-style` raises a RuntimeError from variable division during input normalization. \r\n\r\n- python2.7\r\n- torch==0.1.12.post2\r\n- torchvision==0.1.8\r\n\r\n````\r\nTraceback (most recent call last):\r\n File \"neural_style/neural_style.py\", line 226, in <module>\r\n main()\r\n File \"neural_style/neural_style.py\", line 220, in main\r\n train(args)\r\n File \"neural_style/neural_style.py\", line 65, in train\r\n style_v = utils.normalize_batch(style_v)\r\n File \"/home/paperspace/embro/neural_style/utils.py\", line 42, in normalize_batch\r\n batch /= Variable(std)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py\", line 793, in __idiv__\r\n return self.div_(other)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py\", line 323, in div_\r\n raise RuntimeError(\"div_ only supports scalar multiplication\")\r\n````\r\n\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom PIL import Image\nfrom torch.autograd import Variable\n\n\ndef load_image(filename, size=None, scale=None):\n img = Image.open(filename)\n if size is not None:\n img = img.resize((size, size), Image.ANTIALIAS)\n elif scale is not None:\n img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)\n return img\n\n\ndef save_image(filename, data):\n img = data.clone().clamp(0, 255).numpy()\n img = img.transpose(1, 2, 0).astype(\"uint8\")\n img = Image.fromarray(img)\n img.save(filename)\n\n\ndef gram_matrix(y):\n (b, ch, h, w) = y.size()\n features = y.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t) / (ch * h * w)\n return gram\n\n\ndef normalize_batch(batch):\n # normalize using imagenet mean and std\n mean = batch.data.new(batch.data.size())\n std = batch.data.new(batch.data.size())\n mean[:, 0, :, :] = 0.485\n mean[:, 1, :, :] = 0.456\n mean[:, 2, :, :] = 0.406\n std[:, 0, :, :] = 0.229\n std[:, 1, :, :] = 0.224\n std[:, 2, :, :] = 0.225\n batch = torch.div(batch, 255.0)\n batch -= Variable(mean)\n batch /= Variable(std)\n return batch\n", "path": "fast_neural_style/neural_style/utils.py"}]}
1,264
117
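For context on the record above: the old `torch.autograd.Variable` API (torch 0.1.x) rejected in-place division by another `Variable`, so the fix replaces `batch /= Variable(std)` with an out-of-place division that allocates a new tensor. A rough equivalent in current PyTorch, where `Variable` is merged into `Tensor` and broadcasting replaces the per-channel fills (shapes here are chosen for illustration):

```python
import torch

batch = torch.rand(4, 3, 32, 32) * 255.0  # fake image batch in 0..255
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)

batch = batch / 255.0
batch = (batch - mean) / std  # out-of-place: builds a new tensor instead of div_
```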
gh_patches_debug_34378
rasdani/github-patches
git_diff
nipy__nipype-3637
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSVReader does not handle quoted fields or tsv files
### Summary
I want to add functionality to the CSVReader so it can handle tsv files. While checking that out, I noticed that we're just splitting on `,`, which is not ideal.

### Actual behavior
Parsing CSV files with fields that contain quoted text with ',' characters does not work correctly. 
TSV files are not supported.


### Expected behavior
Quoted fields should be correctly parsed if they contain the delimiter.

### How to replicate the behavior
Run the script below.

### Script/Workflow details

```
from nipype.interfaces import utility

reader = utility.CSVReader()

lines = ["foo,\"hello,world\",300.1\n"]

with open("testcsv.csv", "w") as fid:
    fid.writelines(lines)
    fid.flush()

reader.inputs.in_file = "testcsv.csv"
out = reader.run()

assert out.outputs.column_1 == ["hello, world"]
```

### Platform details:

<!-- Please run the following code from your shell and place the output between the triple ticks, below.
python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())"
-->

```
{'commit_hash': 'c46a957bd',
 'commit_source': 'repository',
 'networkx_version': '3.2.1',
 'nibabel_version': '5.2.1',
 'nipype_version': '1.8.7.dev0',
 'numpy_version': '1.26.4',
 'pkg_path': '/home/USER/Desktop/nipype/nipype',
 'scipy_version': '1.12.0',
 'sys_executable': '/home/USER/Desktop/nipype/.venv/bin/python',
 'sys_platform': 'linux',
 'sys_version': '3.9.18 (main, Feb 1 2024, 11:48:51) \n[GCC 11.4.0]',
 'traits_version': '6.3.2'}

```

### Execution environment
- My python environment outside container

</issue>
<code>
[start of nipype/interfaces/utility/csv.py]
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 et:
3 """CSV Handling utilities
4 """
5 from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface
6 from ..io import add_traits
7 
8 
9 class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):
10     in_file = File(
11         exists=True, mandatory=True, desc="Input comma-seperated value (CSV) file"
12     )
13     header = traits.Bool(
14         False, usedefault=True, desc="True if the first line is a column header"
15     )
16 
17 
18 class CSVReader(BaseInterface):
19     """
20     Examples
21     --------
22 
23     >>> reader = CSVReader()  # doctest: +SKIP
24     >>> reader.inputs.in_file = 'noHeader.csv'  # doctest: +SKIP
25     >>> out = reader.run()  # doctest: +SKIP
26     >>> out.outputs.column_0 == ['foo', 'bar', 'baz']  # doctest: +SKIP
27     True
28     >>> out.outputs.column_1 == ['hello', 'world', 'goodbye']  # doctest: +SKIP
29     True
30     >>> out.outputs.column_2 == ['300.1', '5', '0.3']  # doctest: +SKIP
31     True
32 
33     >>> reader = CSVReader()  # doctest: +SKIP
34     >>> reader.inputs.in_file = 'header.csv'  # doctest: +SKIP
35     >>> reader.inputs.header = True  # doctest: +SKIP
36     >>> out = reader.run()  # doctest: +SKIP
37     >>> out.outputs.files == ['foo', 'bar', 'baz']  # doctest: +SKIP
38     True
39     >>> out.outputs.labels == ['hello', 'world', 'goodbye']  # doctest: +SKIP
40     True
41     >>> out.outputs.erosion == ['300.1', '5', '0.3']  # doctest: +SKIP
42     True
43 
44     """
45 
46     input_spec = CSVReaderInputSpec
47     output_spec = DynamicTraitedSpec
48     _always_run = True
49 
50     def _append_entry(self, outputs, entry):
51         for key, value in zip(self._outfields, entry):
52             outputs[key].append(value)
53         return outputs
54 
55     def _parse_line(self, line):
56         line = line.replace("\n", "")
57         entry = [x.strip() for x in line.split(",")]
58         return entry
59 
60     def _get_outfields(self):
61         with open(self.inputs.in_file) as fid:
62             entry = self._parse_line(fid.readline())
63             if self.inputs.header:
64                 self._outfields = tuple(entry)
65             else:
66                 self._outfields = tuple(["column_" + str(x) for x in range(len(entry))])
67         return self._outfields
68 
69     def _run_interface(self, runtime):
70         self._get_outfields()
71         return runtime
72 
73     def _outputs(self):
74         return self._add_output_traits(super()._outputs())
75 
76     def _add_output_traits(self, base):
77         return add_traits(base, self._get_outfields())
78 
79     def _list_outputs(self):
80         outputs = self.output_spec().get()
81         isHeader = True
82         for key in self._outfields:
83             outputs[key] = []  # initialize outfields
84         with open(self.inputs.in_file) as fid:
85             for line in fid.readlines():
86                 if self.inputs.header and isHeader:  # skip header line
87                     isHeader = False
88                     continue
89                 entry = self._parse_line(line)
90                 outputs = self._append_entry(outputs, entry)
91         return outputs
92 
[end of nipype/interfaces/utility/csv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py --- a/nipype/interfaces/utility/csv.py +++ b/nipype/interfaces/utility/csv.py @@ -2,6 +2,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """CSV Handling utilities """ +import csv from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface from ..io import add_traits @@ -13,6 +14,7 @@ header = traits.Bool( False, usedefault=True, desc="True if the first line is a column header" ) + delimiter = traits.String(",", usedefault=True, desc="Delimiter to use.") class CSVReader(BaseInterface): @@ -52,14 +54,11 @@ outputs[key].append(value) return outputs - def _parse_line(self, line): - line = line.replace("\n", "") - entry = [x.strip() for x in line.split(",")] - return entry - def _get_outfields(self): with open(self.inputs.in_file) as fid: - entry = self._parse_line(fid.readline()) + reader = csv.reader(fid, delimiter=self.inputs.delimiter) + + entry = next(reader) if self.inputs.header: self._outfields = tuple(entry) else: @@ -82,10 +81,10 @@ for key in self._outfields: outputs[key] = [] # initialize outfields with open(self.inputs.in_file) as fid: - for line in fid.readlines(): + reader = csv.reader(fid, delimiter=self.inputs.delimiter) + for entry in reader: if self.inputs.header and isHeader: # skip header line isHeader = False continue - entry = self._parse_line(line) outputs = self._append_entry(outputs, entry) return outputs
{"golden_diff": "diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py\n--- a/nipype/interfaces/utility/csv.py\n+++ b/nipype/interfaces/utility/csv.py\n@@ -2,6 +2,7 @@\n # vi: set ft=python sts=4 ts=4 sw=4 et:\n \"\"\"CSV Handling utilities\n \"\"\"\n+import csv\n from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface\n from ..io import add_traits\n \n@@ -13,6 +14,7 @@\n header = traits.Bool(\n False, usedefault=True, desc=\"True if the first line is a column header\"\n )\n+ delimiter = traits.String(\",\", usedefault=True, desc=\"Delimiter to use.\")\n \n \n class CSVReader(BaseInterface):\n@@ -52,14 +54,11 @@\n outputs[key].append(value)\n return outputs\n \n- def _parse_line(self, line):\n- line = line.replace(\"\\n\", \"\")\n- entry = [x.strip() for x in line.split(\",\")]\n- return entry\n-\n def _get_outfields(self):\n with open(self.inputs.in_file) as fid:\n- entry = self._parse_line(fid.readline())\n+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n+\n+ entry = next(reader)\n if self.inputs.header:\n self._outfields = tuple(entry)\n else:\n@@ -82,10 +81,10 @@\n for key in self._outfields:\n outputs[key] = [] # initialize outfields\n with open(self.inputs.in_file) as fid:\n- for line in fid.readlines():\n+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n+ for entry in reader:\n if self.inputs.header and isHeader: # skip header line\n isHeader = False\n continue\n- entry = self._parse_line(line)\n outputs = self._append_entry(outputs, entry)\n return outputs\n", "issue": "CSVReader does not handle quoted fields or tsv files\n### Summary\r\nI want to add functionality to the CSVReader so it can handle tsv files. While checking that out, I noticed that we're just splitting on `,`, which is not ideal.\r\n\r\n### Actual behavior\r\nParsing CSV files with fields that contain quoted text with ',' characters does not work correctly. 
TSV files are not supported.\r\n\r\n\r\n### Expected behavior\r\nQuoted fields should be correctly parsed if they contain the delimiter.\r\n\r\n### How to replicate the behavior\r\nRun the script below.\r\n\r\n### Script/Workflow details\r\n\r\n```\r\nfrom nipype.interfaces import utility\r\n\r\nreader = utility.CSVReader()\r\n\r\nlines = [\"foo,\\\"hello,world\\\",300.1\\n\"]\r\n\r\nwith open(\"testcsv.csv\", \"w\") as fid:\r\n fid.writelines(lines)\r\n fid.flush()\r\n\r\nreader.inputs.in_file = \"testcsv.csv\"\r\nout = reader.run()\r\n\r\nassert out.outputs.column_1 == [\"hello, world\"]\r\n```\r\n\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n{'commit_hash': 'c46a957bd',\r\n 'commit_source': 'repository',\r\n 'networkx_version': '3.2.1',\r\n 'nibabel_version': '5.2.1',\r\n 'nipype_version': '1.8.7.dev0',\r\n 'numpy_version': '1.26.4',\r\n 'pkg_path': '/home/USER/Desktop/nipype/nipype',\r\n 'scipy_version': '1.12.0',\r\n 'sys_executable': '/home/USER/Desktop/nipype/.venv/bin/python',\r\n 'sys_platform': 'linux',\r\n 'sys_version': '3.9.18 (main, Feb 1 2024, 11:48:51) \\n[GCC 11.4.0]',\r\n 'traits_version': '6.3.2'}\r\n\r\n```\r\n\r\n### Execution environment\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"CSV Handling utilities\n\"\"\"\nfrom ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface\nfrom ..io import add_traits\n\n\nclass CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):\n in_file = File(\n exists=True, mandatory=True, desc=\"Input comma-seperated value (CSV) file\"\n )\n header = traits.Bool(\n False, usedefault=True, desc=\"True if the first line is a column header\"\n )\n\n\nclass CSVReader(BaseInterface):\n \"\"\"\n Examples\n --------\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP\n >>> reader.inputs.header = True # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n \"\"\"\n\n input_spec = CSVReaderInputSpec\n output_spec = DynamicTraitedSpec\n _always_run = True\n\n def _append_entry(self, outputs, entry):\n for key, value in zip(self._outfields, entry):\n outputs[key].append(value)\n return outputs\n\n def _parse_line(self, line):\n line = line.replace(\"\\n\", \"\")\n entry = [x.strip() for x in line.split(\",\")]\n return entry\n\n def _get_outfields(self):\n with open(self.inputs.in_file) as fid:\n entry = self._parse_line(fid.readline())\n if self.inputs.header:\n self._outfields = tuple(entry)\n else:\n self._outfields = tuple([\"column_\" + str(x) for x in range(len(entry))])\n return self._outfields\n\n def 
_run_interface(self, runtime):\n self._get_outfields()\n return runtime\n\n def _outputs(self):\n return self._add_output_traits(super()._outputs())\n\n def _add_output_traits(self, base):\n return add_traits(base, self._get_outfields())\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n isHeader = True\n for key in self._outfields:\n outputs[key] = [] # initialize outfields\n with open(self.inputs.in_file) as fid:\n for line in fid.readlines():\n if self.inputs.header and isHeader: # skip header line\n isHeader = False\n continue\n entry = self._parse_line(line)\n outputs = self._append_entry(outputs, entry)\n return outputs\n", "path": "nipype/interfaces/utility/csv.py"}]}
1,968
429
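For context on the record above: the diff replaces hand-rolled `line.split(",")` parsing with the standard-library `csv` module, which is what actually understands quoting. A standalone sketch of the difference, independent of nipype:

```python
import csv
import io

line = 'foo,"hello,world",300.1\n'

# Naive splitting cuts the quoted field in two.
assert [x.strip() for x in line.split(",")] == ['foo', '"hello', 'world"', '300.1']

# csv.reader honors the quotes (and takes a delimiter, so TSV works too).
assert next(csv.reader(io.StringIO(line), delimiter=",")) == ['foo', 'hello,world', '300.1']
```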
gh_patches_debug_22105
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4902
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Protect against web/api 5xx responses
When a build is attempted, but a web/api instance throws a 5xx response, a number of strange behaviors can result:
- Builds will get stuck in triggered state
- Builds will fail randomly when updating the api fails
- 5xx responses will be returned from the web servers to users

Part of the resolution to this may be defensive protection around intermittent 5xx responses. This may take some operation changes as well though, as our load balancer should really assume a 5xx response is enough to dislodge the server from the lb group.

Raised from #2255 
</issue>
<code>
[start of readthedocs/restapi/client.py]
1 # -*- coding: utf-8 -*-
2 
3 """Simple client to access our API with Slumber credentials."""
4 
5 from __future__ import (
6     absolute_import,
7     division,
8     print_function,
9     unicode_literals,
10 )
11 
12 import logging
13 
14 import requests
15 from django.conf import settings
16 from requests_toolbelt.adapters import host_header_ssl
17 from rest_framework.renderers import JSONRenderer
18 from slumber import API, serialize
19 
20 log = logging.getLogger(__name__)
21 
22 PRODUCTION_DOMAIN = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')
23 API_HOST = getattr(settings, 'SLUMBER_API_HOST', 'https://readthedocs.org')
24 USER = getattr(settings, 'SLUMBER_USERNAME', None)
25 PASS = getattr(settings, 'SLUMBER_PASSWORD', None)
26 
27 
28 class DrfJsonSerializer(serialize.JsonSerializer):
29 
30     """Additional serialization help from the DRF renderer"""
31 
32     key = 'json-drf'
33 
34     def dumps(self, data):
35         """Used to be able to render datetime objects."""
36         return JSONRenderer().render(data)
37 
38 
39 def setup_api():
40     session = requests.Session()
41     if API_HOST.startswith('https'):
42         # Only use the HostHeaderSSLAdapter for HTTPS connections
43         adapter_class = host_header_ssl.HostHeaderSSLAdapter
44     else:
45         adapter_class = requests.adapters.HTTPAdapter
46 
47     session.mount(
48         API_HOST,
49         adapter_class(max_retries=3),
50     )
51     session.headers.update({'Host': PRODUCTION_DOMAIN})
52     api_config = {
53         'base_url': '%s/api/v2/' % API_HOST,
54         'serializer': serialize.Serializer(
55             default='json-drf',
56             serializers=[
57                 serialize.JsonSerializer(),
58                 DrfJsonSerializer(),
59             ],
60         ),
61         'session': session,
62     }
63     if USER and PASS:
64         log.debug(
65             'Using slumber v2 with user %s, pointed at %s',
66             USER,
67             API_HOST,
68         )
69         session.auth = (USER, PASS)
70     else:
71         log.warning('SLUMBER_USERNAME/PASSWORD settings are not set')
72     return API(**api_config)
73 
74 
75 api = setup_api()
76 
[end of readthedocs/restapi/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/readthedocs/restapi/client.py b/readthedocs/restapi/client.py --- a/readthedocs/restapi/client.py +++ b/readthedocs/restapi/client.py @@ -13,6 +13,7 @@ import requests from django.conf import settings +from requests.packages.urllib3.util.retry import Retry # noqa from requests_toolbelt.adapters import host_header_ssl from rest_framework.renderers import JSONRenderer from slumber import API, serialize @@ -44,9 +45,21 @@ else: adapter_class = requests.adapters.HTTPAdapter + # Define a retry mechanism trying to attempt to not fail in the first + # error. Builders hit this issue frequently because the webs are high loaded + retry = Retry( + total=3, + read=3, + connect=3, + status=3, + backoff_factor=0.5, # 0.5, 1, 2 seconds + method_whitelist=('GET', 'PUT', 'PATCH', 'POST'), + status_forcelist=(408, 413, 429, 500, 502, 503, 504), + ) + session.mount( API_HOST, - adapter_class(max_retries=3), + adapter_class(max_retries=retry), ) session.headers.update({'Host': PRODUCTION_DOMAIN}) api_config = {
{"golden_diff": "diff --git a/readthedocs/restapi/client.py b/readthedocs/restapi/client.py\n--- a/readthedocs/restapi/client.py\n+++ b/readthedocs/restapi/client.py\n@@ -13,6 +13,7 @@\n \n import requests\n from django.conf import settings\n+from requests.packages.urllib3.util.retry import Retry # noqa\n from requests_toolbelt.adapters import host_header_ssl\n from rest_framework.renderers import JSONRenderer\n from slumber import API, serialize\n@@ -44,9 +45,21 @@\n else:\n adapter_class = requests.adapters.HTTPAdapter\n \n+ # Define a retry mechanism trying to attempt to not fail in the first\n+ # error. Builders hit this issue frequently because the webs are high loaded\n+ retry = Retry(\n+ total=3,\n+ read=3,\n+ connect=3,\n+ status=3,\n+ backoff_factor=0.5, # 0.5, 1, 2 seconds\n+ method_whitelist=('GET', 'PUT', 'PATCH', 'POST'),\n+ status_forcelist=(408, 413, 429, 500, 502, 503, 504),\n+ )\n+\n session.mount(\n API_HOST,\n- adapter_class(max_retries=3),\n+ adapter_class(max_retries=retry),\n )\n session.headers.update({'Host': PRODUCTION_DOMAIN})\n api_config = {\n", "issue": "Protect against web/api 5xx responses\nWhen a build is attempted, but a web/api instance throws a 5xx response, a number of strange behaviors can result:\n- Builds will get stuck in triggered state\n- Builds will fail randomly when updating the api fails\n- 5xx responses will be returned from the web servers to users\n\nPart of the resolution to this may be defensive protection around intermittent 5xx responses. This may take some operation changes as well though, as our load balancer should really assume a 5xx response is enough to dislodge the server from the lb group.\n\nRaised from #2255 \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Simple client to access our API with Slumber credentials.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport logging\n\nimport requests\nfrom django.conf import settings\nfrom requests_toolbelt.adapters import host_header_ssl\nfrom rest_framework.renderers import JSONRenderer\nfrom slumber import API, serialize\n\nlog = logging.getLogger(__name__)\n\nPRODUCTION_DOMAIN = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')\nAPI_HOST = getattr(settings, 'SLUMBER_API_HOST', 'https://readthedocs.org')\nUSER = getattr(settings, 'SLUMBER_USERNAME', None)\nPASS = getattr(settings, 'SLUMBER_PASSWORD', None)\n\n\nclass DrfJsonSerializer(serialize.JsonSerializer):\n\n \"\"\"Additional serialization help from the DRF renderer\"\"\"\n\n key = 'json-drf'\n\n def dumps(self, data):\n \"\"\"Used to be able to render datetime objects.\"\"\"\n return JSONRenderer().render(data)\n\n\ndef setup_api():\n session = requests.Session()\n if API_HOST.startswith('https'):\n # Only use the HostHeaderSSLAdapter for HTTPS connections\n adapter_class = host_header_ssl.HostHeaderSSLAdapter\n else:\n adapter_class = requests.adapters.HTTPAdapter\n\n session.mount(\n API_HOST,\n adapter_class(max_retries=3),\n )\n session.headers.update({'Host': PRODUCTION_DOMAIN})\n api_config = {\n 'base_url': '%s/api/v2/' % API_HOST,\n 'serializer': serialize.Serializer(\n default='json-drf',\n serializers=[\n serialize.JsonSerializer(),\n DrfJsonSerializer(),\n ],\n ),\n 'session': session,\n }\n if USER and PASS:\n log.debug(\n 'Using slumber v2 with user %s, pointed at %s',\n USER,\n API_HOST,\n )\n session.auth = (USER, PASS)\n else:\n log.warning('SLUMBER_USERNAME/PASSWORD settings are not set')\n return 
API(**api_config)\n\n\napi = setup_api()\n", "path": "readthedocs/restapi/client.py"}]}
1,258
327
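For context on the record above: the fix mounts a `Retry`-configured adapter so transient 5xx responses from the API are retried with backoff instead of failing the build. A minimal present-day sketch of the same pattern (note urllib3 renamed `method_whitelist` to `allowed_methods` in 1.26; the golden diff uses the older spelling):

```python
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

retry = Retry(
    total=3,
    backoff_factor=0.5,  # sleeps roughly 0.5s, 1s, 2s between attempts
    status_forcelist=(408, 413, 429, 500, 502, 503, 504),
    allowed_methods=("GET", "PUT", "PATCH", "POST"),
)

session = requests.Session()
session.mount("https://", HTTPAdapter(max_retries=retry))
```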
gh_patches_debug_6546
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2645
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider texas_roadhouse is broken During the global build at 2021-08-11-14-42-19, spider **texas_roadhouse** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/texas_roadhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson)) </issue> <code> [start of locations/spiders/texas_roadhouse.py] 1 import json 2 import scrapy 3 from locations.items import GeojsonPointItem 4 from locations.hours import OpeningHours 5 6 7 class TexasRoadhouseSpider(scrapy.Spider): 8 name = "texas_roadhouse" 9 item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" } 10 allowed_domains = ["www.texasroadhouse.com"] 11 start_urls = ( 12 'https://www.texasroadhouse.com/sitemap.xml', 13 ) 14 15 def parse_hours(self, store_hours): 16 opening_hours = OpeningHours() 17 18 for weekday in store_hours: 19 # convert day from full Monday to Mo, etc 20 day = weekday.get('day')[:2] 21 open_time = weekday.get('hours').get('openTime') 22 close_time = weekday.get('hours').get('closeTime') 23 opening_hours.add_range(day=day, 24 open_time=open_time, 25 close_time=close_time, 26 time_format='%I:%M%p') 27 28 return opening_hours.as_opening_hours() 29 30 def parse(self, response): 31 response.selector.remove_namespaces() 32 city_urls = response.xpath('//url/loc/text()').extract() 33 for path in city_urls: 34 if path.startswith('https://www.texasroadhouse.com/locations/'): 35 yield scrapy.Request( 36 path.strip(), 37 callback=self.parse_store, 38 ) 39 40 def parse_store(self, response): 41 data = json.loads(response.xpath('//script/text()').extract_first()[22:-1]) 42 43 properties = { 44 'lat': data['latitude'], 45 'lon': data['longitude'], 46 'ref': data['url'], 47 'name': data['name'], 48 'addr_full': data['address1'], 49 'city': data['city'], 50 'state': data['state'], 51 'postcode': data['postalCode'], 52 'country': data['countryCode'], 53 'phone': data['telephone'], 54 'website': response.urljoin(data['url']), 55 'opening_hours': self.parse_hours(data['schedule']), 56 } 57 58 yield GeojsonPointItem(**properties) 59 [end of locations/spiders/texas_roadhouse.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py --- a/locations/spiders/texas_roadhouse.py +++ b/locations/spiders/texas_roadhouse.py @@ -6,7 +6,10 @@ class TexasRoadhouseSpider(scrapy.Spider): name = "texas_roadhouse" - item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" } + item_attributes = { + 'brand': "Texas Roadhouse", + 'brand_wikidata': "Q7707945", + } allowed_domains = ["www.texasroadhouse.com"] start_urls = ( 'https://www.texasroadhouse.com/sitemap.xml',
{"golden_diff": "diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py\n--- a/locations/spiders/texas_roadhouse.py\n+++ b/locations/spiders/texas_roadhouse.py\n@@ -6,7 +6,10 @@\n \n class TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n- item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n+ item_attributes = {\n+ 'brand': \"Texas Roadhouse\",\n+ 'brand_wikidata': \"Q7707945\",\n+ }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/sitemap.xml',\n", "issue": "Spider texas_roadhouse is broken\nDuring the global build at 2021-08-11-14-42-19, spider **texas_roadhouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/texas_roadhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson))\n", "before_files": [{"content": "import json\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/sitemap.xml',\n )\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n # convert day from full Monday to Mo, etc\n day = weekday.get('day')[:2]\n open_time = weekday.get('hours').get('openTime')\n close_time = weekday.get('hours').get('closeTime')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path.startswith('https://www.texasroadhouse.com/locations/'):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script/text()').extract_first()[22:-1])\n\n properties = {\n 'lat': data['latitude'],\n 'lon': data['longitude'],\n 'ref': data['url'],\n 'name': data['name'],\n 'addr_full': data['address1'],\n 'city': data['city'],\n 'state': data['state'],\n 'postcode': data['postalCode'],\n 'country': data['countryCode'],\n 'phone': data['telephone'],\n 'website': response.urljoin(data['url']),\n 'opening_hours': self.parse_hours(data['schedule']),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/texas_roadhouse.py"}]}
1,295
184
gh_patches_debug_4587
rasdani/github-patches
git_diff
ipython__ipython-11722
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing ship numpy testing decorator ``` Traceback (most recent call last): File "/Users/mbussonnier/dev/cpython/test/bin/iptest", line 6, in <module> from IPython.testing.iptestcontroller import main File "/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptestcontroller.py", line 23, in <module> from .iptest import ( File "/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptest.py", line 40, in <module> from IPython.external.decorators import KnownFailure, dec ImportError: cannot import name 'dec' from 'IPython.external.decorators' (/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/external/decorators/__init__.py) ``` Seem like `dec` is not define ini out `_decorator.py` Appologies for shortness boarding a plane </issue> <code> [start of IPython/external/decorators/__init__.py] 1 try: 2 from numpy.testing import * 3 from numpy.testing import dec 4 from numpy.testing.noseclasses import KnownFailure 5 except ImportError: 6 from ._decorators import * 7 try: 8 from ._numpy_testing_noseclasses import KnownFailure 9 except ImportError: 10 pass 11 [end of IPython/external/decorators/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/IPython/external/decorators/__init__.py b/IPython/external/decorators/__init__.py --- a/IPython/external/decorators/__init__.py +++ b/IPython/external/decorators/__init__.py @@ -1,9 +1,7 @@ try: - from numpy.testing import * - from numpy.testing import dec - from numpy.testing.noseclasses import KnownFailure + from numpy.testing.noseclasses import KnownFailure, knownfailureif except ImportError: - from ._decorators import * + from ._decorators import knownfailureif try: from ._numpy_testing_noseclasses import KnownFailure except ImportError:
{"golden_diff": "diff --git a/IPython/external/decorators/__init__.py b/IPython/external/decorators/__init__.py\n--- a/IPython/external/decorators/__init__.py\n+++ b/IPython/external/decorators/__init__.py\n@@ -1,9 +1,7 @@\n try:\n- from numpy.testing import *\n- from numpy.testing import dec\n- from numpy.testing.noseclasses import KnownFailure\n+ from numpy.testing.noseclasses import KnownFailure, knownfailureif\n except ImportError:\n- from ._decorators import *\n+ from ._decorators import knownfailureif\n try:\n from ._numpy_testing_noseclasses import KnownFailure\n except ImportError:\n", "issue": "Missing ship numpy testing decorator\n```\r\nTraceback (most recent call last):\r\n File \"/Users/mbussonnier/dev/cpython/test/bin/iptest\", line 6, in <module>\r\n from IPython.testing.iptestcontroller import main\r\n File \"/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptestcontroller.py\", line 23, in <module>\r\n from .iptest import (\r\n File \"/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptest.py\", line 40, in <module>\r\n from IPython.external.decorators import KnownFailure, dec\r\nImportError: cannot import name 'dec' from 'IPython.external.decorators' (/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/external/decorators/__init__.py)\r\n```\r\n\r\nSeem like `dec` is not define ini out `_decorator.py` \r\n\r\n\r\nAppologies for shortness boarding a plane\n", "before_files": [{"content": "try:\n from numpy.testing import *\n from numpy.testing import dec\n from numpy.testing.noseclasses import KnownFailure\nexcept ImportError:\n from ._decorators import *\n try:\n from ._numpy_testing_noseclasses import KnownFailure\n except ImportError:\n pass\n", "path": "IPython/external/decorators/__init__.py"}]}
840
151
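For context on the record above: the module prefers numpy's nose-era testing helpers and falls back to vendored copies, and the fix narrows the imports to names that exist on both paths. The general try/except-ImportError fallback pattern looks like this (the fallback class here is a hypothetical stand-in, not IPython's actual vendored plugin):

```python
try:
    # Preferred: the upstream implementation, when the installed numpy has it.
    from numpy.testing.noseclasses import KnownFailure
except ImportError:
    class KnownFailure(Exception):
        """Hypothetical local fallback used when numpy's copy is unavailable."""
```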
gh_patches_debug_17485
rasdani/github-patches
git_diff
kymatio__kymatio-183
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [MAINT] AttributeError: module 'kymatio' has no attribute '__version__' This came up in #172 PEP 396 has introduced the convention of exposing the constant `__version__` to the top-level module At the moment this is in `kymatio.version.version` (so that `version.py` can be loaded in `setup.py` without importing the whole `kymatio` module) https://www.python.org/dev/peps/pep-0396/ Will submit a PR later </issue> <code> [start of kymatio/__init__.py] 1 __all__ = [ 2 'Scattering3D', 3 'Scattering2D', 4 'Scattering1D' 5 ] 6 7 from .scattering2d.scattering2d import Scattering2D 8 from .scattering1d.scattering1d import Scattering1D 9 from .scattering3d.scattering3d import Scattering3D 10 11 # Make sure that DeprecationWarning within this package always gets printed 12 ### Snippet copied from sklearn.__init__ 13 import warnings 14 import re 15 warnings.filterwarnings('always', category=DeprecationWarning, 16 module=r'^{0}.*'.format(re.escape(__name__))) 17 ### End Snippet 18 19 20 [end of kymatio/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kymatio/__init__.py b/kymatio/__init__.py --- a/kymatio/__init__.py +++ b/kymatio/__init__.py @@ -1,13 +1,3 @@ -__all__ = [ - 'Scattering3D', - 'Scattering2D', - 'Scattering1D' - ] - -from .scattering2d.scattering2d import Scattering2D -from .scattering1d.scattering1d import Scattering1D -from .scattering3d.scattering3d import Scattering3D - # Make sure that DeprecationWarning within this package always gets printed ### Snippet copied from sklearn.__init__ import warnings @@ -16,4 +6,14 @@ module=r'^{0}.*'.format(re.escape(__name__))) ### End Snippet +__all__ = [ + 'Scattering1D', + 'Scattering2D', + 'Scattering3D' + ] + +from .scattering1d.scattering1d import Scattering1D +from .scattering2d.scattering2d import Scattering2D +from .scattering3d.scattering3d import Scattering3D +from .version import version as __version__
{"golden_diff": "diff --git a/kymatio/__init__.py b/kymatio/__init__.py\n--- a/kymatio/__init__.py\n+++ b/kymatio/__init__.py\n@@ -1,13 +1,3 @@\n-__all__ = [\n- 'Scattering3D',\n- 'Scattering2D',\n- 'Scattering1D'\n- ]\n-\n-from .scattering2d.scattering2d import Scattering2D\n-from .scattering1d.scattering1d import Scattering1D\n-from .scattering3d.scattering3d import Scattering3D\n-\n # Make sure that DeprecationWarning within this package always gets printed\n ### Snippet copied from sklearn.__init__\n import warnings\n@@ -16,4 +6,14 @@\n module=r'^{0}.*'.format(re.escape(__name__)))\n ### End Snippet\n \n+__all__ = [\n+ 'Scattering1D',\n+ 'Scattering2D',\n+ 'Scattering3D'\n+ ]\n+\n+from .scattering1d.scattering1d import Scattering1D\n+from .scattering2d.scattering2d import Scattering2D\n+from .scattering3d.scattering3d import Scattering3D\n \n+from .version import version as __version__\n", "issue": "[MAINT] AttributeError: module 'kymatio' has no attribute '__version__'\nThis came up in #172\r\n\r\nPEP 396 has introduced the convention of exposing the constant `__version__` to the top-level module\r\nAt the moment this is in `kymatio.version.version` (so that `version.py` can be loaded in `setup.py` without importing the whole `kymatio` module)\r\nhttps://www.python.org/dev/peps/pep-0396/\r\n\r\nWill submit a PR later\r\n\n", "before_files": [{"content": "__all__ = [\n 'Scattering3D',\n 'Scattering2D',\n 'Scattering1D'\n ]\n\nfrom .scattering2d.scattering2d import Scattering2D\nfrom .scattering1d.scattering1d import Scattering1D\nfrom .scattering3d.scattering3d import Scattering3D\n\n# Make sure that DeprecationWarning within this package always gets printed\n### Snippet copied from sklearn.__init__\nimport warnings\nimport re\nwarnings.filterwarnings('always', category=DeprecationWarning,\n module=r'^{0}.*'.format(re.escape(__name__)))\n### End Snippet\n\n\n", "path": "kymatio/__init__.py"}]}
832
290
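For context on the record above: the PEP 396 arrangement keeps the version string in a standalone `version.py` so `setup.py` can read it without importing the whole package, while `__init__.py` re-exports it as `__version__` (the diff's last line). A sketch of how a setup script conventionally reads that file without triggering the package import (the `exec`-namespace trick is the common idiom, not something shown in this record):

```python
# In setup.py: load kymatio/version.py as plain source, no package import.
version_ns = {}
with open("kymatio/version.py") as fh:
    exec(fh.read(), version_ns)

VERSION = version_ns["version"]  # same value later exposed as kymatio.__version__
```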
gh_patches_debug_17251
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-183
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ASGI app broken with asyncio
Our ASGI app is currently broken with async io stuff. The implementation I made needs a bit of love :) We might have a look at how Ariadne is doing this :)

Here's a test that fails:

```python
import typing

import pytest

import strawberry
from starlette.testclient import TestClient
from strawberry.asgi import GraphQL


@pytest.fixture
def async_schema():
    @strawberry.type
    class Query:
        @strawberry.field
        async def hello(self, info, name: typing.Optional[str] = None) -> str:
            return f"Hello {name or 'world'}"

    return strawberry.Schema(Query)


@pytest.fixture
def test_client(async_schema):
    app = GraphQL(async_schema)

    return TestClient(app)


def test_simple_query(schema, test_client):
    response = test_client.post("/", json={"query": "{ hello }"})

    assert response.json() == {"data": {"hello": "Hello world"}}
```
</issue>
<code>
[start of strawberry/graphql.py]
1 import typing
2 
3 from graphql import (
4     ExecutionResult,
5     GraphQLError,
6     GraphQLSchema,
7     execute as graphql_excute,
8     parse,
9 )
10 from graphql.subscription import subscribe as graphql_subscribe
11 from graphql.type import validate_schema
12 from graphql.validation import validate
13 
14 from .middleware import DirectivesMiddleware
15 
16 
17 async def execute(
18     schema: GraphQLSchema,
19     query: str,
20     context_value: typing.Any = None,
21     variable_values: typing.Dict[str, typing.Any] = None,
22     operation_name: str = None,
23 ):
24     schema_validation_errors = validate_schema(schema)
25     if schema_validation_errors:
26         return ExecutionResult(data=None, errors=schema_validation_errors)
27 
28     try:
29         document = parse(query)
30     except GraphQLError as error:
31         return ExecutionResult(data=None, errors=[error])
32     except Exception as error:
33         error = GraphQLError(str(error), original_error=error)
34         return ExecutionResult(data=None, errors=[error])
35 
36     validation_errors = validate(schema, document)
37 
38     if validation_errors:
39         return ExecutionResult(data=None, errors=validation_errors)
40 
41     return graphql_excute(
42         schema,
43         parse(query),
44         middleware=[DirectivesMiddleware()],
45         variable_values=variable_values,
46         operation_name=operation_name,
47         context_value=context_value,
48     )
49 
50 
51 async def subscribe(
52     schema: GraphQLSchema,
53     query: str,
54     root_value: typing.Any = None,
55     context_value: typing.Any = None,
56     variable_values: typing.Dict[str, typing.Any] = None,
57     operation_name: str = None,
58 ) -> typing.Union[typing.AsyncIterator[ExecutionResult], ExecutionResult]:
59     document = parse(query)
60 
61     return await graphql_subscribe(
62         schema=schema,
63         document=document,
64         root_value=root_value,
65         context_value=context_value,
66         variable_values=variable_values,
67         operation_name=operation_name,
68     )
69 
[end of strawberry/graphql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/strawberry/graphql.py b/strawberry/graphql.py --- a/strawberry/graphql.py +++ b/strawberry/graphql.py @@ -1,4 +1,5 @@ import typing +from inspect import isawaitable from graphql import ( ExecutionResult, @@ -38,7 +39,7 @@ if validation_errors: return ExecutionResult(data=None, errors=validation_errors) - return graphql_excute( + result = graphql_excute( schema, parse(query), middleware=[DirectivesMiddleware()], @@ -46,6 +47,9 @@ operation_name=operation_name, context_value=context_value, ) + if isawaitable(result): + result = await typing.cast(typing.Awaitable[ExecutionResult], result) + return result async def subscribe(
{"golden_diff": "diff --git a/strawberry/graphql.py b/strawberry/graphql.py\n--- a/strawberry/graphql.py\n+++ b/strawberry/graphql.py\n@@ -1,4 +1,5 @@\n import typing\n+from inspect import isawaitable\n \n from graphql import (\n ExecutionResult,\n@@ -38,7 +39,7 @@\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n \n- return graphql_excute(\n+ result = graphql_excute(\n schema,\n parse(query),\n middleware=[DirectivesMiddleware()],\n@@ -46,6 +47,9 @@\n operation_name=operation_name,\n context_value=context_value,\n )\n+ if isawaitable(result):\n+ result = await typing.cast(typing.Awaitable[ExecutionResult], result)\n+ return result\n \n \n async def subscribe(\n", "issue": "ASGI app broken with asyncio\nOur ASGI app is currently broken with async io stuff. The implementation I made needs a bit of love :) We might have a look at how Ariadne is doing this :)\r\n\r\nHere's a test that fails:\r\n\r\n```python\r\nimport typing\r\n\r\nimport pytest\r\n\r\nimport strawberry\r\nfrom starlette.testclient import TestClient\r\nfrom strawberry.asgi import GraphQL\r\n\r\n\r\[email protected]\r\ndef async_schema():\r\n @strawberry.type\r\n class Query:\r\n @strawberry.field\r\n async def hello(self, info, name: typing.Optional[str] = None) -> str:\r\n return f\"Hello {name or 'world'}\"\r\n\r\n return strawberry.Schema(Query)\r\n\r\n\r\[email protected]\r\ndef test_client(async_schema):\r\n app = GraphQL(async_schema)\r\n\r\n return TestClient(app)\r\n\r\n\r\ndef test_simple_query(schema, test_client):\r\n response = test_client.post(\"/\", json={\"query\": \"{ hello }\"})\r\n\r\n assert response.json() == {\"data\": {\"hello\": \"Hello world\"}}\r\n```\n", "before_files": [{"content": "import typing\n\nfrom graphql import (\n ExecutionResult,\n GraphQLError,\n GraphQLSchema,\n execute as graphql_excute,\n parse,\n)\nfrom graphql.subscription import subscribe as graphql_subscribe\nfrom graphql.type import validate_schema\nfrom graphql.validation import validate\n\nfrom .middleware import DirectivesMiddleware\n\n\nasync def execute(\n schema: GraphQLSchema,\n query: str,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n):\n schema_validation_errors = validate_schema(schema)\n if schema_validation_errors:\n return ExecutionResult(data=None, errors=schema_validation_errors)\n\n try:\n document = parse(query)\n except GraphQLError as error:\n return ExecutionResult(data=None, errors=[error])\n except Exception as error:\n error = GraphQLError(str(error), original_error=error)\n return ExecutionResult(data=None, errors=[error])\n\n validation_errors = validate(schema, document)\n\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n\n return graphql_excute(\n schema,\n parse(query),\n middleware=[DirectivesMiddleware()],\n variable_values=variable_values,\n operation_name=operation_name,\n context_value=context_value,\n )\n\n\nasync def subscribe(\n schema: GraphQLSchema,\n query: str,\n root_value: typing.Any = None,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n) -> typing.Union[typing.AsyncIterator[ExecutionResult], ExecutionResult]:\n document = parse(query)\n\n return await graphql_subscribe(\n schema=schema,\n document=document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n", "path": 
"strawberry/graphql.py"}]}
1,263
188
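For context on the record above: `graphql.execute` returns either an `ExecutionResult` or an awaitable of one, depending on whether any resolver is async, so the fix normalizes the return value with `inspect.isawaitable` before handing it back. The same normalization pattern in isolation:

```python
import asyncio
from inspect import isawaitable


async def normalize(call):
    result = call()
    if isawaitable(result):  # async resolvers hand back a coroutine
        result = await result
    return result


def sync_resolver():
    return "sync value"


async def async_resolver():
    return "async value"


assert asyncio.run(normalize(sync_resolver)) == "sync value"
assert asyncio.run(normalize(async_resolver)) == "async value"
```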
gh_patches_debug_31569
rasdani/github-patches
git_diff
joke2k__faker-74
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Honor Environment LANG
I am currently using a wrapper for fake-factory to be able to choose the output but it would be great it would become part of fake-factory core.

This is the script i have in my path: https://gist.github.com/makefu/9101269

usage:
$ LANG=de_DE.utf-8 faker address
Davide-Kaul-Weg 175
94892 Königs Wusterhausen

</issue>
<code>
[start of faker/__main__.py]
1 from __future__ import unicode_literals
2 from __future__ import print_function
3 import sys
4 
5 if sys.version < '3':
6     text_type = unicode
7     binary_type = str
8 else:
9     text_type = str
10     binary_type = bytes
11 
12 
13 def print_provider(doc, provider, formatters, excludes=None):
14     if excludes is None:
15         excludes = []
16 
17     print()
18     print("### faker.providers.{0}".format(doc.get_provider_name(provider)))
19     print()
20 
21     for signature, example in formatters.items():
22         if signature in excludes:
23             continue
24         try:
25             lines = text_type(example).expandtabs().splitlines()
26         except UnicodeEncodeError:
27             raise Exception('error on "{0}" with value "{1}"'.format(signature, example))
28         margin = max(30, doc.max_name_len+1)
29         remains = 150 - margin
30         separator = '#'
31         for line in lines:
32             for i in range(0, (len(line) // remains) + 1):
33                 print("\t{fake:<{margin}}{separator} {example}".format(
34                     fake=signature,
35                     separator=separator,
36                     example=line[i*remains:(i+1)*remains],
37                     margin=margin
38                 ))
39                 signature = separator = ' '
40 
41 
42 def main(provider_or_field=None, *args):
43     from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES
44     fake = Faker(locale=DEFAULT_LOCALE)
45 
46     from faker.providers import BaseProvider
47     base_provider_formatters = [f for f in dir(BaseProvider)]
48 
49     if provider_or_field:
50         if '.' in provider_or_field:
51             parts = provider_or_field.split('.')
52             locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else DEFAULT_LOCALE
53             fake = Factory.create(locale, providers=[parts[-1]])
54             doc = documentor.Documentor(fake)
55             doc.already_generated = base_provider_formatters
56             print_provider(doc, fake.get_providers()[0], doc.get_provider_formatters(fake.get_providers()[0]))
57         else:
58             try:
59                 print(fake.format(provider_or_field, *args))
60             except AttributeError:
61                 print('No faker found for "{0}({1})"'.format(provider_or_field, args))
62 
63     else:
64         doc = documentor.Documentor(fake)
65 
66         formatters = doc.get_formatters(with_args=True, with_defaults=True)
67 
68         for provider, fakers in formatters:
69 
70             print_provider(doc, provider, fakers)
71 
72         for lang in AVAILABLE_LOCALES:
73             if lang == DEFAULT_LOCALE:
74                 continue
75             print()
76             print('## LANGUAGE {0}'.format(lang))
77             fake = Faker(locale=lang)
78             d = documentor.Documentor(fake)
79 
80             for p, fs in d.get_formatters(with_args=True, with_defaults=True, locale=lang,
81                                           excludes=base_provider_formatters):
82                 print_provider(d, p, fs)
83 
84 
85 if __name__ == "__main__":
86     if sys.stdout.encoding is None:
87         print("please set python env PYTHONIOENCODING=UTF-8, example: "
88               "export PYTHONIOENCODING=UTF-8, when write to stdout", file=sys.stderr)
89         exit(1)
90     main(*sys.argv[1:])
[end of faker/__main__.py]
[start of setup.py]
1 #!/usr/bin/env python
2 
3 import os
4 from setuptools import setup, find_packages
5 
6 here = os.path.abspath(os.path.dirname(__file__))
7 README = open(os.path.join(here, 'README.md')).read()
8 NEWS = open(os.path.join(here, 'NEWS.txt')).read()
9 
10 
11 version = '0.3.2'
12 
13 setup(name='fake-factory',
14       version=version,
15       description="Faker is a Python package that generates fake data for you.",
16       long_description=README + '\n\n' + NEWS,
17       classifiers=[
18           # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
19           'Development Status :: 3 - Alpha',
20           'Environment :: Console',
21           'Intended Audience :: Developers',
22           'Programming Language :: Python',
23           'Programming Language :: Python :: 2',
24           'Programming Language :: Python :: 3',
25           'Topic :: Software Development :: Libraries :: Python Modules',
26           'Topic :: Software Development :: Testing',
27           'Topic :: Utilities',
28           'License :: OSI Approved :: MIT License'
29       ],
30       keywords='faker fixtures data test mock generator',
31       author='joke2k',
32       author_email='joke2k@gmail.com',
33       url='http://github.com/joke2k/faker',
34       license='MIT License',
35       packages=find_packages(exclude=['*.tests']),
36       platforms=["any"],
37       test_suite='faker.tests',
38       )
39 
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/faker/__main__.py b/faker/__main__.py --- a/faker/__main__.py +++ b/faker/__main__.py @@ -1,5 +1,9 @@ from __future__ import unicode_literals from __future__ import print_function + +from faker import Faker, Factory, documentor, AVAILABLE_LOCALES, DEFAULT_LOCALE + +import os import sys if sys.version < '3': @@ -40,7 +44,6 @@ def main(provider_or_field=None, *args): - from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES fake = Faker(locale=DEFAULT_LOCALE) from faker.providers import BaseProvider @@ -82,6 +85,28 @@ print_provider(d, p, fs) +def command(*args): + try: + f = args[0] + except IndexError: + f = '--help' + + if f in ["--help", "-h"]: + main() + sys.exit() + + locale = os.environ['LANG'].split('.')[0] + + if locale not in AVAILABLE_LOCALES: + locale = DEFAULT_LOCALE + fake = Faker(locale=locale) + + try: + print(fake.format(f, *args[1:])) + except AttributeError: + print('No faker found for "{0}"'.format(f)) + + if __name__ == "__main__": if sys.stdout.encoding is None: print("please set python env PYTHONIOENCODING=UTF-8, example: " diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -14,6 +14,7 @@ version=version, description="Faker is a Python package that generates fake data for you.", long_description=README + '\n\n' + NEWS, + scripts=['bin/faker'], classifiers=[ # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers 'Development Status :: 3 - Alpha',
{"golden_diff": "diff --git a/faker/__main__.py b/faker/__main__.py\n--- a/faker/__main__.py\n+++ b/faker/__main__.py\n@@ -1,5 +1,9 @@\n from __future__ import unicode_literals\n from __future__ import print_function\n+\n+from faker import Faker, Factory, documentor, AVAILABLE_LOCALES, DEFAULT_LOCALE\n+\n+import os\n import sys\n \n if sys.version < '3':\n@@ -40,7 +44,6 @@\n \n \n def main(provider_or_field=None, *args):\n- from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES\n fake = Faker(locale=DEFAULT_LOCALE)\n \n from faker.providers import BaseProvider\n@@ -82,6 +85,28 @@\n print_provider(d, p, fs)\n \n \n+def command(*args):\n+ try:\n+ f = args[0]\n+ except IndexError:\n+ f = '--help'\n+\n+ if f in [\"--help\", \"-h\"]:\n+ main()\n+ sys.exit()\n+\n+ locale = os.environ['LANG'].split('.')[0]\n+ \n+ if locale not in AVAILABLE_LOCALES:\n+ locale = DEFAULT_LOCALE\n+ fake = Faker(locale=locale)\n+\n+ try:\n+ print(fake.format(f, *args[1:]))\n+ except AttributeError:\n+ print('No faker found for \"{0}\"'.format(f))\n+\n+\n if __name__ == \"__main__\":\n if sys.stdout.encoding is None:\n print(\"please set python env PYTHONIOENCODING=UTF-8, example: \"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,7 @@\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n+ scripts=['bin/faker'],\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n", "issue": "Honor Environment LANG\nI am currently using a wrapper for fake-factory to be able to choose the output but it would be great it would become part of fake-factory core.\n\nThis is the script i have in my path: https://gist.github.com/makefu/9101269\n\nusage: \n$ LANG=de_DE.utf-8 faker address\n Davide-Kaul-Weg 175\n 94892 K\u00f6nigs Wusterhausen\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import print_function\nimport sys\n\nif sys.version < '3':\n text_type = unicode\n binary_type = str\nelse:\n text_type = str\n binary_type = bytes\n\n\ndef print_provider(doc, provider, formatters, excludes=None):\n if excludes is None:\n excludes = []\n\n print()\n print(\"### faker.providers.{0}\".format(doc.get_provider_name(provider)))\n print()\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n lines = text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n raise Exception('error on \"{0}\" with value \"{1}\"'.format(signature, example))\n margin = max(30, doc.max_name_len+1)\n remains = 150 - margin\n separator = '#'\n for line in lines:\n for i in range(0, (len(line) // remains) + 1):\n print(\"\\t{fake:<{margin}}{separator} {example}\".format(\n fake=signature,\n separator=separator,\n example=line[i*remains:(i+1)*remains],\n margin=margin\n ))\n signature = separator = ' '\n\n\ndef main(provider_or_field=None, *args):\n from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n if provider_or_field:\n if '.' 
in provider_or_field:\n parts = provider_or_field.split('.')\n locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else DEFAULT_LOCALE\n fake = Factory.create(locale, providers=[parts[-1]])\n doc = documentor.Documentor(fake)\n doc.already_generated = base_provider_formatters\n print_provider(doc, fake.get_providers()[0], doc.get_provider_formatters(fake.get_providers()[0]))\n else:\n try:\n print(fake.format(provider_or_field, *args))\n except AttributeError:\n print('No faker found for \"{0}({1})\"'.format(provider_or_field, args))\n\n else:\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n\n print_provider(doc, provider, fakers)\n\n for lang in AVAILABLE_LOCALES:\n if lang == DEFAULT_LOCALE:\n continue\n print()\n print('## LANGUAGE {0}'.format(lang))\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True, locale=lang,\n excludes=base_provider_formatters):\n print_provider(d, p, fs)\n\n\nif __name__ == \"__main__\":\n if sys.stdout.encoding is None:\n print(\"please set python env PYTHONIOENCODING=UTF-8, example: \"\n \"export PYTHONIOENCODING=UTF-8, when write to stdout\", file=sys.stderr)\n exit(1)\n main(*sys.argv[1:])", "path": "faker/__main__.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.md')).read()\nNEWS = open(os.path.join(here, 'NEWS.txt')).read()\n\n\nversion = '0.3.2'\n\nsetup(name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n )\n", "path": "setup.py"}]}
1,892
465
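For context on the record above: the core of the committed fix is the locale lookup inside the new `command()` entry point — take `LANG`, strip the encoding suffix, and fall back to the default when the locale is unknown. A standalone sketch follows; `AVAILABLE_LOCALES` is stubbed here rather than imported from faker, and `os.environ.get` is used so the sketch also tolerates an unset `LANG` (the patch itself indexes `os.environ['LANG']` directly).

import os

AVAILABLE_LOCALES = {"de_DE", "en_US", "it_IT"}  # stand-in for faker's real list
DEFAULT_LOCALE = "en_US"

def resolve_locale():
    locale = os.environ.get("LANG", "").split(".")[0]  # "de_DE.utf-8" -> "de_DE"
    return locale if locale in AVAILABLE_LOCALES else DEFAULT_LOCALE

os.environ["LANG"] = "de_DE.utf-8"
assert resolve_locale() == "de_DE"   # honoured, as in the issue's example
os.environ["LANG"] = "xx_XX.utf-8"
assert resolve_locale() == "en_US"   # unknown locale falls back to the default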
gh_patches_debug_35374
rasdani/github-patches
git_diff
googleapis__google-cloud-python-3156
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Language: support mention type in Entity.mentions. [Currently](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/language/google/cloud/language/entity.py#L79) the mentions property of an entity is only a list of strings whereas it should be a list of objects containing the mention text and mention type. Furthermore, this change should add mention_type information to the mention documentation. </issue> <code> [start of language/google/cloud/language/entity.py] 1 # Copyright 2016-2017 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Definition for Google Cloud Natural Language API entities. 16 17 An entity is used to describe a proper name extracted from text. 18 """ 19 20 21 class EntityType(object): 22 """List of possible entity types.""" 23 24 UNKNOWN = 'UNKNOWN' 25 """Unknown entity type.""" 26 27 PERSON = 'PERSON' 28 """Person entity type.""" 29 30 LOCATION = 'LOCATION' 31 """Location entity type.""" 32 33 ORGANIZATION = 'ORGANIZATION' 34 """Organization entity type.""" 35 36 EVENT = 'EVENT' 37 """Event entity type.""" 38 39 WORK_OF_ART = 'WORK_OF_ART' 40 """Work of art entity type.""" 41 42 CONSUMER_GOOD = 'CONSUMER_GOOD' 43 """Consumer good entity type.""" 44 45 OTHER = 'OTHER' 46 """Other entity type (i.e. known but not classified).""" 47 48 49 class Entity(object): 50 """A Google Cloud Natural Language API entity. 51 52 Represents a phrase in text that is a known entity, such as a person, 53 an organization, or location. The API associates information, such as 54 salience and mentions, with entities. 55 56 .. _Entity message: https://cloud.google.com/natural-language/\ 57 reference/rest/v1/Entity 58 .. _EntityType enum: https://cloud.google.com/natural-language/\ 59 reference/rest/v1/Entity#Type 60 61 See `Entity message`_. 62 63 :type name: str 64 :param name: The name / phrase identified as the entity. 65 66 :type entity_type: str 67 :param entity_type: The type of the entity. See `EntityType enum`_. 68 69 :type metadata: dict 70 :param metadata: The metadata associated with the entity. 71 Wikipedia URLs and Knowledge Graph MIDs are 72 provided, if available. The associated keys are 73 "wikipedia_url" and "mid", respectively. 74 75 :type salience: float 76 :param salience: The prominence of the entity / phrase within the text 77 containing it. 78 79 :type mentions: list 80 :param mentions: List of strings that mention the entity. 81 """ 82 83 def __init__(self, name, entity_type, metadata, salience, mentions): 84 self.name = name 85 self.entity_type = entity_type 86 self.metadata = metadata 87 self.salience = salience 88 self.mentions = mentions 89 90 @classmethod 91 def from_api_repr(cls, payload): 92 """Convert an Entity from the JSON API into an :class:`Entity`. 93 94 :param payload: dict 95 :type payload: The value from the backend. 
96 97 :rtype: :class:`Entity` 98 :returns: The entity parsed from the API representation. 99 """ 100 name = payload['name'] 101 entity_type = payload['type'] 102 metadata = payload['metadata'] 103 salience = payload['salience'] 104 mentions = [value['text']['content'] 105 for value in payload['mentions']] 106 return cls(name, entity_type, metadata, salience, mentions) 107 [end of language/google/cloud/language/entity.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/language/google/cloud/language/entity.py b/language/google/cloud/language/entity.py --- a/language/google/cloud/language/entity.py +++ b/language/google/cloud/language/entity.py @@ -46,6 +46,80 @@ """Other entity type (i.e. known but not classified).""" +class MentionType(object): + """List of possible mention types.""" + + TYPE_UNKNOWN = 'TYPE_UNKNOWN' + """Unknown mention type""" + + PROPER = 'PROPER' + """Proper name""" + + COMMON = 'COMMON' + """Common noun (or noun compound)""" + + +class Mention(object): + """A Google Cloud Natural Language API mention. + + Represents a mention for an entity in the text. Currently, proper noun + mentions are supported. + """ + def __init__(self, text, mention_type): + self.text = text + self.mention_type = mention_type + + def __str__(self): + return str(self.text) + + @classmethod + def from_api_repr(cls, payload): + """Convert a Mention from the JSON API into an :class:`Mention`. + + :param payload: dict + :type payload: The value from the backend. + + :rtype: :class:`Mention` + :returns: The mention parsed from the API representation. + """ + text = TextSpan.from_api_repr(payload['text']) + mention_type = payload['type'] + return cls(text, mention_type) + + +class TextSpan(object): + """A span of text from Google Cloud Natural Language API. + + Represents a word or phrase of text, as well as its offset + from the original document. + """ + def __init__(self, content, begin_offset): + self.content = content + self.begin_offset = begin_offset + + def __str__(self): + """Return the string representation of this TextSpan. + + :rtype: str + :returns: The text content + """ + return self.content + + @classmethod + def from_api_repr(cls, payload): + """Convert a TextSpan from the JSON API into an :class:`TextSpan`. + + :param payload: dict + :type payload: The value from the backend. + + :rtype: :class:`TextSpan` + :returns: The text span parsed from the API representation. + """ + content = payload['content'] + begin_offset = payload['beginOffset'] + return cls(content=content, begin_offset=begin_offset) + + class Entity(object): """A Google Cloud Natural Language API entity. @@ -101,6 +175,5 @@ entity_type = payload['type'] metadata = payload['metadata'] salience = payload['salience'] - mentions = [value['text']['content'] - for value in payload['mentions']] + mentions = [Mention.from_api_repr(val) for val in payload['mentions']] return cls(name, entity_type, metadata, salience, mentions)
{"golden_diff": "diff --git a/language/google/cloud/language/entity.py b/language/google/cloud/language/entity.py\n--- a/language/google/cloud/language/entity.py\n+++ b/language/google/cloud/language/entity.py\n@@ -46,6 +46,80 @@\n \"\"\"Other entity type (i.e. known but not classified).\"\"\"\n \n \n+class MentionType(object):\n+ \"\"\"List of possible mention types.\"\"\"\n+\n+ TYPE_UNKNOWN = 'TYPE_UNKNOWN'\n+ \"\"\"Unknown mention type\"\"\"\n+\n+ PROPER = 'PROPER'\n+ \"\"\"Proper name\"\"\"\n+\n+ COMMON = 'COMMON'\n+ \"\"\"Common noun (or noun compound)\"\"\"\n+\n+\n+class Mention(object):\n+ \"\"\"A Google Cloud Natural Language API mention.\n+\n+ Represents a mention for an entity in the text. Currently, proper noun\n+ mentions are supported.\n+ \"\"\"\n+ def __init__(self, text, mention_type):\n+ self.text = text\n+ self.mention_type = mention_type\n+\n+ def __str__(self):\n+ return str(self.text)\n+\n+ @classmethod\n+ def from_api_repr(cls, payload):\n+ \"\"\"Convert a Mention from the JSON API into an :class:`Mention`.\n+\n+ :param payload: dict\n+ :type payload: The value from the backend.\n+\n+ :rtype: :class:`Mention`\n+ :returns: The mention parsed from the API representation.\n+ \"\"\"\n+ text = TextSpan.from_api_repr(payload['text'])\n+ mention_type = payload['type']\n+ return cls(text, mention_type)\n+\n+\n+class TextSpan(object):\n+ \"\"\"A span of text from Google Cloud Natural Language API.\n+\n+ Represents a word or phrase of text, as well as its offset\n+ from the original document.\n+ \"\"\"\n+ def __init__(self, content, begin_offset):\n+ self.content = content\n+ self.begin_offset = begin_offset\n+\n+ def __str__(self):\n+ \"\"\"Return the string representation of this TextSpan.\n+\n+ :rtype: str\n+ :returns: The text content\n+ \"\"\"\n+ return self.content\n+\n+ @classmethod\n+ def from_api_repr(cls, payload):\n+ \"\"\"Convert a TextSpan from the JSON API into an :class:`TextSpan`.\n+\n+ :param payload: dict\n+ :type payload: The value from the backend.\n+\n+ :rtype: :class:`TextSpan`\n+ :returns: The text span parsed from the API representation.\n+ \"\"\"\n+ content = payload['content']\n+ begin_offset = payload['beginOffset']\n+ return cls(content=content, begin_offset=begin_offset)\n+\n+\n class Entity(object):\n \"\"\"A Google Cloud Natural Language API entity.\n \n@@ -101,6 +175,5 @@\n entity_type = payload['type']\n metadata = payload['metadata']\n salience = payload['salience']\n- mentions = [value['text']['content']\n- for value in payload['mentions']]\n+ mentions = [Mention.from_api_repr(val) for val in payload['mentions']]\n return cls(name, entity_type, metadata, salience, mentions)\n", "issue": "Language: support mention type in Entity.mentions.\n[Currently](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/language/google/cloud/language/entity.py#L79) the mentions property of an entity is only a list of strings whereas it should be a list of objects containing the mention text and mention type.\r\n\r\nFurthermore, this change should add mention_type information to the mention documentation.\n", "before_files": [{"content": "# Copyright 2016-2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API entities.\n\nAn entity is used to describe a proper name extracted from text.\n\"\"\"\n\n\nclass EntityType(object):\n \"\"\"List of possible entity types.\"\"\"\n\n UNKNOWN = 'UNKNOWN'\n \"\"\"Unknown entity type.\"\"\"\n\n PERSON = 'PERSON'\n \"\"\"Person entity type.\"\"\"\n\n LOCATION = 'LOCATION'\n \"\"\"Location entity type.\"\"\"\n\n ORGANIZATION = 'ORGANIZATION'\n \"\"\"Organization entity type.\"\"\"\n\n EVENT = 'EVENT'\n \"\"\"Event entity type.\"\"\"\n\n WORK_OF_ART = 'WORK_OF_ART'\n \"\"\"Work of art entity type.\"\"\"\n\n CONSUMER_GOOD = 'CONSUMER_GOOD'\n \"\"\"Consumer good entity type.\"\"\"\n\n OTHER = 'OTHER'\n \"\"\"Other entity type (i.e. known but not classified).\"\"\"\n\n\nclass Entity(object):\n \"\"\"A Google Cloud Natural Language API entity.\n\n Represents a phrase in text that is a known entity, such as a person,\n an organization, or location. The API associates information, such as\n salience and mentions, with entities.\n\n .. _Entity message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity\n .. _EntityType enum: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity#Type\n\n See `Entity message`_.\n\n :type name: str\n :param name: The name / phrase identified as the entity.\n\n :type entity_type: str\n :param entity_type: The type of the entity. See `EntityType enum`_.\n\n :type metadata: dict\n :param metadata: The metadata associated with the entity.\n Wikipedia URLs and Knowledge Graph MIDs are\n provided, if available. The associated keys are\n \"wikipedia_url\" and \"mid\", respectively.\n\n :type salience: float\n :param salience: The prominence of the entity / phrase within the text\n containing it.\n\n :type mentions: list\n :param mentions: List of strings that mention the entity.\n \"\"\"\n\n def __init__(self, name, entity_type, metadata, salience, mentions):\n self.name = name\n self.entity_type = entity_type\n self.metadata = metadata\n self.salience = salience\n self.mentions = mentions\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert an Entity from the JSON API into an :class:`Entity`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Entity`\n :returns: The entity parsed from the API representation.\n \"\"\"\n name = payload['name']\n entity_type = payload['type']\n metadata = payload['metadata']\n salience = payload['salience']\n mentions = [value['text']['content']\n for value in payload['mentions']]\n return cls(name, entity_type, metadata, salience, mentions)\n", "path": "language/google/cloud/language/entity.py"}]}
1,599
690
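For context on the record above: the shape of the change is that `Entity.mentions` stops being a list of bare strings and starts carrying each mention's text span plus its type. The dependency-free restatement below shows the parsing the new `Mention.from_api_repr`/`TextSpan.from_api_repr` pair performs; the dict keys mirror the payload fields used in the diff ('text', 'content', 'beginOffset', 'type'), and the sample payload is invented for illustration.

def parse_mentions(payload):
    parsed = []
    for mention in payload["mentions"]:
        parsed.append({
            "content": mention["text"]["content"],           # TextSpan.content
            "begin_offset": mention["text"]["beginOffset"],  # TextSpan.begin_offset
            "mention_type": mention["type"],                 # Mention.mention_type
        })
    return parsed

sample = {"mentions": [
    {"text": {"content": "Google", "beginOffset": 0}, "type": "PROPER"},
    {"text": {"content": "company", "beginOffset": 20}, "type": "COMMON"},
]}
print(parse_mentions(sample))
# [{'content': 'Google', 'begin_offset': 0, 'mention_type': 'PROPER'}, ...]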
gh_patches_debug_1416
rasdani/github-patches
git_diff
frappe__frappe-4871
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `Insufficient Permission for Chat Profile` on load Use Case: 1. Create New User without System Manager Role 2. Log-in to System 3. Error Pops-up when system is trying to create Chat Profile. I tried replicating it to a user that was once has a System Manager role and it was not popping up. It was because the user has already Chat Profile. Here is the log. Hope this helps! ![profile](https://user-images.githubusercontent.com/21003054/35091472-ba84aa68-fc77-11e7-860e-789961c1a09e.png) Any help is greatly appreciated. I will also try to create an issue for this here in discuss. Thanks, Dori </issue> <code> [start of frappe/chat/doctype/chat_profile/chat_profile.py] 1 # imports - module imports 2 from frappe.model.document import Document 3 from frappe import _ 4 import frappe 5 6 # imports - frappe module imports 7 from frappe.core.doctype.version.version import get_diff 8 from frappe.chat.doctype.chat_room import chat_room 9 from frappe.chat.util import ( 10 safe_json_loads, 11 filter_dict, 12 dictify 13 ) 14 15 session = frappe.session 16 17 class ChatProfile(Document): 18 def before_save(self): 19 if not self.is_new(): 20 self.get_doc_before_save() 21 22 def on_update(self): 23 if not self.is_new(): 24 b, a = self.get_doc_before_save(), self 25 diff = dictify(get_diff(a, b)) 26 if diff: 27 user = session.user 28 29 fields = [changed[0] for changed in diff.changed] 30 31 if 'status' in fields: 32 rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct']) 33 update = dict(user = user, data = dict(status = self.status)) 34 35 for room in rooms: 36 frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True) 37 38 if 'enable_chat' in fields: 39 update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat))) 40 frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True) 41 42 def authenticate(user): 43 if user != session.user: 44 frappe.throw(_("Sorry, you're not authorized.")) 45 46 @frappe.whitelist() 47 def get(user, fields = None): 48 duser = frappe.get_doc('User', user) 49 dprof = frappe.get_doc('Chat Profile', user) 50 51 # If you're adding something here, make sure the client recieves it. 
52 profile = dict( 53 # User 54 name = duser.name, 55 email = duser.email, 56 first_name = duser.first_name, 57 last_name = duser.last_name, 58 username = duser.username, 59 avatar = duser.user_image, 60 bio = duser.bio, 61 # Chat Profile 62 status = dprof.status, 63 chat_background = dprof.chat_background, 64 message_preview = bool(dprof.message_preview), 65 notification_tones = bool(dprof.notification_tones), 66 conversation_tones = bool(dprof.conversation_tones), 67 enable_chat = bool(dprof.enable_chat) 68 ) 69 profile = filter_dict(profile, fields) 70 71 return dictify(profile) 72 73 @frappe.whitelist() 74 def create(user, exists_ok = False, fields = None): 75 authenticate(user) 76 77 exists_ok, fields = safe_json_loads(exists_ok, fields) 78 79 if frappe.db.exists('Chat Profile', user): 80 if not exists_ok: 81 frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user))) 82 else: 83 dprof = frappe.new_doc('Chat Profile') 84 dprof.user = user 85 dprof.save() 86 87 profile = get(user, fields = fields) 88 89 return profile 90 91 @frappe.whitelist() 92 def update(user, data): 93 authenticate(user) 94 95 data = safe_json_loads(data) 96 97 dprof = frappe.get_doc('Chat Profile', user) 98 dprof.update(data) 99 dprof.save(ignore_permissions = True) [end of frappe/chat/doctype/chat_profile/chat_profile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/frappe/chat/doctype/chat_profile/chat_profile.py b/frappe/chat/doctype/chat_profile/chat_profile.py --- a/frappe/chat/doctype/chat_profile/chat_profile.py +++ b/frappe/chat/doctype/chat_profile/chat_profile.py @@ -82,7 +82,7 @@ else: dprof = frappe.new_doc('Chat Profile') dprof.user = user - dprof.save() + dprof.save(ignore_permissions = True) profile = get(user, fields = fields)
{"golden_diff": "diff --git a/frappe/chat/doctype/chat_profile/chat_profile.py b/frappe/chat/doctype/chat_profile/chat_profile.py\n--- a/frappe/chat/doctype/chat_profile/chat_profile.py\n+++ b/frappe/chat/doctype/chat_profile/chat_profile.py\n@@ -82,7 +82,7 @@\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n- dprof.save()\n+ dprof.save(ignore_permissions = True)\n \n profile = get(user, fields = fields)\n", "issue": "`Insufficient Permission for Chat Profile` on load\nUse Case:\r\n\r\n1. Create New User without System Manager Role\r\n2. Log-in to System\r\n3. Error Pops-up when system is trying to create Chat Profile.\r\n\r\nI tried replicating it to a user that was once has a System Manager role and it was not popping up. It was because the user has already Chat Profile.\r\n\r\nHere is the log. Hope this helps!\r\n\r\n![profile](https://user-images.githubusercontent.com/21003054/35091472-ba84aa68-fc77-11e7-860e-789961c1a09e.png)\r\n\r\n\r\nAny help is greatly appreciated. I will also try to create an issue for this here in discuss.\r\n\r\nThanks,\r\n\r\nDori\n", "before_files": [{"content": "# imports - module imports\nfrom frappe.model.document import Document\nfrom frappe import _\nimport frappe\n\n# imports - frappe module imports\nfrom frappe.core.doctype.version.version import get_diff\nfrom frappe.chat.doctype.chat_room import chat_room\nfrom frappe.chat.util import (\n safe_json_loads,\n filter_dict,\n dictify\n)\n\nsession = frappe.session\n\nclass ChatProfile(Document):\n def before_save(self):\n if not self.is_new():\n self.get_doc_before_save()\n\n def on_update(self):\n if not self.is_new():\n b, a = self.get_doc_before_save(), self\n diff = dictify(get_diff(a, b))\n if diff:\n user = session.user\n\n fields = [changed[0] for changed in diff.changed]\n\n if 'status' in fields:\n rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])\n update = dict(user = user, data = dict(status = self.status))\n\n for room in rooms:\n frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)\n\n if 'enable_chat' in fields:\n update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))\n frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)\n\ndef authenticate(user):\n if user != session.user:\n frappe.throw(_(\"Sorry, you're not authorized.\"))\n\[email protected]()\ndef get(user, fields = None):\n duser = frappe.get_doc('User', user)\n dprof = frappe.get_doc('Chat Profile', user)\n\n # If you're adding something here, make sure the client recieves it.\n profile = dict(\n # User\n name = duser.name,\n email = duser.email,\n first_name = duser.first_name,\n last_name = duser.last_name,\n username = duser.username,\n avatar = duser.user_image,\n bio = duser.bio,\n # Chat Profile\n status = dprof.status,\n chat_background = dprof.chat_background,\n message_preview = bool(dprof.message_preview),\n notification_tones = bool(dprof.notification_tones),\n conversation_tones = bool(dprof.conversation_tones),\n enable_chat = bool(dprof.enable_chat)\n )\n profile = filter_dict(profile, fields)\n\n return dictify(profile)\n\[email protected]()\ndef create(user, exists_ok = False, fields = None):\n authenticate(user)\n\n exists_ok, fields = safe_json_loads(exists_ok, fields)\n\n if frappe.db.exists('Chat Profile', user):\n if not exists_ok:\n frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))\n else:\n dprof = frappe.new_doc('Chat 
Profile')\n dprof.user = user\n dprof.save()\n\n profile = get(user, fields = fields)\n\n return profile\n\[email protected]()\ndef update(user, data):\n authenticate(user)\n\n data = safe_json_loads(data)\n\n dprof = frappe.get_doc('Chat Profile', user)\n dprof.update(data)\n dprof.save(ignore_permissions = True)", "path": "frappe/chat/doctype/chat_profile/chat_profile.py"}]}
1,649
115
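For context on the record above: the entire fix is the `ignore_permissions=True` flag, since a freshly logged-in user has no write permission on the Chat Profile doctype and the server-side bootstrap must bypass that check. The toy below models the permission gate without frappe; the class, exception, and role names are illustrative stand-ins only.

class InsufficientPermission(Exception):
    pass

class Doc:
    def __init__(self, doctype, user_roles):
        self.doctype = doctype
        self.user_roles = user_roles

    def save(self, ignore_permissions=False):
        # crude stand-in for frappe's permission check on write
        if not ignore_permissions and "System Manager" not in self.user_roles:
            raise InsufficientPermission("Insufficient Permission for " + self.doctype)
        return "saved"

profile = Doc("Chat Profile", user_roles=["Employee"])
try:
    profile.save()                            # pre-fix behaviour: raises on first login
except InsufficientPermission as exc:
    print(exc)
print(profile.save(ignore_permissions=True))  # patched call: succeeds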
gh_patches_debug_23168
rasdani/github-patches
git_diff
feast-dev__feast-983
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Go SDK: go get by Version Tag Does Not Work ## Problem & Expected Behavour `go get` to install the Feast Go SDK should work by version tag: ``` go get github.com/feast-dev/feast/sdk/[email protected] ``` Returns: ``` go get github.com/feast-dev/feast/sdk/[email protected]: module github.com/feast-dev/[email protected] found, but does not contain package github.com/feast-dev/feast/sdk/go ``` Instead of installing the Go SDK at release version tag v0.5.1. ## Proposed Solution - Update [setup.py](https://github.com/feast-dev/feast/blob/89883d418b4935d595585689d63d246ad133cabe/sdk/python/setup.py#L86) to support non semver tags. (ie `sdk/go/v0.5.1`) - Introduce additional tag `sdk/go/v<VERSION>` each release. This tag will allow us to pull the go module by a versioned tag. [Reference](https://github.com/golang/go/wiki/Modules#faqs--multi-module-repositories) </issue> <code> [start of sdk/python/setup.py] 1 # Copyright 2019 The Feast Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 import subprocess 17 18 from setuptools import find_packages, setup 19 20 NAME = "feast" 21 DESCRIPTION = "Python SDK for Feast" 22 URL = "https://github.com/feast-dev/feast" 23 AUTHOR = "Feast" 24 REQUIRES_PYTHON = ">=3.6.0" 25 26 REQUIRED = [ 27 "Click==7.*", 28 "google-api-core==1.20.*", 29 "google-auth<2.0dev,>=1.14.0", 30 "google-cloud-bigquery==1.18.*", 31 "google-cloud-storage==1.20.*", 32 "google-cloud-core==1.0.*", 33 "googleapis-common-protos==1.*", 34 "google-cloud-bigquery-storage==0.7.*", 35 "grpcio==1.*", 36 "pandas~=1.0.0", 37 "pandavro==1.5.*", 38 "protobuf>=3.10", 39 "PyYAML==5.1.*", 40 "fastavro>=0.22.11,<0.23", 41 "kafka-python==1.*", 42 "tabulate==0.8.*", 43 "toml==0.10.*", 44 "tqdm==4.*", 45 "pyarrow<0.16.0,>=0.15.1", 46 "numpy", 47 "google", 48 "confluent_kafka", 49 ] 50 51 # README file from Feast repo root directory 52 repo_root = ( 53 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE) 54 .communicate()[0] 55 .rstrip() 56 .decode("utf-8") 57 ) 58 README_FILE = os.path.join(repo_root, "README.md") 59 with open(os.path.join(README_FILE), "r") as f: 60 LONG_DESCRIPTION = f.read() 61 62 setup( 63 name=NAME, 64 author=AUTHOR, 65 description=DESCRIPTION, 66 long_description=LONG_DESCRIPTION, 67 long_description_content_type="text/markdown", 68 python_requires=REQUIRES_PYTHON, 69 url=URL, 70 packages=find_packages(exclude=("tests",)), 71 install_requires=REQUIRED, 72 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements 73 # Install dev requirements with: pip install -e .[dev] 74 extras_require={"dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"]}, 75 include_package_data=True, 76 license="Apache", 77 classifiers=[ 78 # Trove classifiers 79 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers 80 "License :: OSI Approved :: Apache Software License", 81 "Programming Language :: 
Python", 82 "Programming Language :: Python :: 3", 83 "Programming Language :: Python :: 3.6", 84 ], 85 entry_points={"console_scripts": ["feast=feast.cli:cli"]}, 86 use_scm_version={"root": "../..", "relative_to": __file__}, 87 setup_requires=["setuptools_scm"], 88 ) 89 [end of sdk/python/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/setup.py b/sdk/python/setup.py --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import re import subprocess from setuptools import find_packages, setup @@ -59,6 +60,13 @@ with open(os.path.join(README_FILE), "r") as f: LONG_DESCRIPTION = f.read() +# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm. +# Regex modified from default tag regex in: +# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9 +TAG_REGEX = re.compile( + r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$" +) + setup( name=NAME, author=AUTHOR, @@ -83,6 +91,6 @@ "Programming Language :: Python :: 3.6", ], entry_points={"console_scripts": ["feast=feast.cli:cli"]}, - use_scm_version={"root": "../..", "relative_to": __file__}, + use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX}, setup_requires=["setuptools_scm"], )
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import os\n+import re\n import subprocess\n \n from setuptools import find_packages, setup\n@@ -59,6 +60,13 @@\n with open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n \n+# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n+# Regex modified from default tag regex in:\n+# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\n+TAG_REGEX = re.compile(\n+ r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n+)\n+\n setup(\n name=NAME,\n author=AUTHOR,\n@@ -83,6 +91,6 @@\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n- use_scm_version={\"root\": \"../..\", \"relative_to\": __file__},\n+ use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\"],\n )\n", "issue": "Go SDK: go get by Version Tag Does Not Work\n## Problem & Expected Behavour\r\n`go get` to install the Feast Go SDK should work by version tag:\r\n```\r\ngo get github.com/feast-dev/feast/sdk/[email protected]\r\n```\r\nReturns: \r\n```\r\ngo get github.com/feast-dev/feast/sdk/[email protected]: module github.com/feast-dev/[email protected] found, but does not contain package github.com/feast-dev/feast/sdk/go\r\n```\r\nInstead of installing the Go SDK at release version tag v0.5.1.\r\n\r\n## Proposed Solution\r\n- Update [setup.py](https://github.com/feast-dev/feast/blob/89883d418b4935d595585689d63d246ad133cabe/sdk/python/setup.py#L86) to support non semver tags. (ie `sdk/go/v0.5.1`)\r\n- Introduce additional tag `sdk/go/v<VERSION>` each release. This tag will allow us to pull the go module by a versioned tag. 
[Reference](https://github.com/golang/go/wiki/Modules#faqs--multi-module-repositories)\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"google-api-core==1.20.*\",\n \"google-auth<2.0dev,>=1.14.0\",\n \"google-cloud-bigquery==1.18.*\",\n \"google-cloud-storage==1.20.*\",\n \"google-cloud-core==1.0.*\",\n \"googleapis-common-protos==1.*\",\n \"google-cloud-bigquery-storage==0.7.*\",\n \"grpcio==1.*\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"PyYAML==5.1.*\",\n \"fastavro>=0.22.11,<0.23\",\n \"kafka-python==1.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n \"pyarrow<0.16.0,>=0.15.1\",\n \"numpy\",\n \"google\",\n \"confluent_kafka\",\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"]},\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__},\n setup_requires=[\"setuptools_scm\"],\n)\n", "path": "sdk/python/setup.py"}]}
1,776
359
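For context on the record above: `TAG_REGEX` is the whole trick, because setuptools_scm's default tag pattern rejects prefixed tags such as `sdk/go/v0.5.1`. The check below copies the regex verbatim from the diff and exercises it; note that the greedy prefix group consumes any leading path segments (and a bare 'v'), so the named group comes out as the numeric version.

import re

TAG_REGEX = re.compile(
    r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
)

for tag in ("v0.5.1", "sdk/go/v0.5.1", "feast-0.5.1"):
    match = TAG_REGEX.match(tag)
    print(tag, "->", match.group("version") if match else "no match")
# all three tags match, each yielding a version group of "0.5.1"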
gh_patches_debug_506
rasdani/github-patches
git_diff
liqd__a4-opin-765
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> add test for email sending after moderator was added to project </issue> <code> [start of euth/projects/emails.py] 1 from euth.contrib.emails import OpinEmail 2 3 4 class ModeratorAddedEmail(OpinEmail): 5 template_name = 'notify_new_moderator' 6 7 def get_recipients(self): 8 self.kwargs['user'] 9 [end of euth/projects/emails.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/projects/emails.py b/euth/projects/emails.py --- a/euth/projects/emails.py +++ b/euth/projects/emails.py @@ -4,5 +4,5 @@ class ModeratorAddedEmail(OpinEmail): template_name = 'notify_new_moderator' - def get_recipients(self): - self.kwargs['user'] + def get_receivers(self): + return [self.kwargs['user']]
{"golden_diff": "diff --git a/euth/projects/emails.py b/euth/projects/emails.py\n--- a/euth/projects/emails.py\n+++ b/euth/projects/emails.py\n@@ -4,5 +4,5 @@\n class ModeratorAddedEmail(OpinEmail):\n template_name = 'notify_new_moderator'\n \n- def get_recipients(self):\n- self.kwargs['user']\n+ def get_receivers(self):\n+ return [self.kwargs['user']]\n", "issue": "add test for email sending after moderator was added to project\n\n", "before_files": [{"content": "from euth.contrib.emails import OpinEmail\n\n\nclass ModeratorAddedEmail(OpinEmail):\n template_name = 'notify_new_moderator'\n\n def get_recipients(self):\n self.kwargs['user']\n", "path": "euth/projects/emails.py"}]}
602
100
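For context on the record above: the issue asks for a test, and the diff renames `get_recipients` to `get_receivers` while actually returning a list instead of discarding the value. Below is a dependency-free sketch of such a regression test; `OpinEmail` is stubbed so the assertion runs anywhere, whereas the project's real test would go through `euth.contrib.emails` and its email-sending helpers.

class OpinEmail:  # stand-in for euth.contrib.emails.OpinEmail
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class ModeratorAddedEmail(OpinEmail):
    template_name = "notify_new_moderator"

    def get_receivers(self):           # the patched method
        return [self.kwargs["user"]]

def test_moderator_added_email_targets_new_moderator():
    email = ModeratorAddedEmail(user="[email protected]")
    assert email.get_receivers() == ["[email protected]"]

test_moderator_added_email_targets_new_moderator()
print("ok")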
gh_patches_debug_20006
rasdani/github-patches
git_diff
googleapis__google-api-python-client-1104
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create and populate folder for static discovery documents `googleapiclient` downloads the discovery docs for a Google API service at runtime and then constructs a client. Issue #1063 captures the need to move to a static package where the discovery docs are not fetched at run time. An incremental step towards satisfying #1063 would be to create a folder with static Google API discovery documents in this repo. The [discoveries folder](https://github.com/googleapis/discovery-artifact-manager/tree/master/discoveries) in the googleapis/discovery-artifact-manager repo has an up to date copy of the discovery documents. One approach would be to modify [synth.py](https://github.com/googleapis/google-api-python-client/blob/master/synth.py) to copy the discovery documents from discovery-artifact-manager similar to how it is done [here](https://github.com/googleapis/python-api-common-protos/blob/master/synth.py#L38). </issue> <code> [start of synth.py] 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import synthtool as s 16 from synthtool import gcp 17 18 common = gcp.CommonTemplates() 19 20 # ---------------------------------------------------------------------------- 21 # Add templated files 22 # ---------------------------------------------------------------------------- 23 templated_files = common.py_library() 24 25 # Copy kokoro configs. 26 # Docs are excluded as repo docs cannot currently be generated using sphinx. 27 s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh']) 28 29 # Also move issue templates 30 s.move(templated_files / '.github') 31 32 s.shell.run(["nox", "-s", "docs"], hide_output=False) [end of synth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/synth.py b/synth.py --- a/synth.py +++ b/synth.py @@ -14,6 +14,9 @@ import synthtool as s from synthtool import gcp +from synthtool.sources import git + +DISCOVERY_ARTIFACT_MANAGER_REPO = "googleapis/discovery-artifact-manager" common = gcp.CommonTemplates() @@ -29,4 +32,20 @@ # Also move issue templates s.move(templated_files / '.github') -s.shell.run(["nox", "-s", "docs"], hide_output=False) \ No newline at end of file +# ---------------------------------------------------------------------------- +# Copy discoveries folder from discovery-artifact-manager repo +# ---------------------------------------------------------------------------- + +discovery_artifact_manager_url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO) +discovery_artifacts = git.clone(discovery_artifact_manager_url) / "discoveries" + +excludes = [ + "**/BUILD.bazel", +] +s.copy(discovery_artifacts, + "googleapiclient/discovery_cache/documents", excludes=excludes) + +# ---------------------------------------------------------------------------- +# Generate docs +# ---------------------------------------------------------------------------- +s.shell.run(["nox", "-s", "docs"], hide_output=False)
{"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -14,6 +14,9 @@\n \n import synthtool as s\n from synthtool import gcp\n+from synthtool.sources import git\n+\n+DISCOVERY_ARTIFACT_MANAGER_REPO = \"googleapis/discovery-artifact-manager\"\n \n common = gcp.CommonTemplates()\n \n@@ -29,4 +32,20 @@\n # Also move issue templates\n s.move(templated_files / '.github')\n \n-s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n\\ No newline at end of file\n+# ----------------------------------------------------------------------------\n+# Copy discoveries folder from discovery-artifact-manager repo\n+# ----------------------------------------------------------------------------\n+\n+discovery_artifact_manager_url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO)\n+discovery_artifacts = git.clone(discovery_artifact_manager_url) / \"discoveries\"\n+\n+excludes = [\n+ \"**/BUILD.bazel\",\n+]\n+s.copy(discovery_artifacts,\n+ \"googleapiclient/discovery_cache/documents\", excludes=excludes)\n+\n+# ----------------------------------------------------------------------------\n+# Generate docs\n+# ----------------------------------------------------------------------------\n+s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "issue": "Create and populate folder for static discovery documents \n`googleapiclient` downloads the discovery docs for a Google API service at runtime and then constructs a client. Issue #1063 captures the need to move to a static package where the discovery docs are not fetched at run time. An incremental step towards satisfying #1063 would be to create a folder with static Google API discovery documents in this repo. The [discoveries folder](https://github.com/googleapis/discovery-artifact-manager/tree/master/discoveries) in the googleapis/discovery-artifact-manager repo has an up to date copy of the discovery documents. One approach would be to modify [synth.py](https://github.com/googleapis/google-api-python-client/blob/master/synth.py) to copy the discovery documents from discovery-artifact-manager similar to how it is done [here](https://github.com/googleapis/python-api-common-protos/blob/master/synth.py#L38).\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\n\n# Also move issue templates\ns.move(templated_files / '.github')\n\ns.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)", "path": "synth.py"}]}
1,031
263
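For context on the record above: the patched synth.py leans on synthtool's git helpers to clone discovery-artifact-manager and copy its `discoveries` folder while filtering out `BUILD.bazel` files. The standard-library approximation below shows what that copy step effectively does; it assumes git and network access are available, and `dirs_exist_ok` needs Python 3.8+.

import shutil
import subprocess
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    subprocess.run(
        ["git", "clone", "--depth=1",
         "https://github.com/googleapis/discovery-artifact-manager.git", tmp],
        check=True,
    )
    shutil.copytree(
        Path(tmp) / "discoveries",
        Path("googleapiclient/discovery_cache/documents"),
        dirs_exist_ok=True,
        ignore=shutil.ignore_patterns("BUILD.bazel"),  # mirrors the excludes list
    )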
gh_patches_debug_24701
rasdani/github-patches
git_diff
ietf-tools__datatracker-6328
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Migrate DocHistory objects for RFCs ### Describe the issue `DocHistory` objects that pertain to the RFC rather than the I-D need to be migrated to hang off the new `type_id="rfc"` `Document`. The `name` and `rfc_number` fields should be updated to match. ### Code of Conduct - [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md) </issue> <code> [start of ietf/doc/migrations/0017_move_dochistory.py] 1 # Generated by Django 4.2.5 on 2023-09-11 17:52 2 3 from django.db import migrations 4 5 from django.db.models import Subquery, OuterRef, F 6 7 8 def forward(apps, schema_editor): 9 DocHistory = apps.get_model("doc", "DocHistory") 10 RelatedDocument = apps.get_model("doc", "RelatedDocument") 11 DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc") 12 DocHistory.objects.filter( 13 type_id="draft", doc__type_id="draft", name__startswith="rfc" 14 ).annotate( 15 rfc_id=Subquery( 16 RelatedDocument.objects.filter( 17 source_id=OuterRef("doc_id"), relationship_id="became_rfc" 18 ).values_list("target_id", flat=True)[:1] 19 ) 20 ).update( 21 doc_id=F("rfc_id"), type_id="rfc" 22 ) 23 assert not DocHistory.objects.filter( 24 name__startswith="rfc", type_id="draft" 25 ).exists() 26 27 28 class Migration(migrations.Migration): 29 dependencies = [ 30 ("doc", "0016_delete_docalias"), 31 ] 32 33 # There is no going back 34 operations = [migrations.RunPython(forward)] 35 [end of ietf/doc/migrations/0017_move_dochistory.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ietf/doc/migrations/0017_move_dochistory.py b/ietf/doc/migrations/0017_move_dochistory.py --- a/ietf/doc/migrations/0017_move_dochistory.py +++ b/ietf/doc/migrations/0017_move_dochistory.py @@ -8,6 +8,7 @@ def forward(apps, schema_editor): DocHistory = apps.get_model("doc", "DocHistory") RelatedDocument = apps.get_model("doc", "RelatedDocument") + Document = apps.get_model("doc", "Document") DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc") DocHistory.objects.filter( type_id="draft", doc__type_id="draft", name__startswith="rfc" @@ -20,9 +21,19 @@ ).update( doc_id=F("rfc_id"), type_id="rfc" ) + DocHistory.objects.filter(type_id="rfc").annotate( + rfcno=Subquery( + Document.objects.filter(pk=OuterRef("doc_id")).values_list( + "rfc_number", flat=True + )[:1] + ) + ).update(rfc_number=F("rfcno")) assert not DocHistory.objects.filter( name__startswith="rfc", type_id="draft" ).exists() + assert not DocHistory.objects.filter( + type_id="rfc", rfc_number__isnull=True + ).exists() class Migration(migrations.Migration):
{"golden_diff": "diff --git a/ietf/doc/migrations/0017_move_dochistory.py b/ietf/doc/migrations/0017_move_dochistory.py\n--- a/ietf/doc/migrations/0017_move_dochistory.py\n+++ b/ietf/doc/migrations/0017_move_dochistory.py\n@@ -8,6 +8,7 @@\n def forward(apps, schema_editor):\n DocHistory = apps.get_model(\"doc\", \"DocHistory\")\n RelatedDocument = apps.get_model(\"doc\", \"RelatedDocument\")\n+ Document = apps.get_model(\"doc\", \"Document\")\n DocHistory.objects.filter(type_id=\"draft\", doc__type_id=\"rfc\").update(type_id=\"rfc\")\n DocHistory.objects.filter(\n type_id=\"draft\", doc__type_id=\"draft\", name__startswith=\"rfc\"\n@@ -20,9 +21,19 @@\n ).update(\n doc_id=F(\"rfc_id\"), type_id=\"rfc\"\n )\n+ DocHistory.objects.filter(type_id=\"rfc\").annotate(\n+ rfcno=Subquery(\n+ Document.objects.filter(pk=OuterRef(\"doc_id\")).values_list(\n+ \"rfc_number\", flat=True\n+ )[:1]\n+ )\n+ ).update(rfc_number=F(\"rfcno\"))\n assert not DocHistory.objects.filter(\n name__startswith=\"rfc\", type_id=\"draft\"\n ).exists()\n+ assert not DocHistory.objects.filter(\n+ type_id=\"rfc\", rfc_number__isnull=True\n+ ).exists()\n \n \n class Migration(migrations.Migration):\n", "issue": "Migrate DocHistory objects for RFCs\n### Describe the issue\n\n`DocHistory` objects that pertain to the RFC rather than the I-D need to be migrated to hang off the new `type_id=\"rfc\"` `Document`. The `name` and `rfc_number` fields should be updated to match.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Generated by Django 4.2.5 on 2023-09-11 17:52\n\nfrom django.db import migrations\n\nfrom django.db.models import Subquery, OuterRef, F\n\n\ndef forward(apps, schema_editor):\n DocHistory = apps.get_model(\"doc\", \"DocHistory\")\n RelatedDocument = apps.get_model(\"doc\", \"RelatedDocument\")\n DocHistory.objects.filter(type_id=\"draft\", doc__type_id=\"rfc\").update(type_id=\"rfc\")\n DocHistory.objects.filter(\n type_id=\"draft\", doc__type_id=\"draft\", name__startswith=\"rfc\"\n ).annotate(\n rfc_id=Subquery(\n RelatedDocument.objects.filter(\n source_id=OuterRef(\"doc_id\"), relationship_id=\"became_rfc\"\n ).values_list(\"target_id\", flat=True)[:1]\n )\n ).update(\n doc_id=F(\"rfc_id\"), type_id=\"rfc\"\n )\n assert not DocHistory.objects.filter(\n name__startswith=\"rfc\", type_id=\"draft\"\n ).exists()\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"doc\", \"0016_delete_docalias\"),\n ]\n\n # There is no going back\n operations = [migrations.RunPython(forward)]\n", "path": "ietf/doc/migrations/0017_move_dochistory.py"}]}
1,010
351
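Pieced together from this row's `before_files` plus its golden diff, the migrated `forward()` ends up as below — a sketch of the merged result, not the upstream file:

```python
from django.db.models import Subquery, OuterRef, F


def forward(apps, schema_editor):
    DocHistory = apps.get_model("doc", "DocHistory")
    RelatedDocument = apps.get_model("doc", "RelatedDocument")
    Document = apps.get_model("doc", "Document")
    # DocHistory rows already hanging off an rfc Document only change type.
    DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc")
    # rfc-named histories still pointing at the draft are re-pointed at the
    # Document the draft became.
    DocHistory.objects.filter(
        type_id="draft", doc__type_id="draft", name__startswith="rfc"
    ).annotate(
        rfc_id=Subquery(
            RelatedDocument.objects.filter(
                source_id=OuterRef("doc_id"), relationship_id="became_rfc"
            ).values_list("target_id", flat=True)[:1]
        )
    ).update(
        doc_id=F("rfc_id"), type_id="rfc"
    )
    # Added by the diff: copy rfc_number across from the target Document.
    DocHistory.objects.filter(type_id="rfc").annotate(
        rfcno=Subquery(
            Document.objects.filter(pk=OuterRef("doc_id")).values_list(
                "rfc_number", flat=True
            )[:1]
        )
    ).update(rfc_number=F("rfcno"))
    assert not DocHistory.objects.filter(
        name__startswith="rfc", type_id="draft"
    ).exists()
    assert not DocHistory.objects.filter(
        type_id="rfc", rfc_number__isnull=True
    ).exists()
```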
gh_patches_debug_11776
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-273
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> It's possible to create a FollowRequest for a Follow that already exists Send a follow request. User accepts. Send a second follow request (eg. using an old tab) Because requests and follows are stored in separate tables it's not possible to enforce this with a constraint. Pre-checking would leave a race condition, so I guess we either need to mess with table locks or create the follow request, and then delete it if its unneeded. Can we enforce this in the pre save of the model, perhaps? </issue> <code> [start of bookwyrm/models/relationship.py] 1 ''' defines relationships between users ''' 2 from django.db import models 3 4 from bookwyrm import activitypub 5 from .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel 6 7 8 class UserRelationship(ActivitypubMixin, BookWyrmModel): 9 ''' many-to-many through table for followers ''' 10 user_subject = models.ForeignKey( 11 'User', 12 on_delete=models.PROTECT, 13 related_name='%(class)s_user_subject' 14 ) 15 user_object = models.ForeignKey( 16 'User', 17 on_delete=models.PROTECT, 18 related_name='%(class)s_user_object' 19 ) 20 21 class Meta: 22 ''' relationships should be unique ''' 23 abstract = True 24 constraints = [ 25 models.UniqueConstraint( 26 fields=['user_subject', 'user_object'], 27 name='%(class)s_unique' 28 ), 29 models.CheckConstraint( 30 check=~models.Q(user_subject=models.F('user_object')), 31 name='%(class)s_no_self' 32 ) 33 ] 34 35 activity_mappings = [ 36 ActivityMapping('id', 'remote_id'), 37 ActivityMapping('actor', 'user_subject'), 38 ActivityMapping('object', 'user_object'), 39 ] 40 activity_serializer = activitypub.Follow 41 42 def get_remote_id(self, status=None): 43 ''' use shelf identifier in remote_id ''' 44 status = status or 'follows' 45 base_path = self.user_subject.remote_id 46 return '%s#%s/%d' % (base_path, status, self.id) 47 48 49 def to_accept_activity(self): 50 ''' generate an Accept for this follow request ''' 51 return activitypub.Accept( 52 id=self.get_remote_id(status='accepts'), 53 actor=self.user_object.remote_id, 54 object=self.to_activity() 55 ).serialize() 56 57 58 def to_reject_activity(self): 59 ''' generate an Accept for this follow request ''' 60 return activitypub.Reject( 61 id=self.get_remote_id(status='rejects'), 62 actor=self.user_object.remote_id, 63 object=self.to_activity() 64 ).serialize() 65 66 67 class UserFollows(UserRelationship): 68 ''' Following a user ''' 69 status = 'follows' 70 71 @classmethod 72 def from_request(cls, follow_request): 73 ''' converts a follow request into a follow relationship ''' 74 return cls( 75 user_subject=follow_request.user_subject, 76 user_object=follow_request.user_object, 77 remote_id=follow_request.remote_id, 78 ) 79 80 81 class UserFollowRequest(UserRelationship): 82 ''' following a user requires manual or automatic confirmation ''' 83 status = 'follow_request' 84 85 86 class UserBlocks(UserRelationship): 87 ''' prevent another user from following you and seeing your posts ''' 88 # TODO: not implemented 89 status = 'blocks' 90 [end of bookwyrm/models/relationship.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py --- a/bookwyrm/models/relationship.py +++ b/bookwyrm/models/relationship.py @@ -82,6 +82,17 @@ ''' following a user requires manual or automatic confirmation ''' status = 'follow_request' + def save(self, *args, **kwargs): + ''' make sure the follow relationship doesn't already exist ''' + try: + UserFollows.objects.get( + user_subject=self.user_subject, + user_object=self.user_object + ) + return None + except UserFollows.DoesNotExist: + return super().save(*args, **kwargs) + class UserBlocks(UserRelationship): ''' prevent another user from following you and seeing your posts '''
{"golden_diff": "diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py\n--- a/bookwyrm/models/relationship.py\n+++ b/bookwyrm/models/relationship.py\n@@ -82,6 +82,17 @@\n ''' following a user requires manual or automatic confirmation '''\n status = 'follow_request'\n \n+ def save(self, *args, **kwargs):\n+ ''' make sure the follow relationship doesn't already exist '''\n+ try:\n+ UserFollows.objects.get(\n+ user_subject=self.user_subject,\n+ user_object=self.user_object\n+ )\n+ return None\n+ except UserFollows.DoesNotExist:\n+ return super().save(*args, **kwargs)\n+\n \n class UserBlocks(UserRelationship):\n ''' prevent another user from following you and seeing your posts '''\n", "issue": "It's possible to create a FollowRequest for a Follow that already exists\nSend a follow request.\r\nUser accepts.\r\nSend a second follow request (eg. using an old tab)\r\n\r\nBecause requests and follows are stored in separate tables it's not possible to enforce this with a constraint. Pre-checking would leave a race condition, so I guess we either need to mess with table locks or create the follow request, and then delete it if its unneeded. Can we enforce this in the pre save of the model, perhaps?\n", "before_files": [{"content": "''' defines relationships between users '''\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel\n\n\nclass UserRelationship(ActivitypubMixin, BookWyrmModel):\n ''' many-to-many through table for followers '''\n user_subject = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_subject'\n )\n user_object = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_object'\n )\n\n class Meta:\n ''' relationships should be unique '''\n abstract = True\n constraints = [\n models.UniqueConstraint(\n fields=['user_subject', 'user_object'],\n name='%(class)s_unique'\n ),\n models.CheckConstraint(\n check=~models.Q(user_subject=models.F('user_object')),\n name='%(class)s_no_self'\n )\n ]\n\n activity_mappings = [\n ActivityMapping('id', 'remote_id'),\n ActivityMapping('actor', 'user_subject'),\n ActivityMapping('object', 'user_object'),\n ]\n activity_serializer = activitypub.Follow\n\n def get_remote_id(self, status=None):\n ''' use shelf identifier in remote_id '''\n status = status or 'follows'\n base_path = self.user_subject.remote_id\n return '%s#%s/%d' % (base_path, status, self.id)\n\n\n def to_accept_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Accept(\n id=self.get_remote_id(status='accepts'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\n def to_reject_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Reject(\n id=self.get_remote_id(status='rejects'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\nclass UserFollows(UserRelationship):\n ''' Following a user '''\n status = 'follows'\n\n @classmethod\n def from_request(cls, follow_request):\n ''' converts a follow request into a follow relationship '''\n return cls(\n user_subject=follow_request.user_subject,\n user_object=follow_request.user_object,\n remote_id=follow_request.remote_id,\n )\n\n\nclass UserFollowRequest(UserRelationship):\n ''' following a user requires manual or automatic confirmation '''\n status = 'follow_request'\n\n\nclass UserBlocks(UserRelationship):\n ''' prevent 
another user from following you and seeing your posts '''\n # TODO: not implemented\n status = 'blocks'\n", "path": "bookwyrm/models/relationship.py"}]}
1,372
175
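The accepted fix for this row is a pre-save existence check rather than a database constraint; merged with the record's `before_files` it reads as follows (a sketch — `UserFollows` and `UserRelationship` come from the same module). Note the check-then-save is still not atomic, so it narrows the race described in the issue rather than locking it out entirely:

```python
class UserFollowRequest(UserRelationship):
    ''' following a user requires manual or automatic confirmation '''
    status = 'follow_request'

    def save(self, *args, **kwargs):
        ''' make sure the follow relationship doesn't already exist '''
        try:
            UserFollows.objects.get(
                user_subject=self.user_subject,
                user_object=self.user_object
            )
            # a matching follow already exists: drop the request silently
            return None
        except UserFollows.DoesNotExist:
            return super().save(*args, **kwargs)
```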
gh_patches_debug_56973
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-3126
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Saved lists pagination is broken **Describe the bug** Trying to move through pages of saved lists is broken. Instead, one moves back to all lists. **To Reproduce** Steps to reproduce the behavior: 1. Save enough lists to have at least two pages 2. Go to [`Lists -> Saved Lists`](https://bookwyrm.social/list/saved) 3. Click on `Next` 4. Wonder why the lists shown are not the ones you saved 5. Realize you're back on `All Lists` **Expected behavior** One should be able to paginate through saved lists **Instance** [bookwyrm.social](https://bookwyrm.social/) **Additional comments** I'm trying to dig through the code a bit, but I don't have time to setup dev environment and populate local dbs and whatever needs to be done. It might just be that `path` needs to be changed to `/list/saved` [here](https://github.com/bookwyrm-social/bookwyrm/blob/6667178703b93d0d1874f1fd547e97c12a2ec144/bookwyrm/views/list/lists.py#L62)? But idk, this isn't a stack I'm very intimate with 🤷 </issue> <code> [start of bookwyrm/views/list/lists.py] 1 """ book list views""" 2 from django.contrib.auth.decorators import login_required 3 from django.core.paginator import Paginator 4 from django.shortcuts import redirect 5 from django.template.response import TemplateResponse 6 from django.utils.decorators import method_decorator 7 from django.views import View 8 9 from bookwyrm import forms, models 10 from bookwyrm.lists_stream import ListsStream 11 from bookwyrm.views.helpers import get_user_from_username 12 13 14 # pylint: disable=no-self-use 15 class Lists(View): 16 """book list page""" 17 18 def get(self, request): 19 """display a book list""" 20 if request.user.is_authenticated: 21 lists = ListsStream().get_list_stream(request.user) 22 else: 23 lists = models.List.objects.filter(privacy="public") 24 paginated = Paginator(lists, 12) 25 data = { 26 "lists": paginated.get_page(request.GET.get("page")), 27 "list_form": forms.ListForm(), 28 "path": "/list", 29 } 30 return TemplateResponse(request, "lists/lists.html", data) 31 32 @method_decorator(login_required, name="dispatch") 33 # pylint: disable=unused-argument 34 def post(self, request): 35 """create a book_list""" 36 form = forms.ListForm(request.POST) 37 if not form.is_valid(): 38 return redirect("lists") 39 book_list = form.save(request, commit=False) 40 41 # list should not have a group if it is not group curated 42 if not book_list.curation == "group": 43 book_list.group = None 44 book_list.save() 45 46 return redirect(book_list.local_path) 47 48 49 @method_decorator(login_required, name="dispatch") 50 class SavedLists(View): 51 """saved book list page""" 52 53 def get(self, request): 54 """display book lists""" 55 # hide lists with no approved books 56 lists = request.user.saved_lists.order_by("-updated_date") 57 58 paginated = Paginator(lists, 12) 59 data = { 60 "lists": paginated.get_page(request.GET.get("page")), 61 "list_form": forms.ListForm(), 62 "path": "/list", 63 } 64 return TemplateResponse(request, "lists/lists.html", data) 65 66 67 @method_decorator(login_required, name="dispatch") 68 class UserLists(View): 69 """a user's book list page""" 70 71 def get(self, request, username): 72 """display a book list""" 73 user = get_user_from_username(request.user, username) 74 lists = models.List.privacy_filter(request.user).filter(user=user) 75 paginated = Paginator(lists, 12) 76 77 data = { 78 "user": user, 79 "is_self": request.user.id == user.id, 80 
"lists": paginated.get_page(request.GET.get("page")), 81 "list_form": forms.ListForm(), 82 "path": user.local_path + "/lists", 83 } 84 return TemplateResponse(request, "user/lists.html", data) 85 [end of bookwyrm/views/list/lists.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py --- a/bookwyrm/views/list/lists.py +++ b/bookwyrm/views/list/lists.py @@ -59,7 +59,7 @@ data = { "lists": paginated.get_page(request.GET.get("page")), "list_form": forms.ListForm(), - "path": "/list", + "path": "/list/saved", } return TemplateResponse(request, "lists/lists.html", data)
{"golden_diff": "diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py\n--- a/bookwyrm/views/list/lists.py\n+++ b/bookwyrm/views/list/lists.py\n@@ -59,7 +59,7 @@\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n- \"path\": \"/list\",\n+ \"path\": \"/list/saved\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n", "issue": "Saved lists pagination is broken\n**Describe the bug**\r\nTrying to move through pages of saved lists is broken. Instead, one moves back to all lists.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Save enough lists to have at least two pages\r\n2. Go to [`Lists -> Saved Lists`](https://bookwyrm.social/list/saved)\r\n3. Click on `Next`\r\n4. Wonder why the lists shown are not the ones you saved\r\n5. Realize you're back on `All Lists`\r\n\r\n**Expected behavior**\r\nOne should be able to paginate through saved lists\r\n\r\n**Instance**\r\n[bookwyrm.social](https://bookwyrm.social/)\r\n\r\n**Additional comments**\r\nI'm trying to dig through the code a bit, but I don't have time to setup dev environment and populate local dbs and whatever needs to be done. It might just be that `path` needs to be changed to `/list/saved` [here](https://github.com/bookwyrm-social/bookwyrm/blob/6667178703b93d0d1874f1fd547e97c12a2ec144/bookwyrm/views/list/lists.py#L62)? But idk, this isn't a stack I'm very intimate with \ud83e\udd37\r\n\n", "before_files": [{"content": "\"\"\" book list views\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.lists_stream import ListsStream\nfrom bookwyrm.views.helpers import get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Lists(View):\n \"\"\"book list page\"\"\"\n\n def get(self, request):\n \"\"\"display a book list\"\"\"\n if request.user.is_authenticated:\n lists = ListsStream().get_list_stream(request.user)\n else:\n lists = models.List.objects.filter(privacy=\"public\")\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request):\n \"\"\"create a book_list\"\"\"\n form = forms.ListForm(request.POST)\n if not form.is_valid():\n return redirect(\"lists\")\n book_list = form.save(request, commit=False)\n\n # list should not have a group if it is not group curated\n if not book_list.curation == \"group\":\n book_list.group = None\n book_list.save()\n\n return redirect(book_list.local_path)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass SavedLists(View):\n \"\"\"saved book list page\"\"\"\n\n def get(self, request):\n \"\"\"display book lists\"\"\"\n # hide lists with no approved books\n lists = request.user.saved_lists.order_by(\"-updated_date\")\n\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n\n@method_decorator(login_required, 
name=\"dispatch\")\nclass UserLists(View):\n \"\"\"a user's book list page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a book list\"\"\"\n user = get_user_from_username(request.user, username)\n lists = models.List.privacy_filter(request.user).filter(user=user)\n paginated = Paginator(lists, 12)\n\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": user.local_path + \"/lists\",\n }\n return TemplateResponse(request, \"user/lists.html\", data)\n", "path": "bookwyrm/views/list/lists.py"}]}
1,583
113
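The whole fix for this row is a one-token change to the pagination path; in context the patched method looks like this (sketch from `before_files` plus the diff):

```python
@method_decorator(login_required, name="dispatch")
class SavedLists(View):
    """saved book list page"""

    def get(self, request):
        """display book lists"""
        lists = request.user.saved_lists.order_by("-updated_date")

        paginated = Paginator(lists, 12)
        data = {
            "lists": paginated.get_page(request.GET.get("page")),
            "list_form": forms.ListForm(),
            # pagination links must point back at the saved view,
            # not at the all-lists view
            "path": "/list/saved",
        }
        return TemplateResponse(request, "lists/lists.html", data)
```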
gh_patches_debug_1469
rasdani/github-patches
git_diff
microsoft__DeepSpeed-5577
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] fp_quantizer is not correctly built when non-jit installation **Describe the bug** fp_quantizer is not correctly built when non-jit installation. **To Reproduce** Steps to reproduce the behavior: ``` DS_BUILD_FP_QUANTIZER=1 pip install deepspeed ``` install will succeed but ``` from deepspeed.ops.fp_quantizer import FP_Quantize FP_Quantize() ``` will raise `ImportError: dynamic module does not define module export function (PyInit_fp_quantizer_op)` **Expected behavior** Renaming csrc/fp_quantizer/quantize.cu may solve the issue. This restriction seems to be cause of the bug. > Note that setuptools cannot handle files with the same name but different extensions https://pytorch.org/tutorials/advanced/cpp_extension.html </issue> <code> [start of op_builder/fp_quantizer.py] 1 # Copyright (c) Microsoft Corporation. 2 # SPDX-License-Identifier: Apache-2.0 3 4 # DeepSpeed Team 5 6 from .builder import CUDAOpBuilder, installed_cuda_version 7 8 9 class FPQuantizerBuilder(CUDAOpBuilder): 10 BUILD_VAR = "DS_BUILD_FP_QUANTIZER" 11 NAME = "fp_quantizer" 12 13 def __init__(self, name=None): 14 name = self.NAME if name is None else name 15 super().__init__(name=name) 16 17 def absolute_name(self): 18 return f'deepspeed.ops.fp_quantizer.{self.NAME}_op' 19 20 def is_compatible(self, verbose=True): 21 try: 22 import torch 23 except ImportError: 24 self.warning("Please install torch if trying to pre-compile inference kernels") 25 return False 26 27 cuda_okay = True 28 if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda 29 sys_cuda_major, _ = installed_cuda_version() 30 torch_cuda_major = int(torch.version.cuda.split('.')[0]) 31 cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda 32 if cuda_capability < 8: 33 self.warning("NVIDIA Inference is only supported on Ampere and newer architectures") 34 cuda_okay = False 35 if cuda_capability >= 8: 36 if torch_cuda_major < 11 or sys_cuda_major < 11: 37 self.warning("On Ampere and higher architectures please use CUDA 11+") 38 cuda_okay = False 39 return super().is_compatible(verbose) and cuda_okay 40 41 def filter_ccs(self, ccs): 42 ccs_retained = [] 43 ccs_pruned = [] 44 for cc in ccs: 45 if int(cc[0]) >= 8: 46 ccs_retained.append(cc) 47 else: 48 ccs_pruned.append(cc) 49 if len(ccs_pruned) > 0: 50 self.warning(f"Filtered compute capabilities {ccs_pruned}") 51 return ccs_retained 52 53 def sources(self): 54 return [ 55 "csrc/fp_quantizer/quantize.cu", 56 "csrc/fp_quantizer/quantize.cpp", 57 ] 58 59 def extra_ldflags(self): 60 return ['-lcurand'] 61 62 def include_paths(self): 63 return ['csrc/fp_quantizer/includes', 'csrc/includes'] 64 [end of op_builder/fp_quantizer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/op_builder/fp_quantizer.py b/op_builder/fp_quantizer.py --- a/op_builder/fp_quantizer.py +++ b/op_builder/fp_quantizer.py @@ -52,8 +52,8 @@ def sources(self): return [ - "csrc/fp_quantizer/quantize.cu", - "csrc/fp_quantizer/quantize.cpp", + "csrc/fp_quantizer/fp_quantize.cu", + "csrc/fp_quantizer/fp_quantize.cpp", ] def extra_ldflags(self):
{"golden_diff": "diff --git a/op_builder/fp_quantizer.py b/op_builder/fp_quantizer.py\n--- a/op_builder/fp_quantizer.py\n+++ b/op_builder/fp_quantizer.py\n@@ -52,8 +52,8 @@\n \n def sources(self):\n return [\n- \"csrc/fp_quantizer/quantize.cu\",\n- \"csrc/fp_quantizer/quantize.cpp\",\n+ \"csrc/fp_quantizer/fp_quantize.cu\",\n+ \"csrc/fp_quantizer/fp_quantize.cpp\",\n ]\n \n def extra_ldflags(self):\n", "issue": "[BUG] fp_quantizer is not correctly built when non-jit installation\n**Describe the bug**\r\nfp_quantizer is not correctly built when non-jit installation.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```\r\nDS_BUILD_FP_QUANTIZER=1 pip install deepspeed\r\n```\r\ninstall will succeed but\r\n```\r\nfrom deepspeed.ops.fp_quantizer import FP_Quantize\r\nFP_Quantize()\r\n```\r\nwill raise `ImportError: dynamic module does not define module export function (PyInit_fp_quantizer_op)`\r\n\r\n**Expected behavior**\r\n\r\nRenaming csrc/fp_quantizer/quantize.cu may solve the issue.\r\nThis restriction seems to be cause of the bug.\r\n> Note that setuptools cannot handle files with the same name but different extensions\r\nhttps://pytorch.org/tutorials/advanced/cpp_extension.html\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom .builder import CUDAOpBuilder, installed_cuda_version\n\n\nclass FPQuantizerBuilder(CUDAOpBuilder):\n BUILD_VAR = \"DS_BUILD_FP_QUANTIZER\"\n NAME = \"fp_quantizer\"\n\n def __init__(self, name=None):\n name = self.NAME if name is None else name\n super().__init__(name=name)\n\n def absolute_name(self):\n return f'deepspeed.ops.fp_quantizer.{self.NAME}_op'\n\n def is_compatible(self, verbose=True):\n try:\n import torch\n except ImportError:\n self.warning(\"Please install torch if trying to pre-compile inference kernels\")\n return False\n\n cuda_okay = True\n if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda\n sys_cuda_major, _ = installed_cuda_version()\n torch_cuda_major = int(torch.version.cuda.split('.')[0])\n cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda\n if cuda_capability < 8:\n self.warning(\"NVIDIA Inference is only supported on Ampere and newer architectures\")\n cuda_okay = False\n if cuda_capability >= 8:\n if torch_cuda_major < 11 or sys_cuda_major < 11:\n self.warning(\"On Ampere and higher architectures please use CUDA 11+\")\n cuda_okay = False\n return super().is_compatible(verbose) and cuda_okay\n\n def filter_ccs(self, ccs):\n ccs_retained = []\n ccs_pruned = []\n for cc in ccs:\n if int(cc[0]) >= 8:\n ccs_retained.append(cc)\n else:\n ccs_pruned.append(cc)\n if len(ccs_pruned) > 0:\n self.warning(f\"Filtered compute capabilities {ccs_pruned}\")\n return ccs_retained\n\n def sources(self):\n return [\n \"csrc/fp_quantizer/quantize.cu\",\n \"csrc/fp_quantizer/quantize.cpp\",\n ]\n\n def extra_ldflags(self):\n return ['-lcurand']\n\n def include_paths(self):\n return ['csrc/fp_quantizer/includes', 'csrc/includes']\n", "path": "op_builder/fp_quantizer.py"}]}
1,342
132
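The diff only touches the op builder, renaming the sources so the `.cu` and `.cpp` files no longer share a basename (the setuptools restriction quoted in the issue); presumably the files themselves are renamed on disk in the same change, which this record does not show:

```python
def sources(self):
    # quantize.cu / quantize.cpp shared a basename, which setuptools
    # cannot handle; both sources are renamed to fp_quantize.*
    return [
        "csrc/fp_quantizer/fp_quantize.cu",
        "csrc/fp_quantizer/fp_quantize.cpp",
    ]
```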
gh_patches_debug_7174
rasdani/github-patches
git_diff
cowrie__cowrie-1054
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug in csirtg plugin @wesyoung Not sure when this bug started, but just looked today at my honeypots and saw this happening all over the place in the logs. ``` 2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Unhandled error in Deferred: 2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 289, in connectionLost protocol.connectionLost(reason) File "/usr/local/lib/python2.7/dist-packages/twisted/web/client.py", line 223, in connectionLost self.factory._disconnectedDeferred.callback(None) File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 459, in callback self._startRunCallbacks(result) File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 567, in _startRunCallbacks self._runCallbacks() --- <exception caught here> --- File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 653, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/cowrie/cowrie/cowrie/commands/wget.py", line 241, in error url=self.url) File "/home/cowrie/cowrie/cowrie/shell/protocol.py", line 80, in logDispatch pt.factory.logDispatch(*msg, **args) File "/home/cowrie/cowrie/cowrie/telnet/transport.py", line 43, in logDispatch output.logDispatch(*msg, **args) File "/home/cowrie/cowrie/cowrie/core/output.py", line 117, in logDispatch self.emit(ev) File "/home/cowrie/cowrie/cowrie/core/output.py", line 206, in emit self.write(ev) File "/home/cowrie/cowrie/cowrie/output/csirtg.py", line 43, in write system = e['system'] exceptions.KeyError: 'system' ``` </issue> <code> [start of src/cowrie/output/csirtg.py] 1 from __future__ import absolute_import, division 2 3 import os 4 from datetime import datetime 5 6 from csirtgsdk.client import Client 7 from csirtgsdk.indicator import Indicator 8 9 from twisted.python import log 10 11 import cowrie.core.output 12 from cowrie.core.config import CONFIG 13 14 USERNAME = os.environ.get('CSIRTG_USER') 15 FEED = os.environ.get('CSIRTG_FEED') 16 TOKEN = os.environ.get('CSIRG_TOKEN') 17 DESCRIPTION = os.environ.get('CSIRTG_DESCRIPTION', 'random scanning activity') 18 19 20 class Output(cowrie.core.output.Output): 21 def __init__(self): 22 self.user = CONFIG.get('output_csirtg', 'username') or USERNAME 23 self.feed = CONFIG.get('output_csirtg', 'feed') or FEED 24 self.token = CONFIG.get('output_csirtg', 'token') or TOKEN 25 try: 26 self.description = CONFIG.get('output_csirtg', 'description') 27 except Exception: 28 self.description = DESCRIPTION 29 self.context = {} 30 self.client = Client(token=self.token) 31 cowrie.core.output.Output.__init__(self) 32 33 def start(self, ): 34 pass 35 36 def stop(self): 37 pass 38 39 def write(self, e): 40 peerIP = e['src_ip'] 41 ts = e['timestamp'] 42 system = e['system'] 43 44 if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']: 45 return 46 47 today = str(datetime.now().date()) 48 49 if not self.context.get(today): 50 self.context = {} 51 self.context[today] = set() 52 53 key = ','.join([peerIP, system]) 54 55 if key in self.context[today]: 56 return 57 58 self.context[today].add(key) 59 60 tags = 'scanner,ssh' 61 port = 22 62 if e['system'] == 'cowrie.telnet.transport.HoneyPotTelnetFactory': 63 tags = 'scanner,telnet' 64 port = 23 65 66 i = { 67 'user': self.user, 68 
'feed': self.feed, 69 'indicator': peerIP, 70 'portlist': port, 71 'protocol': 'tcp', 72 'tags': tags, 73 'firsttime': ts, 74 'lasttime': ts, 75 'description': self.description 76 } 77 78 ret = Indicator(self.client, i).submit() 79 log.msg('logged to csirtg %s ' % ret['location']) 80 [end of src/cowrie/output/csirtg.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py --- a/src/cowrie/output/csirtg.py +++ b/src/cowrie/output/csirtg.py @@ -39,9 +39,10 @@ def write(self, e): peerIP = e['src_ip'] ts = e['timestamp'] - system = e['system'] + system = e.get('system', None) - if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']: + if system not in ['cowrie.ssh.factory.CowrieSSHFactory', + 'cowrie.telnet.transport.HoneyPotTelnetFactory']: return today = str(datetime.now().date())
{"golden_diff": "diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py\n--- a/src/cowrie/output/csirtg.py\n+++ b/src/cowrie/output/csirtg.py\n@@ -39,9 +39,10 @@\n def write(self, e):\n peerIP = e['src_ip']\n ts = e['timestamp']\n- system = e['system']\n+ system = e.get('system', None)\n \n- if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n+ if system not in ['cowrie.ssh.factory.CowrieSSHFactory',\n+ 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n return\n \n today = str(datetime.now().date())\n", "issue": "Bug in csirtg plugin\n@wesyoung Not sure when this bug started, but just looked today at my honeypots and saw this happening all over the place in the logs.\r\n\r\n```\r\n2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Unhandled error in Deferred:\r\n2018-02-11T16:53:14-0500 [twisted.internet.defer#critical]\r\n\tTraceback (most recent call last):\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/tcp.py\", line 289, in connectionLost\r\n\t protocol.connectionLost(reason)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/web/client.py\", line 223, in connectionLost\r\n\t self.factory._disconnectedDeferred.callback(None)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 459, in callback\r\n\t self._startRunCallbacks(result)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 567, in _startRunCallbacks\r\n\t self._runCallbacks()\r\n\t--- <exception caught here> ---\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n\t current.result = callback(current.result, *args, **kw)\r\n\t File \"/home/cowrie/cowrie/cowrie/commands/wget.py\", line 241, in error\r\n\t url=self.url)\r\n\t File \"/home/cowrie/cowrie/cowrie/shell/protocol.py\", line 80, in logDispatch\r\n\t pt.factory.logDispatch(*msg, **args)\r\n\t File \"/home/cowrie/cowrie/cowrie/telnet/transport.py\", line 43, in logDispatch\r\n\t output.logDispatch(*msg, **args)\r\n\t File \"/home/cowrie/cowrie/cowrie/core/output.py\", line 117, in logDispatch\r\n\t self.emit(ev)\r\n\t File \"/home/cowrie/cowrie/cowrie/core/output.py\", line 206, in emit\r\n\t self.write(ev)\r\n\t File \"/home/cowrie/cowrie/cowrie/output/csirtg.py\", line 43, in write\r\n\t system = e['system']\r\n\texceptions.KeyError: 'system'\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, division\n\nimport os\nfrom datetime import datetime\n\nfrom csirtgsdk.client import Client\nfrom csirtgsdk.indicator import Indicator\n\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nUSERNAME = os.environ.get('CSIRTG_USER')\nFEED = os.environ.get('CSIRTG_FEED')\nTOKEN = os.environ.get('CSIRG_TOKEN')\nDESCRIPTION = os.environ.get('CSIRTG_DESCRIPTION', 'random scanning activity')\n\n\nclass Output(cowrie.core.output.Output):\n def __init__(self):\n self.user = CONFIG.get('output_csirtg', 'username') or USERNAME\n self.feed = CONFIG.get('output_csirtg', 'feed') or FEED\n self.token = CONFIG.get('output_csirtg', 'token') or TOKEN\n try:\n self.description = CONFIG.get('output_csirtg', 'description')\n except Exception:\n self.description = DESCRIPTION\n self.context = {}\n self.client = Client(token=self.token)\n cowrie.core.output.Output.__init__(self)\n\n def start(self, ):\n pass\n\n def stop(self):\n pass\n\n def write(self, e):\n peerIP = e['src_ip']\n ts = 
e['timestamp']\n system = e['system']\n\n if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n return\n\n today = str(datetime.now().date())\n\n if not self.context.get(today):\n self.context = {}\n self.context[today] = set()\n\n key = ','.join([peerIP, system])\n\n if key in self.context[today]:\n return\n\n self.context[today].add(key)\n\n tags = 'scanner,ssh'\n port = 22\n if e['system'] == 'cowrie.telnet.transport.HoneyPotTelnetFactory':\n tags = 'scanner,telnet'\n port = 23\n\n i = {\n 'user': self.user,\n 'feed': self.feed,\n 'indicator': peerIP,\n 'portlist': port,\n 'protocol': 'tcp',\n 'tags': tags,\n 'firsttime': ts,\n 'lasttime': ts,\n 'description': self.description\n }\n\n ret = Indicator(self.client, i).submit()\n log.msg('logged to csirtg %s ' % ret['location'])\n", "path": "src/cowrie/output/csirtg.py"}]}
1,780
181
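The traceback in the issue comes from indexing a key that wget-style events never set; the patched opening of `write()` (remainder unchanged from the record's `before_files`) swaps the index for a defensive lookup:

```python
def write(self, e):
    peerIP = e['src_ip']
    ts = e['timestamp']
    # not every logged event carries a 'system' key, so look it up
    # defensively instead of indexing
    system = e.get('system', None)

    if system not in ['cowrie.ssh.factory.CowrieSSHFactory',
                      'cowrie.telnet.transport.HoneyPotTelnetFactory']:
        return
    # ... rest of write() as in before_files
```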
gh_patches_debug_5664
rasdani/github-patches
git_diff
cisagov__manage.get.gov-278
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DomainApplication list on logged in user homepage Outcome: A list (limited to one now) of domain applications on the homepage of a logged in user. A/C: As a user I want to see my in progress applications for a domain. As a user I want to be able to select an application that is `In Progress` and continue my progress. As a user I want to be able to view submitted applications. </issue> <code> [start of src/registrar/views/index.py] 1 from django.shortcuts import render 2 3 4 def index(request): 5 """This page is available to anyone without logging in.""" 6 return render(request, "home.html") 7 [end of src/registrar/views/index.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py --- a/src/registrar/views/index.py +++ b/src/registrar/views/index.py @@ -1,6 +1,12 @@ from django.shortcuts import render +from registrar.models import DomainApplication + def index(request): """This page is available to anyone without logging in.""" - return render(request, "home.html") + context = {} + if request.user.is_authenticated: + applications = DomainApplication.objects.filter(creator=request.user) + context["domain_applications"] = applications + return render(request, "home.html", context)
{"golden_diff": "diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py\n--- a/src/registrar/views/index.py\n+++ b/src/registrar/views/index.py\n@@ -1,6 +1,12 @@\n from django.shortcuts import render\n \n+from registrar.models import DomainApplication\n+\n \n def index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n- return render(request, \"home.html\")\n+ context = {}\n+ if request.user.is_authenticated:\n+ applications = DomainApplication.objects.filter(creator=request.user)\n+ context[\"domain_applications\"] = applications\n+ return render(request, \"home.html\", context)\n", "issue": "DomainApplication list on logged in user homepage\nOutcome: A list (limited to one now) of domain applications on the homepage of a logged in user. \n\nA/C: As a user I want to see my in progress applications for a domain. As a user I want to be able to select an application that is `In Progress` and continue my progress. As a user I want to be able to view submitted applications. \n", "before_files": [{"content": "from django.shortcuts import render\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n return render(request, \"home.html\")\n", "path": "src/registrar/views/index.py"}]}
665
139
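Applied to the record's three-line view, the golden diff yields the full patched module below — small enough to reproduce whole:

```python
from django.shortcuts import render

from registrar.models import DomainApplication


def index(request):
    """This page is available to anyone without logging in."""
    context = {}
    if request.user.is_authenticated:
        # surface the logged-in user's own applications on the home page
        applications = DomainApplication.objects.filter(creator=request.user)
        context["domain_applications"] = applications
    return render(request, "home.html", context)
```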
gh_patches_debug_14729
rasdani/github-patches
git_diff
dotkom__onlineweb4-612
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Gravatar default images do not load correctly </issue> <code> [start of apps/authentication/templatetags/gravatar_url_resolver.py] 1 from django import template 2 from django.conf import settings 3 import urllib, hashlib 4 5 register = template.Library() 6 7 @register.assignment_tag(takes_context=True) 8 def gravatar_url(context, user, size): 9 default = "%s%s_%s.png" % (context['request'].META['HTTP_HOST'], 10 settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender) 11 gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.get_email().email).hexdigest() + "?" 12 gravatar_url += urllib.urlencode({'d': default, 's':str(size)}) 13 14 return gravatar_url [end of apps/authentication/templatetags/gravatar_url_resolver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py --- a/apps/authentication/templatetags/gravatar_url_resolver.py +++ b/apps/authentication/templatetags/gravatar_url_resolver.py @@ -6,8 +6,11 @@ @register.assignment_tag(takes_context=True) def gravatar_url(context, user, size): - default = "%s%s_%s.png" % (context['request'].META['HTTP_HOST'], + + prefix = "https://" if context['request'].is_secure() else "http://" + default = "%s%s%s_%s.png" % (prefix, context['request'].META['HTTP_HOST'], settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender) + gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.get_email().email).hexdigest() + "?" gravatar_url += urllib.urlencode({'d': default, 's':str(size)})
{"golden_diff": "diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py\n--- a/apps/authentication/templatetags/gravatar_url_resolver.py\n+++ b/apps/authentication/templatetags/gravatar_url_resolver.py\n@@ -6,8 +6,11 @@\n \n @register.assignment_tag(takes_context=True)\n def gravatar_url(context, user, size):\n- default = \"%s%s_%s.png\" % (context['request'].META['HTTP_HOST'],\n+\n+ prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n+ default = \"%s%s%s_%s.png\" % (prefix, context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender)\n+\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.get_email().email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n", "issue": "Gravatar default images do not load correctly\n\n", "before_files": [{"content": "from django import template\nfrom django.conf import settings\nimport urllib, hashlib\n \nregister = template.Library()\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n default = \"%s%s_%s.png\" % (context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender)\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.get_email().email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n\n return gravatar_url", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}]}
705
219
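The patched template tag (Python 2 code, as in the record) prepends a scheme that matches the request, since `HTTP_HOST` alone produced a scheme-less fallback URL that Gravatar could not fetch — that reading of the failure is inferred from the diff, not stated in the issue:

```python
from django import template
from django.conf import settings
import urllib, hashlib

register = template.Library()

@register.assignment_tag(takes_context=True)
def gravatar_url(context, user, size):

    prefix = "https://" if context['request'].is_secure() else "http://"
    default = "%s%s%s_%s.png" % (prefix, context['request'].META['HTTP_HOST'],
        settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender)

    gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.get_email().email).hexdigest() + "?"
    gravatar_url += urllib.urlencode({'d': default, 's': str(size)})

    return gravatar_url
```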
gh_patches_debug_17320
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-1324
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Only keep necessary dependencies in requirements.txt Currently, we have one single `requirements.txt` file which contains some unnecessary dependencies used for testing only such as `pytest`, `pytest-cov`, and `mock`. They should not be part of `requirements.txt` and should not be distributed with `elasticdl` Python package. </issue> <code> [start of setup.py] 1 from setuptools import find_packages, setup 2 3 with open("elasticdl/requirements.txt") as f: 4 requirements = f.read().splitlines() 5 6 setup( 7 name="elasticdl", 8 version="develop", 9 description="A Kubernetes-native Deep Learning Framework", 10 long_description="ElasticDL is a Kubernetes-native deep learning framework" 11 " built on top of TensorFlow 2.0 that supports" 12 " fault-tolerance and elastic scheduling.", 13 long_description_content_type="text/markdown", 14 author="Ant Financial", 15 url="https://elasticdl.org", 16 install_requires=requirements, 17 packages=find_packages(exclude=["*test*"]), 18 package_data={"": ["proto/elasticdl.proto", "docker/*", "Makefile"]}, 19 entry_points={ 20 "console_scripts": ["elasticdl=elasticdl.python.elasticdl.client:main"] 21 }, 22 ) 23 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,7 +1,11 @@ from setuptools import find_packages, setup with open("elasticdl/requirements.txt") as f: - requirements = f.read().splitlines() + required_deps = f.read().splitlines() + +extras = {} +with open("elasticdl/requirements-dev.txt") as f: + extras["develop"] = f.read().splitlines() setup( name="elasticdl", @@ -13,7 +17,8 @@ long_description_content_type="text/markdown", author="Ant Financial", url="https://elasticdl.org", - install_requires=requirements, + install_requires=required_deps, + extras_require=extras, packages=find_packages(exclude=["*test*"]), package_data={"": ["proto/elasticdl.proto", "docker/*", "Makefile"]}, entry_points={
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,11 @@\n from setuptools import find_packages, setup\n \n with open(\"elasticdl/requirements.txt\") as f:\n- requirements = f.read().splitlines()\n+ required_deps = f.read().splitlines()\n+\n+extras = {}\n+with open(\"elasticdl/requirements-dev.txt\") as f:\n+ extras[\"develop\"] = f.read().splitlines()\n \n setup(\n name=\"elasticdl\",\n@@ -13,7 +17,8 @@\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n- install_requires=requirements,\n+ install_requires=required_deps,\n+ extras_require=extras,\n packages=find_packages(exclude=[\"*test*\"]),\n package_data={\"\": [\"proto/elasticdl.proto\", \"docker/*\", \"Makefile\"]},\n entry_points={\n", "issue": "Only keep necessary dependencies in requirements.txt\nCurrently, we have one single `requirements.txt` file which contains some unnecessary dependencies used for testing only such as `pytest`, `pytest-cov`, and `mock`. They should not be part of `requirements.txt` and should not be distributed with `elasticdl` Python package.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nwith open(\"elasticdl/requirements.txt\") as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"develop\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\n \" fault-tolerance and elastic scheduling.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=requirements,\n packages=find_packages(exclude=[\"*test*\"]),\n package_data={\"\": [\"proto/elasticdl.proto\", \"docker/*\", \"Makefile\"]},\n entry_points={\n \"console_scripts\": [\"elasticdl=elasticdl.python.elasticdl.client:main\"]\n },\n)\n", "path": "setup.py"}]}
813
209
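The patched `setup.py`, reconstructed from `before_files` plus the diff; it assumes an `elasticdl/requirements-dev.txt` holding the test-only packages (pytest, pytest-cov, mock) exists alongside the trimmed `requirements.txt`:

```python
from setuptools import find_packages, setup

with open("elasticdl/requirements.txt") as f:
    required_deps = f.read().splitlines()

extras = {}
with open("elasticdl/requirements-dev.txt") as f:
    extras["develop"] = f.read().splitlines()

setup(
    name="elasticdl",
    version="develop",
    description="A Kubernetes-native Deep Learning Framework",
    long_description="ElasticDL is a Kubernetes-native deep learning framework"
    " built on top of TensorFlow 2.0 that supports"
    " fault-tolerance and elastic scheduling.",
    long_description_content_type="text/markdown",
    author="Ant Financial",
    url="https://elasticdl.org",
    install_requires=required_deps,
    # dev-only deps install via `pip install elasticdl[develop]`
    extras_require=extras,
    packages=find_packages(exclude=["*test*"]),
    package_data={"": ["proto/elasticdl.proto", "docker/*", "Makefile"]},
    entry_points={
        "console_scripts": ["elasticdl=elasticdl.python.elasticdl.client:main"]
    },
)
```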
gh_patches_debug_14455
rasdani/github-patches
git_diff
searx__searx-333
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Piratebay engine : update the URL The current URL is https://thepiratebay.se which doesn't respond. This one seems to work https://thepiratebay.mn (see https://en.wikipedia.org/wiki/The_Pirate_Bay ) Another solution : disable / remove the engine in settings.yml </issue> <code> [start of searx/engines/piratebay.py] 1 # Piratebay (Videos, Music, Files) 2 # 3 # @website https://thepiratebay.se 4 # @provide-api no (nothing found) 5 # 6 # @using-api no 7 # @results HTML (using search portal) 8 # @stable yes (HTML can change) 9 # @parse url, title, content, seed, leech, magnetlink 10 11 from urlparse import urljoin 12 from cgi import escape 13 from urllib import quote 14 from lxml import html 15 from operator import itemgetter 16 from searx.engines.xpath import extract_text 17 18 # engine dependent config 19 categories = ['videos', 'music', 'files'] 20 paging = True 21 22 # search-url 23 url = 'https://thepiratebay.se/' 24 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}' 25 26 # piratebay specific type-definitions 27 search_types = {'files': '0', 28 'music': '100', 29 'videos': '200'} 30 31 # specific xpath variables 32 magnet_xpath = './/a[@title="Download this torrent using magnet"]' 33 torrent_xpath = './/a[@title="Download this torrent"]' 34 content_xpath = './/font[@class="detDesc"]' 35 36 37 # do search-request 38 def request(query, params): 39 search_type = search_types.get(params['category'], '0') 40 41 params['url'] = search_url.format(search_term=quote(query), 42 search_type=search_type, 43 pageno=params['pageno'] - 1) 44 45 # FIX: SSLError: hostname 'kthepiratebay.se' 46 # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com' 47 params['verify'] = False 48 49 return params 50 51 52 # get response from search-request 53 def response(resp): 54 results = [] 55 56 dom = html.fromstring(resp.text) 57 58 search_res = dom.xpath('//table[@id="searchResult"]//tr') 59 60 # return empty array if nothing is found 61 if not search_res: 62 return [] 63 64 # parse results 65 for result in search_res[1:]: 66 link = result.xpath('.//div[@class="detName"]//a')[0] 67 href = urljoin(url, link.attrib.get('href')) 68 title = extract_text(link) 69 content = escape(extract_text(result.xpath(content_xpath))) 70 seed, leech = result.xpath('.//td[@align="right"]/text()')[:2] 71 72 # convert seed to int if possible 73 if seed.isdigit(): 74 seed = int(seed) 75 else: 76 seed = 0 77 78 # convert leech to int if possible 79 if leech.isdigit(): 80 leech = int(leech) 81 else: 82 leech = 0 83 84 magnetlink = result.xpath(magnet_xpath)[0] 85 torrentfile_links = result.xpath(torrent_xpath) 86 if torrentfile_links: 87 torrentfile_link = torrentfile_links[0].attrib.get('href') 88 else: 89 torrentfile_link = None 90 91 # append result 92 results.append({'url': href, 93 'title': title, 94 'content': content, 95 'seed': seed, 96 'leech': leech, 97 'magnetlink': magnetlink.attrib.get('href'), 98 'torrentfile': torrentfile_link, 99 'template': 'torrent.html'}) 100 101 # return results sorted by seeder 102 return sorted(results, key=itemgetter('seed'), reverse=True) 103 [end of searx/engines/piratebay.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py --- a/searx/engines/piratebay.py +++ b/searx/engines/piratebay.py @@ -20,7 +20,7 @@ paging = True # search-url -url = 'https://thepiratebay.se/' +url = 'https://thepiratebay.am/' search_url = url + 'search/{search_term}/{pageno}/99/{search_type}' # piratebay specific type-definitions @@ -42,10 +42,6 @@ search_type=search_type, pageno=params['pageno'] - 1) - # FIX: SSLError: hostname 'kthepiratebay.se' - # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com' - params['verify'] = False - return params
{"golden_diff": "diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py\n--- a/searx/engines/piratebay.py\n+++ b/searx/engines/piratebay.py\n@@ -20,7 +20,7 @@\n paging = True\n \n # search-url\n-url = 'https://thepiratebay.se/'\n+url = 'https://thepiratebay.am/'\n search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'\n \n # piratebay specific type-definitions\n@@ -42,10 +42,6 @@\n search_type=search_type,\n pageno=params['pageno'] - 1)\n \n- # FIX: SSLError: hostname 'kthepiratebay.se'\n- # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com'\n- params['verify'] = False\n-\n return params\n", "issue": "Piratebay engine : update the URL\nThe current URL is https://thepiratebay.se which doesn't respond.\nThis one seems to work https://thepiratebay.mn (see https://en.wikipedia.org/wiki/The_Pirate_Bay )\n\nAnother solution : disable / remove the engine in settings.yml\n\n", "before_files": [{"content": "# Piratebay (Videos, Music, Files)\n#\n# @website https://thepiratebay.se\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://thepiratebay.se/'\nsearch_url = url + 'search/{search_term}/{pageno}/99/{search_type}'\n\n# piratebay specific type-definitions\nsearch_types = {'files': '0',\n 'music': '100',\n 'videos': '200'}\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Download this torrent using magnet\"]'\ntorrent_xpath = './/a[@title=\"Download this torrent\"]'\ncontent_xpath = './/font[@class=\"detDesc\"]'\n\n\n# do search-request\ndef request(query, params):\n search_type = search_types.get(params['category'], '0')\n\n params['url'] = search_url.format(search_term=quote(query),\n search_type=search_type,\n pageno=params['pageno'] - 1)\n\n # FIX: SSLError: hostname 'kthepiratebay.se'\n # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@id=\"searchResult\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//div[@class=\"detName\"]//a')[0]\n href = urljoin(url, link.attrib.get('href'))\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed, leech = result.xpath('.//td[@align=\"right\"]/text()')[:2]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n magnetlink = result.xpath(magnet_xpath)[0]\n torrentfile_links = result.xpath(torrent_xpath)\n if torrentfile_links:\n torrentfile_link = torrentfile_links[0].attrib.get('href')\n else:\n torrentfile_link = None\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'magnetlink': magnetlink.attrib.get('href'),\n 'torrentfile': torrentfile_link,\n 
'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/piratebay.py"}]}
1,558
216
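Editor's note on the record above: the fix swaps a dead base URL for a live mirror and drops the per-request TLS workaround. Below is a minimal standalone sketch of how one might pick a responding mirror before editing `url` in searx/engines/piratebay.py; the mirror list and the use of the third-party `requests` library are illustrative assumptions, not part of the patch.

```python
# Illustrative sketch only: probe candidate mirrors and keep the first one
# that answers, with TLS verification left on (the old
# `params['verify'] = False` hack only papered over a certificate mismatch).
import requests  # third-party; assumed available

CANDIDATE_MIRRORS = [
    "https://thepiratebay.se/",  # dead, per the issue
    "https://thepiratebay.am/",  # the URL the golden diff adopts
    "https://thepiratebay.mn/",  # the URL suggested in the issue
]

def first_live_mirror(mirrors, timeout=5):
    """Return the first mirror answering HTTP 200, or None."""
    for base in mirrors:
        try:
            resp = requests.get(base, timeout=timeout)
        except requests.RequestException:
            continue
        if resp.status_code == 200:
            return base
    return None

if __name__ == "__main__":
    print(first_live_mirror(CANDIDATE_MIRRORS))
```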
gh_patches_debug_275
rasdani/github-patches
git_diff
sanic-org__sanic-878
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 0.5.5 release request Because 0.5.4 has actual protocol parsing problem (#755) I request to quickly release 0.5.5. It causes actual request loss and unhandlable 400 errors for the sanic users. (unless they make local patch for sanic) </issue> <code> [start of sanic/__init__.py] 1 from sanic.app import Sanic 2 from sanic.blueprints import Blueprint 3 4 __version__ = '0.5.4' 5 6 __all__ = ['Sanic', 'Blueprint'] 7 [end of sanic/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sanic/__init__.py b/sanic/__init__.py --- a/sanic/__init__.py +++ b/sanic/__init__.py @@ -1,6 +1,6 @@ from sanic.app import Sanic from sanic.blueprints import Blueprint -__version__ = '0.5.4' +__version__ = '0.6.0' __all__ = ['Sanic', 'Blueprint']
{"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -1,6 +1,6 @@\n from sanic.app import Sanic\n from sanic.blueprints import Blueprint\n \n-__version__ = '0.5.4'\n+__version__ = '0.6.0'\n \n __all__ = ['Sanic', 'Blueprint']\n", "issue": "0.5.5 release request\nBecause 0.5.4 has actual protocol parsing problem (#755) I request to quickly release 0.5.5.\r\n\r\nIt causes actual request loss and unhandlable 400 errors for the sanic users. (unless they make local patch for sanic)\n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.5.4'\n\n__all__ = ['Sanic', 'Blueprint']\n", "path": "sanic/__init__.py"}]}
654
99
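Editor's note: the golden diff above reduces to a one-line version bump from 0.5.4 to 0.6.0. A hedged sketch of performing the same edit mechanically, as a release script might; the regex and helper name are assumptions for illustration only.

```python
import re

def bump_version(source: str, new_version: str) -> str:
    """Return `source` with its first __version__ assignment rewritten."""
    return re.sub(
        r"__version__ = '[^']*'",
        f"__version__ = '{new_version}'",
        source,
        count=1,
    )

print(bump_version("__version__ = '0.5.4'", "0.6.0"))
# -> __version__ = '0.6.0'
```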
gh_patches_debug_31431
rasdani/github-patches
git_diff
CTFd__CTFd-1581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Hidden scores, hides graphs for admins as well Hidden scores, hides graphs for admins as well. </issue> <code> [start of CTFd/utils/decorators/visibility.py] 1 import functools 2 3 from flask import abort, redirect, render_template, request, url_for 4 5 from CTFd.constants.config import ( 6 AccountVisibilityTypes, 7 ChallengeVisibilityTypes, 8 ConfigTypes, 9 RegistrationVisibilityTypes, 10 ScoreVisibilityTypes, 11 ) 12 from CTFd.utils import get_config 13 from CTFd.utils.user import authed, is_admin 14 15 16 def check_score_visibility(f): 17 @functools.wraps(f) 18 def _check_score_visibility(*args, **kwargs): 19 v = get_config(ConfigTypes.SCORE_VISIBILITY) 20 if v == ScoreVisibilityTypes.PUBLIC: 21 return f(*args, **kwargs) 22 23 elif v == ScoreVisibilityTypes.PRIVATE: 24 if authed(): 25 return f(*args, **kwargs) 26 else: 27 if request.content_type == "application/json": 28 abort(403) 29 else: 30 return redirect(url_for("auth.login", next=request.full_path)) 31 32 elif v == ScoreVisibilityTypes.HIDDEN: 33 return ( 34 render_template("errors/403.html", error="Scores are currently hidden"), 35 403, 36 ) 37 38 elif v == ScoreVisibilityTypes.ADMINS: 39 if is_admin(): 40 return f(*args, **kwargs) 41 else: 42 abort(404) 43 44 return _check_score_visibility 45 46 47 def check_challenge_visibility(f): 48 @functools.wraps(f) 49 def _check_challenge_visibility(*args, **kwargs): 50 v = get_config(ConfigTypes.CHALLENGE_VISIBILITY) 51 if v == ChallengeVisibilityTypes.PUBLIC: 52 return f(*args, **kwargs) 53 54 elif v == ChallengeVisibilityTypes.PRIVATE: 55 if authed(): 56 return f(*args, **kwargs) 57 else: 58 if request.content_type == "application/json": 59 abort(403) 60 else: 61 return redirect(url_for("auth.login", next=request.full_path)) 62 63 elif v == ChallengeVisibilityTypes.ADMINS: 64 if is_admin(): 65 return f(*args, **kwargs) 66 else: 67 if authed(): 68 abort(403) 69 else: 70 return redirect(url_for("auth.login", next=request.full_path)) 71 72 return _check_challenge_visibility 73 74 75 def check_account_visibility(f): 76 @functools.wraps(f) 77 def _check_account_visibility(*args, **kwargs): 78 v = get_config(ConfigTypes.ACCOUNT_VISIBILITY) 79 if v == AccountVisibilityTypes.PUBLIC: 80 return f(*args, **kwargs) 81 82 elif v == AccountVisibilityTypes.PRIVATE: 83 if authed(): 84 return f(*args, **kwargs) 85 else: 86 if request.content_type == "application/json": 87 abort(403) 88 else: 89 return redirect(url_for("auth.login", next=request.full_path)) 90 91 elif v == AccountVisibilityTypes.ADMINS: 92 if is_admin(): 93 return f(*args, **kwargs) 94 else: 95 abort(404) 96 97 return _check_account_visibility 98 99 100 def check_registration_visibility(f): 101 @functools.wraps(f) 102 def _check_registration_visibility(*args, **kwargs): 103 v = get_config(ConfigTypes.REGISTRATION_VISIBILITY) 104 if v == RegistrationVisibilityTypes.PUBLIC: 105 return f(*args, **kwargs) 106 elif v == RegistrationVisibilityTypes.PRIVATE: 107 abort(404) 108 109 return _check_registration_visibility 110 [end of CTFd/utils/decorators/visibility.py] [start of CTFd/scoreboard.py] 1 from flask import Blueprint, render_template 2 3 from CTFd.cache import cache, make_cache_key 4 from CTFd.utils import config 5 from CTFd.utils.decorators.visibility import check_score_visibility 6 from CTFd.utils.helpers import get_infos 7 from CTFd.utils.scores import get_standings 8 9 scoreboard = Blueprint("scoreboard", __name__) 10 11 12 
@scoreboard.route("/scoreboard") 13 @check_score_visibility 14 @cache.cached(timeout=60, key_prefix=make_cache_key) 15 def listing(): 16 infos = get_infos() 17 18 if config.is_scoreboard_frozen(): 19 infos.append("Scoreboard has been frozen") 20 21 standings = get_standings() 22 return render_template("scoreboard.html", standings=standings, infos=infos) 23 [end of CTFd/scoreboard.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/scoreboard.py b/CTFd/scoreboard.py --- a/CTFd/scoreboard.py +++ b/CTFd/scoreboard.py @@ -2,9 +2,11 @@ from CTFd.cache import cache, make_cache_key from CTFd.utils import config +from CTFd.utils.config.visibility import scores_visible from CTFd.utils.decorators.visibility import check_score_visibility from CTFd.utils.helpers import get_infos from CTFd.utils.scores import get_standings +from CTFd.utils.user import is_admin scoreboard = Blueprint("scoreboard", __name__) @@ -18,5 +20,8 @@ if config.is_scoreboard_frozen(): infos.append("Scoreboard has been frozen") + if is_admin() is True and scores_visible() is False: + infos.append("Scores are not currently visible to users") + standings = get_standings() return render_template("scoreboard.html", standings=standings, infos=infos) diff --git a/CTFd/utils/decorators/visibility.py b/CTFd/utils/decorators/visibility.py --- a/CTFd/utils/decorators/visibility.py +++ b/CTFd/utils/decorators/visibility.py @@ -30,10 +30,18 @@ return redirect(url_for("auth.login", next=request.full_path)) elif v == ScoreVisibilityTypes.HIDDEN: - return ( - render_template("errors/403.html", error="Scores are currently hidden"), - 403, - ) + if is_admin(): + return f(*args, **kwargs) + else: + if request.content_type == "application/json": + abort(403) + else: + return ( + render_template( + "errors/403.html", error="Scores are currently hidden" + ), + 403, + ) elif v == ScoreVisibilityTypes.ADMINS: if is_admin():
{"golden_diff": "diff --git a/CTFd/scoreboard.py b/CTFd/scoreboard.py\n--- a/CTFd/scoreboard.py\n+++ b/CTFd/scoreboard.py\n@@ -2,9 +2,11 @@\n \n from CTFd.cache import cache, make_cache_key\n from CTFd.utils import config\n+from CTFd.utils.config.visibility import scores_visible\n from CTFd.utils.decorators.visibility import check_score_visibility\n from CTFd.utils.helpers import get_infos\n from CTFd.utils.scores import get_standings\n+from CTFd.utils.user import is_admin\n \n scoreboard = Blueprint(\"scoreboard\", __name__)\n \n@@ -18,5 +20,8 @@\n if config.is_scoreboard_frozen():\n infos.append(\"Scoreboard has been frozen\")\n \n+ if is_admin() is True and scores_visible() is False:\n+ infos.append(\"Scores are not currently visible to users\")\n+\n standings = get_standings()\n return render_template(\"scoreboard.html\", standings=standings, infos=infos)\ndiff --git a/CTFd/utils/decorators/visibility.py b/CTFd/utils/decorators/visibility.py\n--- a/CTFd/utils/decorators/visibility.py\n+++ b/CTFd/utils/decorators/visibility.py\n@@ -30,10 +30,18 @@\n return redirect(url_for(\"auth.login\", next=request.full_path))\n \n elif v == ScoreVisibilityTypes.HIDDEN:\n- return (\n- render_template(\"errors/403.html\", error=\"Scores are currently hidden\"),\n- 403,\n- )\n+ if is_admin():\n+ return f(*args, **kwargs)\n+ else:\n+ if request.content_type == \"application/json\":\n+ abort(403)\n+ else:\n+ return (\n+ render_template(\n+ \"errors/403.html\", error=\"Scores are currently hidden\"\n+ ),\n+ 403,\n+ )\n \n elif v == ScoreVisibilityTypes.ADMINS:\n if is_admin():\n", "issue": "Hidden scores, hides graphs for admins as well\nHidden scores, hides graphs for admins as well. \n", "before_files": [{"content": "import functools\n\nfrom flask import abort, redirect, render_template, request, url_for\n\nfrom CTFd.constants.config import (\n AccountVisibilityTypes,\n ChallengeVisibilityTypes,\n ConfigTypes,\n RegistrationVisibilityTypes,\n ScoreVisibilityTypes,\n)\nfrom CTFd.utils import get_config\nfrom CTFd.utils.user import authed, is_admin\n\n\ndef check_score_visibility(f):\n @functools.wraps(f)\n def _check_score_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.SCORE_VISIBILITY)\n if v == ScoreVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == ScoreVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == ScoreVisibilityTypes.HIDDEN:\n return (\n render_template(\"errors/403.html\", error=\"Scores are currently hidden\"),\n 403,\n )\n\n elif v == ScoreVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n abort(404)\n\n return _check_score_visibility\n\n\ndef check_challenge_visibility(f):\n @functools.wraps(f)\n def _check_challenge_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.CHALLENGE_VISIBILITY)\n if v == ChallengeVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == ChallengeVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == ChallengeVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n if authed():\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n return _check_challenge_visibility\n\n\ndef 
check_account_visibility(f):\n @functools.wraps(f)\n def _check_account_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.ACCOUNT_VISIBILITY)\n if v == AccountVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == AccountVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == AccountVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n abort(404)\n\n return _check_account_visibility\n\n\ndef check_registration_visibility(f):\n @functools.wraps(f)\n def _check_registration_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.REGISTRATION_VISIBILITY)\n if v == RegistrationVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n elif v == RegistrationVisibilityTypes.PRIVATE:\n abort(404)\n\n return _check_registration_visibility\n", "path": "CTFd/utils/decorators/visibility.py"}, {"content": "from flask import Blueprint, render_template\n\nfrom CTFd.cache import cache, make_cache_key\nfrom CTFd.utils import config\nfrom CTFd.utils.decorators.visibility import check_score_visibility\nfrom CTFd.utils.helpers import get_infos\nfrom CTFd.utils.scores import get_standings\n\nscoreboard = Blueprint(\"scoreboard\", __name__)\n\n\[email protected](\"/scoreboard\")\n@check_score_visibility\[email protected](timeout=60, key_prefix=make_cache_key)\ndef listing():\n infos = get_infos()\n\n if config.is_scoreboard_frozen():\n infos.append(\"Scoreboard has been frozen\")\n\n standings = get_standings()\n return render_template(\"scoreboard.html\", standings=standings, infos=infos)\n", "path": "CTFd/scoreboard.py"}]}
1,731
450
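Editor's note: the patch above changes the HIDDEN branch so admins fall through to the view instead of receiving a blanket 403, and adds a notice on the scoreboard. A self-contained sketch of that control flow follows; the `get_setting`/`is_admin` callables and `PermissionError` stand in for CTFd's config lookup and Flask's `abort`, and are assumptions for illustration.

```python
import functools

def check_score_visibility(get_setting, is_admin):
    """Decorator factory mirroring the patched branch logic."""
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            setting = get_setting()
            if setting == "public":
                return f(*args, **kwargs)
            if setting == "hidden":
                if is_admin():
                    # Admins can still reach hidden scoreboards and graphs.
                    return f(*args, **kwargs)
                raise PermissionError("Scores are currently hidden")
            raise PermissionError("unhandled visibility setting")
        return wrapper
    return decorator

@check_score_visibility(get_setting=lambda: "hidden", is_admin=lambda: True)
def scoreboard():
    return "standings"

print(scoreboard())  # admins see the page even while scores are hidden
```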
gh_patches_debug_28399
rasdani/github-patches
git_diff
mirumee__ariadne-1162
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python 3.12 has deprecation warnings Ariadne produces few warnings when ran on Py3.12. Most of those are out of our hands as they are coming from dependencies, but there are some we should fix: ``` /ariadne/ariadne/contrib/tracing/apollotracing.py:36: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). self.start_date = datetime.utcnow() /ariadne/ariadne/contrib/tracing/apollotracing.py:96: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). "end": datetime.utcnow(), ``` </issue> <code> [start of ariadne/contrib/tracing/apollotracing.py] 1 from datetime import datetime 2 from inspect import iscoroutinefunction 3 from typing import Any, List, Optional, cast 4 5 from graphql import GraphQLResolveInfo 6 from graphql.pyutils import is_awaitable 7 8 from ...types import ContextValue, Extension, Resolver 9 from .utils import format_path, should_trace 10 11 try: 12 from time import perf_counter_ns 13 except ImportError: 14 # Py 3.6 fallback 15 from time import perf_counter 16 17 NS_IN_SECOND = 1000000000 18 19 def perf_counter_ns() -> int: 20 return int(perf_counter() * NS_IN_SECOND) 21 22 23 TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" 24 25 26 class ApolloTracingExtension(Extension): 27 def __init__(self, trace_default_resolver: bool = False) -> None: 28 self.trace_default_resolver = trace_default_resolver 29 self.start_date: Optional[datetime] = None 30 self.start_timestamp: int = 0 31 self.resolvers: List[dict] = [] 32 33 self._totals = None 34 35 def request_started(self, context: ContextValue): 36 self.start_date = datetime.utcnow() 37 self.start_timestamp = perf_counter_ns() 38 39 def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs): 40 if not should_trace(info, self.trace_default_resolver): 41 return next_(obj, info, **kwargs) 42 43 if iscoroutinefunction(next_): 44 return self.resolve_async(next_, obj, info, **kwargs) 45 46 return self.resolve_sync(next_, obj, info, **kwargs) 47 48 async def resolve_async( 49 self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs 50 ): 51 start_timestamp = perf_counter_ns() 52 record = { 53 "path": format_path(info.path), 54 "parentType": str(info.parent_type), 55 "fieldName": info.field_name, 56 "returnType": str(info.return_type), 57 "startOffset": start_timestamp - cast(int, self.start_timestamp), 58 } 59 self.resolvers.append(record) 60 try: 61 result = next_(obj, info, **kwargs) 62 if is_awaitable(result): 63 result = await result 64 return result 65 finally: 66 end_timestamp = perf_counter_ns() 67 record["duration"] = end_timestamp - start_timestamp 68 69 def resolve_sync( 70 self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs 71 ): 72 start_timestamp = perf_counter_ns() 73 record = { 74 "path": format_path(info.path), 75 "parentType": str(info.parent_type), 76 "fieldName": info.field_name, 77 "returnType": str(info.return_type), 78 "startOffset": start_timestamp - cast(int, self.start_timestamp), 79 } 80 self.resolvers.append(record) 81 try: 82 result = next_(obj, info, **kwargs) 83 return result 84 finally: 85 end_timestamp = perf_counter_ns() 86 record["duration"] = end_timestamp - 
start_timestamp 87 88 def get_totals(self): 89 if self._totals is None: 90 self._totals = self._get_totals() 91 return self._totals 92 93 def _get_totals(self): 94 return { 95 "start": self.start_date, 96 "end": datetime.utcnow(), 97 "duration": perf_counter_ns() - self.start_timestamp, 98 "resolvers": self.resolvers, 99 } 100 101 def format(self, context: ContextValue): 102 totals = self.get_totals() 103 104 return { 105 "tracing": { 106 "version": 1, 107 "startTime": totals["start"].strftime(TIMESTAMP_FORMAT), 108 "endTime": totals["end"].strftime(TIMESTAMP_FORMAT), 109 "duration": totals["duration"], 110 "execution": {"resolvers": totals["resolvers"]}, 111 } 112 } 113 [end of ariadne/contrib/tracing/apollotracing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ariadne/contrib/tracing/apollotracing.py b/ariadne/contrib/tracing/apollotracing.py --- a/ariadne/contrib/tracing/apollotracing.py +++ b/ariadne/contrib/tracing/apollotracing.py @@ -1,4 +1,3 @@ -from datetime import datetime from inspect import iscoroutinefunction from typing import Any, List, Optional, cast @@ -20,6 +19,19 @@ return int(perf_counter() * NS_IN_SECOND) +try: + from datetime import UTC, datetime # type: ignore[attr-defined] + + def utc_now(): + return datetime.now(UTC) + +except ImportError: + from datetime import datetime + + def utc_now(): + return datetime.utcnow() + + TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" @@ -33,7 +45,7 @@ self._totals = None def request_started(self, context: ContextValue): - self.start_date = datetime.utcnow() + self.start_date = utc_now() self.start_timestamp = perf_counter_ns() def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs): @@ -93,7 +105,7 @@ def _get_totals(self): return { "start": self.start_date, - "end": datetime.utcnow(), + "end": utc_now(), "duration": perf_counter_ns() - self.start_timestamp, "resolvers": self.resolvers, }
{"golden_diff": "diff --git a/ariadne/contrib/tracing/apollotracing.py b/ariadne/contrib/tracing/apollotracing.py\n--- a/ariadne/contrib/tracing/apollotracing.py\n+++ b/ariadne/contrib/tracing/apollotracing.py\n@@ -1,4 +1,3 @@\n-from datetime import datetime\n from inspect import iscoroutinefunction\n from typing import Any, List, Optional, cast\n \n@@ -20,6 +19,19 @@\n return int(perf_counter() * NS_IN_SECOND)\n \n \n+try:\n+ from datetime import UTC, datetime # type: ignore[attr-defined]\n+\n+ def utc_now():\n+ return datetime.now(UTC)\n+\n+except ImportError:\n+ from datetime import datetime\n+\n+ def utc_now():\n+ return datetime.utcnow()\n+\n+\n TIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n \n \n@@ -33,7 +45,7 @@\n self._totals = None\n \n def request_started(self, context: ContextValue):\n- self.start_date = datetime.utcnow()\n+ self.start_date = utc_now()\n self.start_timestamp = perf_counter_ns()\n \n def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs):\n@@ -93,7 +105,7 @@\n def _get_totals(self):\n return {\n \"start\": self.start_date,\n- \"end\": datetime.utcnow(),\n+ \"end\": utc_now(),\n \"duration\": perf_counter_ns() - self.start_timestamp,\n \"resolvers\": self.resolvers,\n }\n", "issue": "Python 3.12 has deprecation warnings\nAriadne produces few warnings when ran on Py3.12. Most of those are out of our hands as they are coming from dependencies, but there are some we should fix:\r\n\r\n```\r\n\r\n /ariadne/ariadne/contrib/tracing/apollotracing.py:36: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).\r\n self.start_date = datetime.utcnow()\r\n\r\n /ariadne/ariadne/contrib/tracing/apollotracing.py:96: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).\r\n \"end\": datetime.utcnow(),\r\n```\n", "before_files": [{"content": "from datetime import datetime\nfrom inspect import iscoroutinefunction\nfrom typing import Any, List, Optional, cast\n\nfrom graphql import GraphQLResolveInfo\nfrom graphql.pyutils import is_awaitable\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\ntry:\n from time import perf_counter_ns\nexcept ImportError:\n # Py 3.6 fallback\n from time import perf_counter\n\n NS_IN_SECOND = 1000000000\n\n def perf_counter_ns() -> int:\n return int(perf_counter() * NS_IN_SECOND)\n\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\n\nclass ApolloTracingExtension(Extension):\n def __init__(self, trace_default_resolver: bool = False) -> None:\n self.trace_default_resolver = trace_default_resolver\n self.start_date: Optional[datetime] = None\n self.start_timestamp: int = 0\n self.resolvers: List[dict] = []\n\n self._totals = None\n\n def request_started(self, context: ContextValue):\n self.start_date = datetime.utcnow()\n self.start_timestamp = perf_counter_ns()\n\n def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs):\n if not should_trace(info, self.trace_default_resolver):\n return next_(obj, info, **kwargs)\n\n if iscoroutinefunction(next_):\n return self.resolve_async(next_, obj, info, **kwargs)\n\n return self.resolve_sync(next_, obj, info, **kwargs)\n\n async def resolve_async(\n self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs\n ):\n start_timestamp = perf_counter_ns()\n record = {\n \"path\": format_path(info.path),\n \"parentType\": str(info.parent_type),\n \"fieldName\": info.field_name,\n \"returnType\": str(info.return_type),\n \"startOffset\": start_timestamp - cast(int, self.start_timestamp),\n }\n self.resolvers.append(record)\n try:\n result = next_(obj, info, **kwargs)\n if is_awaitable(result):\n result = await result\n return result\n finally:\n end_timestamp = perf_counter_ns()\n record[\"duration\"] = end_timestamp - start_timestamp\n\n def resolve_sync(\n self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs\n ):\n start_timestamp = perf_counter_ns()\n record = {\n \"path\": format_path(info.path),\n \"parentType\": str(info.parent_type),\n \"fieldName\": info.field_name,\n \"returnType\": str(info.return_type),\n \"startOffset\": start_timestamp - cast(int, self.start_timestamp),\n }\n self.resolvers.append(record)\n try:\n result = next_(obj, info, **kwargs)\n return result\n finally:\n end_timestamp = perf_counter_ns()\n record[\"duration\"] = end_timestamp - start_timestamp\n\n def get_totals(self):\n if self._totals is None:\n self._totals = self._get_totals()\n return self._totals\n\n def _get_totals(self):\n return {\n \"start\": self.start_date,\n \"end\": datetime.utcnow(),\n \"duration\": perf_counter_ns() - self.start_timestamp,\n \"resolvers\": self.resolvers,\n }\n\n def format(self, context: ContextValue):\n totals = self.get_totals()\n\n return {\n \"tracing\": {\n \"version\": 1,\n \"startTime\": totals[\"start\"].strftime(TIMESTAMP_FORMAT),\n \"endTime\": totals[\"end\"].strftime(TIMESTAMP_FORMAT),\n \"duration\": totals[\"duration\"],\n \"execution\": {\"resolvers\": totals[\"resolvers\"]},\n }\n }\n", "path": "ariadne/contrib/tracing/apollotracing.py"}]}
1,783
356
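Editor's note: the fix above is a version-gated shim, since `datetime.UTC` only exists on Python 3.11+. The same pattern, runnable on its own:

```python
try:
    # Python 3.11+: timezone-aware replacement for utcnow()
    from datetime import UTC, datetime

    def utc_now():
        return datetime.now(UTC)

except ImportError:
    # Older interpreters: keep the (then non-deprecated) utcnow()
    from datetime import datetime

    def utc_now():
        return datetime.utcnow()

print(utc_now().strftime("%Y-%m-%dT%H:%M:%S.%fZ"))
```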
gh_patches_debug_14538
rasdani/github-patches
git_diff
airctic__icevision-189
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> helper function for COCO parser ## 🚀 Feature **Is your feature request related to a problem? Please describe.** As described in #79, it's currently very verbose to create a COCO parser, the steps should be ideally be reduced to a single function call. **Dream API** ```python parser = datasets.coco.parser( annotations_file="path_to_annotations.json", img_dir="path_to_images_dir", ) ``` </issue> <code> [start of mantisshrimp/datasets/coco/parsers.py] 1 __all__ = ["COCOImageInfoParser", "COCOAnnotationParser"] 2 3 from mantisshrimp.imports import * 4 from mantisshrimp.core import * 5 from mantisshrimp.parsers.defaults import * 6 from mantisshrimp.parsers.mixins import * 7 8 9 class COCOImageInfoParser(DefaultImageInfoParser): 10 def __init__(self, infos, img_dir): 11 super().__init__() 12 self.infos = infos 13 self.img_dir = img_dir 14 15 def __iter__(self): 16 yield from self.infos 17 18 def __len__(self): 19 return len(self.infos) 20 21 def imageid(self, o) -> int: 22 return o["id"] 23 24 def filepath(self, o) -> Union[str, Path]: 25 return self.img_dir / o["file_name"] 26 27 def height(self, o) -> int: 28 return o["height"] 29 30 def width(self, o) -> int: 31 return o["width"] 32 33 34 class COCOAnnotationParser(MaskRCNNParser, AreasParserMixin, IsCrowdsParserMixin): 35 def __init__(self, annotations: list): 36 self.annotations = annotations 37 38 def __iter__(self): 39 yield from self.annotations 40 41 def __len__(self): 42 return len(self.annotations) 43 44 def imageid(self, o) -> int: 45 return o["image_id"] 46 47 def labels(self, o) -> List[int]: 48 return [o["category_id"]] 49 50 def bboxes(self, o) -> List[BBox]: 51 return [BBox.from_xywh(*o["bbox"])] 52 53 def areas(self, o) -> List[float]: 54 return [o["area"]] 55 56 def masks(self, o) -> List[MaskArray]: 57 seg = o["segmentation"] 58 if o["iscrowd"]: 59 return [RLE.from_coco(seg["counts"])] 60 else: 61 return [Polygon(seg)] 62 63 def iscrowds(self, o) -> List[bool]: 64 return [o["iscrowd"]] 65 [end of mantisshrimp/datasets/coco/parsers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mantisshrimp/datasets/coco/parsers.py b/mantisshrimp/datasets/coco/parsers.py --- a/mantisshrimp/datasets/coco/parsers.py +++ b/mantisshrimp/datasets/coco/parsers.py @@ -1,9 +1,23 @@ -__all__ = ["COCOImageInfoParser", "COCOAnnotationParser"] +__all__ = ["parser", "COCOImageInfoParser", "COCOAnnotationParser"] from mantisshrimp.imports import * from mantisshrimp.core import * -from mantisshrimp.parsers.defaults import * -from mantisshrimp.parsers.mixins import * +from mantisshrimp.parsers import * + + +def parser( + annotations_file: Union[str, Path], img_dir: Union[str, Path] +) -> ParserInterface: + annotations_dict = json.loads(Path(annotations_file).read()) + + image_info_parser = COCOImageInfoParser( + infos=annotations_dict["images"], img_dir=img_dir + ) + annotations_parser = COCOAnnotationParser( + annotations=annotations_dict["annotations"] + ) + + return CombinedParser(image_info_parser, annotations_parser) class COCOImageInfoParser(DefaultImageInfoParser):
{"golden_diff": "diff --git a/mantisshrimp/datasets/coco/parsers.py b/mantisshrimp/datasets/coco/parsers.py\n--- a/mantisshrimp/datasets/coco/parsers.py\n+++ b/mantisshrimp/datasets/coco/parsers.py\n@@ -1,9 +1,23 @@\n-__all__ = [\"COCOImageInfoParser\", \"COCOAnnotationParser\"]\n+__all__ = [\"parser\", \"COCOImageInfoParser\", \"COCOAnnotationParser\"]\n \n from mantisshrimp.imports import *\n from mantisshrimp.core import *\n-from mantisshrimp.parsers.defaults import *\n-from mantisshrimp.parsers.mixins import *\n+from mantisshrimp.parsers import *\n+\n+\n+def parser(\n+ annotations_file: Union[str, Path], img_dir: Union[str, Path]\n+) -> ParserInterface:\n+ annotations_dict = json.loads(Path(annotations_file).read())\n+\n+ image_info_parser = COCOImageInfoParser(\n+ infos=annotations_dict[\"images\"], img_dir=img_dir\n+ )\n+ annotations_parser = COCOAnnotationParser(\n+ annotations=annotations_dict[\"annotations\"]\n+ )\n+\n+ return CombinedParser(image_info_parser, annotations_parser)\n \n \n class COCOImageInfoParser(DefaultImageInfoParser):\n", "issue": "helper function for COCO parser\n## \ud83d\ude80 Feature\r\n**Is your feature request related to a problem? Please describe.**\r\nAs described in #79, it's currently very verbose to create a COCO parser, the steps should be ideally be reduced to a single function call.\r\n\r\n**Dream API**\r\n```python\r\n parser = datasets.coco.parser(\r\n annotations_file=\"path_to_annotations.json\",\r\n img_dir=\"path_to_images_dir\",\r\n )\r\n```\r\n\n", "before_files": [{"content": "__all__ = [\"COCOImageInfoParser\", \"COCOAnnotationParser\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.core import *\nfrom mantisshrimp.parsers.defaults import *\nfrom mantisshrimp.parsers.mixins import *\n\n\nclass COCOImageInfoParser(DefaultImageInfoParser):\n def __init__(self, infos, img_dir):\n super().__init__()\n self.infos = infos\n self.img_dir = img_dir\n\n def __iter__(self):\n yield from self.infos\n\n def __len__(self):\n return len(self.infos)\n\n def imageid(self, o) -> int:\n return o[\"id\"]\n\n def filepath(self, o) -> Union[str, Path]:\n return self.img_dir / o[\"file_name\"]\n\n def height(self, o) -> int:\n return o[\"height\"]\n\n def width(self, o) -> int:\n return o[\"width\"]\n\n\nclass COCOAnnotationParser(MaskRCNNParser, AreasParserMixin, IsCrowdsParserMixin):\n def __init__(self, annotations: list):\n self.annotations = annotations\n\n def __iter__(self):\n yield from self.annotations\n\n def __len__(self):\n return len(self.annotations)\n\n def imageid(self, o) -> int:\n return o[\"image_id\"]\n\n def labels(self, o) -> List[int]:\n return [o[\"category_id\"]]\n\n def bboxes(self, o) -> List[BBox]:\n return [BBox.from_xywh(*o[\"bbox\"])]\n\n def areas(self, o) -> List[float]:\n return [o[\"area\"]]\n\n def masks(self, o) -> List[MaskArray]:\n seg = o[\"segmentation\"]\n if o[\"iscrowd\"]:\n return [RLE.from_coco(seg[\"counts\"])]\n else:\n return [Polygon(seg)]\n\n def iscrowds(self, o) -> List[bool]:\n return [o[\"iscrowd\"]]\n", "path": "mantisshrimp/datasets/coco/parsers.py"}]}
1,204
277
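Editor's note: the helper added by the golden diff above reads the annotations file with `Path(annotations_file).read()`, but pathlib exposes `read_text()` rather than `read()`. A standalone sketch of the loading step with that call spelled out; the function name is illustrative.

```python
import json
from pathlib import Path

def load_coco_annotations(annotations_file):
    """Split a COCO annotations JSON into image infos and annotations."""
    # Path objects have read_text()/read_bytes(), not read().
    annotations_dict = json.loads(Path(annotations_file).read_text())
    return annotations_dict["images"], annotations_dict["annotations"]

# infos, annotations = load_coco_annotations("path_to_annotations.json")
```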
gh_patches_debug_17904
rasdani/github-patches
git_diff
sopel-irc__sopel-983
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [currency] Currency codes are needlessly case-sensitive There's no reason that the currency codes should have to be capitalized by the user, is there? ``` [06:03:23] <dgw> ;cur 1 USD in CAD [06:03:24] <Sopel> 1.0 USD (U.S. dollar) = 1.386001386 CAD (Canadian Dollar) [06:03:28] <dgw> ;cur 1 usd in CAD [06:03:29] <Sopel> 1.0 usd (U.S. dollar) = 1.386001386 CAD (Canadian Dollar) [06:03:33] <dgw> ;cur 1 USD in cad [06:03:34] <Sopel> dgw: Unknown currency: cad ``` If no reason not to do so arises, I'll PR this one myself in a couple days. </issue> <code> [start of sopel/modules/currency.py] 1 # coding=utf-8 2 # Copyright 2013 Elsie Powell, embolalia.com 3 # Licensed under the Eiffel Forum License 2 4 from __future__ import unicode_literals, absolute_import, print_function, division 5 6 import json 7 import xmltodict 8 import re 9 10 from sopel import web 11 from sopel.module import commands, example, NOLIMIT 12 13 # The Canadian central bank has better exchange rate data than the Fed, the 14 # Bank of England, or the European Central Bank. Who knew? 15 base_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml' 16 regex = re.compile(r''' 17 (\d+(?:\.\d+)?) # Decimal number 18 \s*([a-zA-Z]{3}) # 3-letter currency code 19 \s+(?:in|as|of|to)\s+ # preposition 20 ([a-zA-Z]{3}) # 3-letter currency code 21 ''', re.VERBOSE) 22 23 24 def get_rate(code): 25 if code.upper() == 'CAD': 26 return 1, 'Canadian Dollar' 27 elif code.upper() == 'BTC': 28 rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all')) 29 return 1 / rates['CAD']['24h_avg'], 'Bitcoin—24hr average' 30 31 data, headers = web.get(base_url.format(code), dont_decode=True, return_headers=True) 32 if headers['_http_status'] == 404: 33 return False, False 34 namespaces = { 35 'http://www.cbwiki.net/wiki/index.php/Specification_1.1': 'cb', 36 'http://purl.org/rss/1.0/': None, 37 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf' } 38 xml = xmltodict.parse(data, process_namespaces=True, namespaces=namespaces).get('rdf:RDF') 39 namestring = xml.get('channel').get('title').get('#text') 40 name = namestring[len('Bank of Canada noon rate: '):] 41 name = re.sub(r'\s*\(noon\)\s*', '', name) 42 rate = xml.get('item').get('cb:statistics').get('cb:exchangeRate').get('cb:value').get('#text') 43 return float(rate), name 44 45 46 @commands('cur', 'currency', 'exchange') 47 @example('.cur 20 EUR in USD') 48 def exchange(bot, trigger): 49 """Show the exchange rate between two currencies""" 50 if not trigger.group(2): 51 return bot.reply("No search term. An example: .cur 20 EUR in USD") 52 match = regex.match(trigger.group(2)) 53 if not match: 54 # It's apologetic, because it's using Canadian data. 
55 bot.reply("Sorry, I didn't understand the input.") 56 return NOLIMIT 57 58 amount, of, to = match.groups() 59 try: 60 amount = float(amount) 61 except: 62 bot.reply("Sorry, I didn't understand the input.") 63 display(bot, amount, of, to) 64 65 66 def display(bot, amount, of, to): 67 if not amount: 68 bot.reply("Zero is zero, no matter what country you're in.") 69 try: 70 of_rate, of_name = get_rate(of) 71 if not of_name: 72 bot.reply("Unknown currency: %s" % of) 73 return 74 to_rate, to_name = get_rate(to) 75 if not to_name: 76 bot.reply("Unknown currency: %s" % to) 77 return 78 except Exception as e: 79 raise 80 bot.reply("Something went wrong while I was getting the exchange rate.") 81 return NOLIMIT 82 83 result = amount / of_rate * to_rate 84 bot.say("{} {} ({}) = {} {} ({})".format(amount, of, of_name, 85 result, to, to_name)) 86 87 88 @commands('btc', 'bitcoin') 89 @example('.btc 20 EUR') 90 def bitcoin(bot, trigger): 91 #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency. 92 to = trigger.group(4) 93 amount = trigger.group(3) 94 if not to: 95 to = trigger.group(3) or 'USD' 96 amount = 1 97 98 try: 99 amount = float(amount) 100 except: 101 bot.reply("Sorry, I didn't understand the input.") 102 return NOLIMIT 103 104 display(bot, amount, 'BTC', to) 105 [end of sopel/modules/currency.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/modules/currency.py b/sopel/modules/currency.py --- a/sopel/modules/currency.py +++ b/sopel/modules/currency.py @@ -26,9 +26,10 @@ def get_rate(code): - if code.upper() == 'CAD': + code = code.upper() + if code == 'CAD': return 1, 'Canadian Dollar' - elif code.upper() == 'BTC': + elif code == 'BTC': rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all')) return 1 / rates['CAD']['24h_avg'], 'Bitcoin—24hr average' @@ -85,8 +86,8 @@ return NOLIMIT result = amount / of_rate * to_rate - bot.say("{} {} ({}) = {} {} ({})".format(amount, of, of_name, - result, to, to_name)) + bot.say("{} {} ({}) = {} {} ({})".format(amount, of.upper(), of_name, + result, to.upper(), to_name)) @commands('btc', 'bitcoin')
{"golden_diff": "diff --git a/sopel/modules/currency.py b/sopel/modules/currency.py\n--- a/sopel/modules/currency.py\n+++ b/sopel/modules/currency.py\n@@ -26,9 +26,10 @@\n \n \n def get_rate(code):\n- if code.upper() == 'CAD':\n+ code = code.upper()\n+ if code == 'CAD':\n return 1, 'Canadian Dollar'\n- elif code.upper() == 'BTC':\n+ elif code == 'BTC':\n rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all'))\n return 1 / rates['CAD']['24h_avg'], 'Bitcoin\u201424hr average'\n \n@@ -85,8 +86,8 @@\n return NOLIMIT\n \n result = amount / of_rate * to_rate\n- bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of, of_name,\n- result, to, to_name))\n+ bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of.upper(), of_name,\n+ result, to.upper(), to_name))\n \n \n @commands('btc', 'bitcoin')\n", "issue": "[currency] Currency codes are needlessly case-sensitive\nThere's no reason that the currency codes should have to be capitalized by the user, is there?\n\n```\n[06:03:23] <dgw> ;cur 1 USD in CAD\n[06:03:24] <Sopel> 1.0 USD (U.S. dollar) = 1.386001386 CAD (Canadian Dollar)\n[06:03:28] <dgw> ;cur 1 usd in CAD\n[06:03:29] <Sopel> 1.0 usd (U.S. dollar) = 1.386001386 CAD (Canadian Dollar)\n[06:03:33] <dgw> ;cur 1 USD in cad\n[06:03:34] <Sopel> dgw: Unknown currency: cad\n```\n\nIf no reason not to do so arises, I'll PR this one myself in a couple days.\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2013 Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport json\nimport xmltodict\nimport re\n\nfrom sopel import web\nfrom sopel.module import commands, example, NOLIMIT\n\n# The Canadian central bank has better exchange rate data than the Fed, the\n# Bank of England, or the European Central Bank. Who knew?\nbase_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml'\nregex = re.compile(r'''\n (\\d+(?:\\.\\d+)?) # Decimal number\n \\s*([a-zA-Z]{3}) # 3-letter currency code\n \\s+(?:in|as|of|to)\\s+ # preposition\n ([a-zA-Z]{3}) # 3-letter currency code\n ''', re.VERBOSE)\n\n\ndef get_rate(code):\n if code.upper() == 'CAD':\n return 1, 'Canadian Dollar'\n elif code.upper() == 'BTC':\n rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all'))\n return 1 / rates['CAD']['24h_avg'], 'Bitcoin\u201424hr average'\n\n data, headers = web.get(base_url.format(code), dont_decode=True, return_headers=True)\n if headers['_http_status'] == 404:\n return False, False\n namespaces = {\n 'http://www.cbwiki.net/wiki/index.php/Specification_1.1': 'cb',\n 'http://purl.org/rss/1.0/': None,\n 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf' }\n xml = xmltodict.parse(data, process_namespaces=True, namespaces=namespaces).get('rdf:RDF')\n namestring = xml.get('channel').get('title').get('#text')\n name = namestring[len('Bank of Canada noon rate: '):]\n name = re.sub(r'\\s*\\(noon\\)\\s*', '', name)\n rate = xml.get('item').get('cb:statistics').get('cb:exchangeRate').get('cb:value').get('#text')\n return float(rate), name\n\n\n@commands('cur', 'currency', 'exchange')\n@example('.cur 20 EUR in USD')\ndef exchange(bot, trigger):\n \"\"\"Show the exchange rate between two currencies\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No search term. 
An example: .cur 20 EUR in USD\")\n match = regex.match(trigger.group(2))\n if not match:\n # It's apologetic, because it's using Canadian data.\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n amount, of, to = match.groups()\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n display(bot, amount, of, to)\n\n\ndef display(bot, amount, of, to):\n if not amount:\n bot.reply(\"Zero is zero, no matter what country you're in.\")\n try:\n of_rate, of_name = get_rate(of)\n if not of_name:\n bot.reply(\"Unknown currency: %s\" % of)\n return\n to_rate, to_name = get_rate(to)\n if not to_name:\n bot.reply(\"Unknown currency: %s\" % to)\n return\n except Exception as e:\n raise\n bot.reply(\"Something went wrong while I was getting the exchange rate.\")\n return NOLIMIT\n\n result = amount / of_rate * to_rate\n bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of, of_name,\n result, to, to_name))\n\n\n@commands('btc', 'bitcoin')\n@example('.btc 20 EUR')\ndef bitcoin(bot, trigger):\n #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency.\n to = trigger.group(4)\n amount = trigger.group(3)\n if not to:\n to = trigger.group(3) or 'USD'\n amount = 1\n\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n display(bot, amount, 'BTC', to)\n", "path": "sopel/modules/currency.py"}]}
1,978
256
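Editor's note: the whole fix above reduces to normalising the user-supplied codes once with `.upper()` before lookup and display. A minimal demonstration:

```python
def normalize_currency_codes(amount: float, of: str, to: str):
    """Uppercase both 3-letter codes so 'cad' matches 'CAD'."""
    return amount, of.upper(), to.upper()

print(normalize_currency_codes(1.0, "usd", "cad"))
# (1.0, 'USD', 'CAD')
```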
gh_patches_debug_9215
rasdani/github-patches
git_diff
meltano__meltano-6321
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `meltano --help` fails on Windows in some environments Running any Meltano command in my fresh Windows VM results in the following error: ![image](https://user-images.githubusercontent.com/11428666/176504379-a9489721-09b5-4a2d-ad83-c2f5a37adfb0.png) Sorry for the image of text - I forgot to enable the shared clipboard for the VM. The failure is happening here: ```python freedesktop_data = ( platform.freedesktop_os_release() if safe_hasattr(platform, "freedesktop_os_release") else defaultdict(type(None)) ) ``` This might be because I'm running under git bash instead of Powershell. We had assumed that if `freedesktop_os_release` is present, it will work. This is apparently only sometimes true. To prevent problems for Windows users we should put the call to `platform.freedesktop_os_release` in a try-except block. Doing so will also mean we no longer need to call `hasattr`. </issue> <code> [start of src/meltano/core/tracking/contexts/environment.py] 1 """Environment context for the Snowplow tracker.""" 2 3 from __future__ import annotations 4 5 import os 6 import platform 7 import uuid 8 from collections import defaultdict 9 from datetime import datetime 10 from pathlib import Path 11 from typing import Any 12 13 import psutil 14 from cached_property import cached_property 15 from snowplow_tracker import SelfDescribingJson 16 from structlog.stdlib import get_logger 17 18 import meltano 19 from meltano.core.tracking.schemas import EnvironmentContextSchema 20 from meltano.core.utils import hash_sha256, safe_hasattr 21 22 logger = get_logger(__name__) 23 24 # This file is only ever created in CI when building a release 25 release_marker_path = Path(__file__).parent / ".release_marker" 26 27 28 class EnvironmentContext(SelfDescribingJson): 29 """Environment context for the Snowplow tracker.""" 30 31 def __init__(self): 32 """Initialize the environment context.""" 33 ci_markers = ("GITHUB_ACTIONS", "CI") 34 super().__init__( 35 EnvironmentContextSchema.url, 36 { 37 "context_uuid": str(uuid.uuid4()), 38 "meltano_version": meltano.__version__, 39 "is_dev_build": not release_marker_path.exists(), 40 "is_ci_environment": any( 41 # True if 'true', 'TRUE', 'True', or '1' 42 os.environ.get(marker, "").lower()[:1] in {"1", "t"} 43 for marker in ci_markers 44 ), 45 "python_version": platform.python_version(), 46 "python_implementation": platform.python_implementation(), 47 **self.system_info, 48 **self.process_info, 49 }, 50 ) 51 52 @cached_property 53 def system_info(self) -> dict[str, Any]: 54 """Get system information. 55 56 Returns: 57 A dictionary containing system information. 58 """ 59 freedesktop_data = ( 60 platform.freedesktop_os_release() 61 if safe_hasattr(platform, "freedesktop_os_release") 62 else defaultdict(type(None)) 63 ) 64 65 return { 66 "system_name": platform.system() or None, 67 "system_release": platform.release() or None, 68 "system_version": platform.version() or None, 69 "machine": platform.machine() or None, 70 "windows_edition": platform.win32_edition() 71 if safe_hasattr(platform, "win32_edition") 72 else None, 73 "freedesktop_id": freedesktop_data["ID"], 74 "freedesktop_id_like": freedesktop_data.get("ID_LIKE", None), 75 "freedesktop_version_id": freedesktop_data.get("VERSION_ID", None), 76 } 77 78 @staticmethod 79 def get_process_timestamp(process: psutil.Process) -> str: 80 """Obtain the creation time of a process as a ISO 8601 timestamp. 
81 82 Args: 83 process: The process to obtain the creation time from. 84 85 Returns: 86 A ISO 8601 timestamp formatted string. 87 """ 88 return f"{datetime.utcfromtimestamp(process.create_time()).isoformat()}Z" 89 90 @cached_property 91 def process_info(self) -> dict[str, Any]: 92 """Obtain the process information for the current process. 93 94 Returns: 95 A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc 96 """ 97 process = psutil.Process() 98 with process.oneshot(): 99 return { 100 "num_cpu_cores": psutil.cpu_count(), 101 "num_cpu_cores_available": self.num_available_cores, 102 "process_hierarchy": [ 103 { 104 "process_name_hash": hash_sha256(proc.name()), 105 "process_creation_timestamp": self.get_process_timestamp(proc), 106 } 107 for proc in (process, *process.parents()) 108 ], 109 } 110 111 @cached_property 112 def num_available_cores(self) -> int: 113 """Obtain the number of available CPU cores. 114 115 Uses sched_getaffinity where available, otherwise falls back to cpu_count(). 116 117 Returns: 118 int: The number of available CPU cores. 119 """ 120 if safe_hasattr(os, "sched_getaffinity"): 121 return len(os.sched_getaffinity(0)) 122 return os.cpu_count() 123 124 125 environment_context = EnvironmentContext() 126 [end of src/meltano/core/tracking/contexts/environment.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/meltano/core/tracking/contexts/environment.py b/src/meltano/core/tracking/contexts/environment.py --- a/src/meltano/core/tracking/contexts/environment.py +++ b/src/meltano/core/tracking/contexts/environment.py @@ -56,11 +56,10 @@ Returns: A dictionary containing system information. """ - freedesktop_data = ( - platform.freedesktop_os_release() - if safe_hasattr(platform, "freedesktop_os_release") - else defaultdict(type(None)) - ) + try: + freedesktop_data = platform.freedesktop_os_release() + except Exception: + freedesktop_data = defaultdict(type(None)) return { "system_name": platform.system() or None,
{"golden_diff": "diff --git a/src/meltano/core/tracking/contexts/environment.py b/src/meltano/core/tracking/contexts/environment.py\n--- a/src/meltano/core/tracking/contexts/environment.py\n+++ b/src/meltano/core/tracking/contexts/environment.py\n@@ -56,11 +56,10 @@\n Returns:\n A dictionary containing system information.\n \"\"\"\n- freedesktop_data = (\n- platform.freedesktop_os_release()\n- if safe_hasattr(platform, \"freedesktop_os_release\")\n- else defaultdict(type(None))\n- )\n+ try:\n+ freedesktop_data = platform.freedesktop_os_release()\n+ except Exception:\n+ freedesktop_data = defaultdict(type(None))\n \n return {\n \"system_name\": platform.system() or None,\n", "issue": "`meltano --help` fails on Windows in some environments\nRunning any Meltano command in my fresh Windows VM results in the following error:\r\n\r\n![image](https://user-images.githubusercontent.com/11428666/176504379-a9489721-09b5-4a2d-ad83-c2f5a37adfb0.png)\r\n\r\nSorry for the image of text - I forgot to enable the shared clipboard for the VM.\r\n\r\nThe failure is happening here:\r\n\r\n```python\r\n freedesktop_data = (\r\n platform.freedesktop_os_release()\r\n if safe_hasattr(platform, \"freedesktop_os_release\")\r\n else defaultdict(type(None))\r\n )\r\n```\r\n\r\nThis might be because I'm running under git bash instead of Powershell.\r\n\r\nWe had assumed that if `freedesktop_os_release` is present, it will work. This is apparently only sometimes true. To prevent problems for Windows users we should put the call to `platform.freedesktop_os_release` in a try-except block. Doing so will also mean we no longer need to call `hasattr`.\n", "before_files": [{"content": "\"\"\"Environment context for the Snowplow tracker.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport uuid\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any\n\nimport psutil\nfrom cached_property import cached_property\nfrom snowplow_tracker import SelfDescribingJson\nfrom structlog.stdlib import get_logger\n\nimport meltano\nfrom meltano.core.tracking.schemas import EnvironmentContextSchema\nfrom meltano.core.utils import hash_sha256, safe_hasattr\n\nlogger = get_logger(__name__)\n\n# This file is only ever created in CI when building a release\nrelease_marker_path = Path(__file__).parent / \".release_marker\"\n\n\nclass EnvironmentContext(SelfDescribingJson):\n \"\"\"Environment context for the Snowplow tracker.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the environment context.\"\"\"\n ci_markers = (\"GITHUB_ACTIONS\", \"CI\")\n super().__init__(\n EnvironmentContextSchema.url,\n {\n \"context_uuid\": str(uuid.uuid4()),\n \"meltano_version\": meltano.__version__,\n \"is_dev_build\": not release_marker_path.exists(),\n \"is_ci_environment\": any(\n # True if 'true', 'TRUE', 'True', or '1'\n os.environ.get(marker, \"\").lower()[:1] in {\"1\", \"t\"}\n for marker in ci_markers\n ),\n \"python_version\": platform.python_version(),\n \"python_implementation\": platform.python_implementation(),\n **self.system_info,\n **self.process_info,\n },\n )\n\n @cached_property\n def system_info(self) -> dict[str, Any]:\n \"\"\"Get system information.\n\n Returns:\n A dictionary containing system information.\n \"\"\"\n freedesktop_data = (\n platform.freedesktop_os_release()\n if safe_hasattr(platform, \"freedesktop_os_release\")\n else defaultdict(type(None))\n )\n\n return {\n \"system_name\": platform.system() or None,\n \"system_release\": 
platform.release() or None,\n \"system_version\": platform.version() or None,\n \"machine\": platform.machine() or None,\n \"windows_edition\": platform.win32_edition()\n if safe_hasattr(platform, \"win32_edition\")\n else None,\n \"freedesktop_id\": freedesktop_data[\"ID\"],\n \"freedesktop_id_like\": freedesktop_data.get(\"ID_LIKE\", None),\n \"freedesktop_version_id\": freedesktop_data.get(\"VERSION_ID\", None),\n }\n\n @staticmethod\n def get_process_timestamp(process: psutil.Process) -> str:\n \"\"\"Obtain the creation time of a process as a ISO 8601 timestamp.\n\n Args:\n process: The process to obtain the creation time from.\n\n Returns:\n A ISO 8601 timestamp formatted string.\n \"\"\"\n return f\"{datetime.utcfromtimestamp(process.create_time()).isoformat()}Z\"\n\n @cached_property\n def process_info(self) -> dict[str, Any]:\n \"\"\"Obtain the process information for the current process.\n\n Returns:\n A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc\n \"\"\"\n process = psutil.Process()\n with process.oneshot():\n return {\n \"num_cpu_cores\": psutil.cpu_count(),\n \"num_cpu_cores_available\": self.num_available_cores,\n \"process_hierarchy\": [\n {\n \"process_name_hash\": hash_sha256(proc.name()),\n \"process_creation_timestamp\": self.get_process_timestamp(proc),\n }\n for proc in (process, *process.parents())\n ],\n }\n\n @cached_property\n def num_available_cores(self) -> int:\n \"\"\"Obtain the number of available CPU cores.\n\n Uses sched_getaffinity where available, otherwise falls back to cpu_count().\n\n Returns:\n int: The number of available CPU cores.\n \"\"\"\n if safe_hasattr(os, \"sched_getaffinity\"):\n return len(os.sched_getaffinity(0))\n return os.cpu_count()\n\n\nenvironment_context = EnvironmentContext()\n", "path": "src/meltano/core/tracking/contexts/environment.py"}]}
1,933
171
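The golden diff above swaps a `hasattr()` guard for try/except. A minimal standalone sketch of that EAFP pattern outside the Meltano codebase; the helper name `freedesktop_release_info` is invented for illustration:

```python
import platform
from collections import defaultdict


def freedesktop_release_info():
    """Return /etc/os-release data, or a mapping that yields None.

    platform.freedesktop_os_release() exists only on Python >= 3.10,
    and even where present it raises OSError when no os-release file
    can be read (e.g. Windows, minimal containers). A broad except
    covers both failure modes, which checking hasattr() on the
    function alone does not.
    """
    try:
        return platform.freedesktop_os_release()
    except Exception:
        # defaultdict(type(None)) returns None for any missing key,
        # so callers can index freely without KeyError.
        return defaultdict(type(None))


data = freedesktop_release_info()
print(data.get("ID"), data.get("VERSION_ID"))
```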
gh_patches_debug_23649
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-5403
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> enterprise car hire spider missing ~6k outlets In the latest run of the enterprise.py spider from 2023-05-15, there are around 6000 fewer results than in the previous run from 2023-04-15. I think the cause is that the search API that ATP uses is no longer returning results for some major countries. For example, the following still work: * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/IT * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/BE * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/MX but the following do not: * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/GB * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/US * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/FR I'm not sure why the API has stopped returning for certain countries, or if there's anything we can do about it. If we want to look at alternative means, then it's possible that the "Airport Locations" and "Neighbourhood Locations" lists at https://www.enterprise.co.uk/en/car-hire/locations/uk.html are complete for GB. (Together they have 427 locations, compared with the 425 I extracted for my Chain Reaction tool from a previous ATP dataset.) But then you'd need to parse each branch page, and that's a lot more HTTP fetches. I haven't looked what other localised sites do in terms of location lists. </issue> <code> [start of locations/spiders/enterprise.py] 1 import geonamescache 2 from scrapy import Spider 3 from scrapy.http import JsonRequest 4 5 from locations.dict_parser import DictParser 6 7 8 class EnterpriseSpider(Spider): 9 name = "enterprise" 10 item_attributes = {"brand": "Enterprise Rent-A-Car", "brand_wikidata": "Q17085454"} 11 allowed_domains = ["prd.location.enterprise.com"] 12 13 def start_requests(self): 14 gc = geonamescache.GeonamesCache() 15 countries = gc.get_countries() 16 for country_code in countries.keys(): 17 yield JsonRequest( 18 url=f"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}" 19 ) 20 21 def parse(self, response): 22 for location in response.json(): 23 if location["closed"] or not location["physicalLocation"]: 24 continue 25 item = DictParser.parse(location) 26 item["ref"] = location["stationId"] 27 item["name"] = location["locationNameTranslation"] 28 item["street_address"] = ", ".join(filter(None, location["addressLines"])) 29 item["phone"] = location["formattedPhone"] 30 yield item 31 [end of locations/spiders/enterprise.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/enterprise.py b/locations/spiders/enterprise.py --- a/locations/spiders/enterprise.py +++ b/locations/spiders/enterprise.py @@ -8,15 +8,20 @@ class EnterpriseSpider(Spider): name = "enterprise" item_attributes = {"brand": "Enterprise Rent-A-Car", "brand_wikidata": "Q17085454"} - allowed_domains = ["prd.location.enterprise.com"] + allowed_domains = ["prd.location.enterprise.com", "int1.location.enterprise.com"] def start_requests(self): gc = geonamescache.GeonamesCache() countries = gc.get_countries() for country_code in countries.keys(): - yield JsonRequest( - url=f"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}" - ) + # It appears that countries are sharded between two + # servers. Other servers are int2, xqa1, xqa2, xqa3 + # but search of these servers reveals no additional + # locations on top of just prd and int1. + for subdomain in ["prd", "int1"]: + yield JsonRequest( + url=f"https://{subdomain}.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}" + ) def parse(self, response): for location in response.json():
{"golden_diff": "diff --git a/locations/spiders/enterprise.py b/locations/spiders/enterprise.py\n--- a/locations/spiders/enterprise.py\n+++ b/locations/spiders/enterprise.py\n@@ -8,15 +8,20 @@\n class EnterpriseSpider(Spider):\n name = \"enterprise\"\n item_attributes = {\"brand\": \"Enterprise Rent-A-Car\", \"brand_wikidata\": \"Q17085454\"}\n- allowed_domains = [\"prd.location.enterprise.com\"]\n+ allowed_domains = [\"prd.location.enterprise.com\", \"int1.location.enterprise.com\"]\n \n def start_requests(self):\n gc = geonamescache.GeonamesCache()\n countries = gc.get_countries()\n for country_code in countries.keys():\n- yield JsonRequest(\n- url=f\"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}\"\n- )\n+ # It appears that countries are sharded between two\n+ # servers. Other servers are int2, xqa1, xqa2, xqa3\n+ # but search of these servers reveals no additional\n+ # locations on top of just prd and int1.\n+ for subdomain in [\"prd\", \"int1\"]:\n+ yield JsonRequest(\n+ url=f\"https://{subdomain}.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}\"\n+ )\n \n def parse(self, response):\n for location in response.json():\n", "issue": "enterprise car hire spider missing ~6k outlets\nIn the latest run of the enterprise.py spider from 2023-05-15, there are around 6000 fewer results than in the previous run from 2023-04-15.\r\n\r\nI think the cause is that the search API that ATP uses is no longer returning results for some major countries. For example, the following still work:\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/IT\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/BE\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/MX\r\n\r\nbut the following do not:\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/GB\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/US\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/FR\r\n\r\nI'm not sure why the API has stopped returning for certain countries, or if there's anything we can do about it.\r\n\r\nIf we want to look at alternative means, then it's possible that the \"Airport Locations\" and \"Neighbourhood Locations\" lists at https://www.enterprise.co.uk/en/car-hire/locations/uk.html are complete for GB. (Together they have 427 locations, compared with the 425 I extracted for my Chain Reaction tool from a previous ATP dataset.) But then you'd need to parse each branch page, and that's a lot more HTTP fetches. 
I haven't looked what other localised sites do in terms of location lists.\n", "before_files": [{"content": "import geonamescache\nfrom scrapy import Spider\nfrom scrapy.http import JsonRequest\n\nfrom locations.dict_parser import DictParser\n\n\nclass EnterpriseSpider(Spider):\n name = \"enterprise\"\n item_attributes = {\"brand\": \"Enterprise Rent-A-Car\", \"brand_wikidata\": \"Q17085454\"}\n allowed_domains = [\"prd.location.enterprise.com\"]\n\n def start_requests(self):\n gc = geonamescache.GeonamesCache()\n countries = gc.get_countries()\n for country_code in countries.keys():\n yield JsonRequest(\n url=f\"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}\"\n )\n\n def parse(self, response):\n for location in response.json():\n if location[\"closed\"] or not location[\"physicalLocation\"]:\n continue\n item = DictParser.parse(location)\n item[\"ref\"] = location[\"stationId\"]\n item[\"name\"] = location[\"locationNameTranslation\"]\n item[\"street_address\"] = \", \".join(filter(None, location[\"addressLines\"]))\n item[\"phone\"] = location[\"formattedPhone\"]\n yield item\n", "path": "locations/spiders/enterprise.py"}]}
1,194
318
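A self-contained sketch of the two-host fan-out the diff above introduces. The short country list is a stand-in for the full geonamescache set, and no HTTP is performed:

```python
from itertools import product

# Stand-in for geonamescache.GeonamesCache().get_countries().keys();
# the real spider iterates every ISO country code.
COUNTRY_CODES = ["GB", "US", "FR", "IT", "BE", "MX"]

# Locations appear to be sharded across two servers, so each country
# must be queried on both hosts for complete coverage.
SUBDOMAINS = ["prd", "int1"]

URL_TEMPLATE = (
    "https://{sub}.location.enterprise.com"
    "/enterprise-sls/search/location/enterprise/web/country/{cc}"
)

for sub, cc in product(SUBDOMAINS, COUNTRY_CODES):
    print(URL_TEMPLATE.format(sub=sub, cc=cc))
```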
gh_patches_debug_6733
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-5894
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "raw" view is not raw, adds extra backslash #### Problem Description I just noticed during https://github.com/mitmproxy/mitmproxy/issues/5469#issuecomment-1191343747 #### Steps to reproduce the behavior: http.txt ``` HTTP/1.1 200 OK Content-Type: text/plain Content-Length: 1 \ ``` 1. `ncat -l 1337 < http.txt` 2. `mitmproxy` 3. `curl --proxy localhost:8080 http://127.0.0.1:1337` raw is `\\`: ![image](https://user-images.githubusercontent.com/679144/180199838-07c02668-5dab-4210-b5b6-aea78066e0fd.png) hex: ![image](https://user-images.githubusercontent.com/679144/180199860-2cdd9455-7846-4921-b67c-6b9c3669b268.png) #### System Information ``` Mitmproxy: 8.1.1 binary Python: 3.10.5 OpenSSL: OpenSSL 3.0.3 3 May 2022 Platform: Linux-5.15.0-41-generic-x86_64-with-glibc2.35 ``` </issue> <code> [start of mitmproxy/contentviews/raw.py] 1 from . import base 2 from mitmproxy.utils import strutils 3 4 5 class ViewRaw(base.View): 6 name = "Raw" 7 8 def __call__(self, data, **metadata): 9 return "Raw", base.format_text(strutils.bytes_to_escaped_str(data, True)) 10 11 def render_priority(self, data: bytes, **metadata) -> float: 12 return 0.1 * float(bool(data)) 13 [end of mitmproxy/contentviews/raw.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mitmproxy/contentviews/raw.py b/mitmproxy/contentviews/raw.py --- a/mitmproxy/contentviews/raw.py +++ b/mitmproxy/contentviews/raw.py @@ -1,12 +1,11 @@ from . import base -from mitmproxy.utils import strutils class ViewRaw(base.View): name = "Raw" def __call__(self, data, **metadata): - return "Raw", base.format_text(strutils.bytes_to_escaped_str(data, True)) + return "Raw", base.format_text(data) def render_priority(self, data: bytes, **metadata) -> float: return 0.1 * float(bool(data))
{"golden_diff": "diff --git a/mitmproxy/contentviews/raw.py b/mitmproxy/contentviews/raw.py\n--- a/mitmproxy/contentviews/raw.py\n+++ b/mitmproxy/contentviews/raw.py\n@@ -1,12 +1,11 @@\n from . import base\n-from mitmproxy.utils import strutils\n \n \n class ViewRaw(base.View):\n name = \"Raw\"\n \n def __call__(self, data, **metadata):\n- return \"Raw\", base.format_text(strutils.bytes_to_escaped_str(data, True))\n+ return \"Raw\", base.format_text(data)\n \n def render_priority(self, data: bytes, **metadata) -> float:\n return 0.1 * float(bool(data))\n", "issue": "\"raw\" view is not raw, adds extra backslash\n#### Problem Description\r\n\r\nI just noticed during https://github.com/mitmproxy/mitmproxy/issues/5469#issuecomment-1191343747\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\nhttp.txt\r\n\r\n```\r\nHTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 1\r\n\r\n\\\r\n```\r\n\r\n1. `ncat -l 1337 < http.txt`\r\n2. `mitmproxy`\r\n3. `curl --proxy localhost:8080 http://127.0.0.1:1337`\r\n\r\nraw is `\\\\`:\r\n\r\n![image](https://user-images.githubusercontent.com/679144/180199838-07c02668-5dab-4210-b5b6-aea78066e0fd.png)\r\n\r\nhex:\r\n\r\n![image](https://user-images.githubusercontent.com/679144/180199860-2cdd9455-7846-4921-b67c-6b9c3669b268.png)\r\n\r\n#### System Information\r\n\r\n```\r\nMitmproxy: 8.1.1 binary\r\nPython: 3.10.5\r\nOpenSSL: OpenSSL 3.0.3 3 May 2022\r\nPlatform: Linux-5.15.0-41-generic-x86_64-with-glibc2.35\r\n```\r\n\n", "before_files": [{"content": "from . import base\nfrom mitmproxy.utils import strutils\n\n\nclass ViewRaw(base.View):\n name = \"Raw\"\n\n def __call__(self, data, **metadata):\n return \"Raw\", base.format_text(strutils.bytes_to_escaped_str(data, True))\n\n def render_priority(self, data: bytes, **metadata) -> float:\n return 0.1 * float(bool(data))\n", "path": "mitmproxy/contentviews/raw.py"}]}
1,000
152
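The fix above drops the escaping step entirely. A tiny repro, independent of mitmproxy, of why escaping doubled the backslash from the issue's one-byte body; `repr()` stands in for `strutils.bytes_to_escaped_str`, which is an assumption about that helper's behaviour:

```python
body = b"\\"  # the single-backslash body served by http.txt

# Escaping turns the one backslash into two, which is what the old
# "Raw" view rendered.
escaped = repr(body)[2:-1]
print(escaped)                 # prints \\

# Passing the bytes through undecorated shows the true payload.
print(body.decode("latin-1"))  # prints \
```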
gh_patches_debug_10976
rasdani/github-patches
git_diff
sagemath__sage-36488
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `func_persist` fails with Python 3.11 ```sage @func_persist def test(n): return n*n ``` fails with ```python AttributeError Traceback (most recent call last) Cell In[1], line 1 ----> 1 @func_persist 2 def test(n): 3 return n*n File /usr/lib/python3.11/site-packages/sage/misc/func_persist.py:60, in func_persist.__init__(self, f, dir) 56 self.__dir = dir 57 os.makedirs(dir, exist_ok=True) 58 self.__doc__ = '%s%s%s' % ( 59 f.__name__, ---> 60 inspect.formatargspec(*inspect.getargs(f.__code__)), 61 f.__doc__) AttributeError: module 'inspect' has no attribute 'formatargspec' ``` This is because `inspect.formatargspec` has been [deprecated in Python 3.5 and removed in 3.11](https://docs.python.org/3/whatsnew/3.11.html#removed). Probably related to #33842. </issue> <code> [start of src/sage/misc/func_persist.py] 1 r""" 2 Support for persistent functions in .sage files 3 4 Persistent functions are functions whose values are stored on disk 5 so they do not have to be recomputed. 6 7 The inputs to the function must be hashable (so lists are not 8 allowed). Though a hash is used, in the incredibly unlikely event 9 that a hash collision occurs, your function will not return an 10 incorrect result because of this (though the cache might not be 11 used either). 12 13 This is meant to be used from ``.sage`` files, not from 14 library ``.py`` files. 15 16 To use this disk caching mechanism, just put 17 ``@func_persist`` right before your function 18 definition. For example, 19 20 :: 21 22 @func_persist 23 def bern(n): 24 "Return the n-th Bernoulli number, caching the result to disk." 25 return bernoulli(n) 26 27 You can then use the function ``bern`` as usual, except 28 it will almost instantly return values that have already been 29 computed, even if you quit and restart. 30 31 The disk cache files are stored by default in the subdirectory 32 ``func_persist`` of the current working directory, 33 with one file for each evaluation of the function. 34 """ 35 ######################################################################## 36 # Copyright (C) 2006 William Stein <[email protected]> 37 # 38 # Distributed under the terms of the GNU General Public License (GPL) 39 # 40 # https://www.gnu.org/licenses/ 41 ######################################################################## 42 43 import inspect 44 import os 45 46 from . import persist 47 48 49 class func_persist: 50 r""" 51 Put ``@func_persist`` right before your function 52 definition to cache values it computes to disk. 53 """ 54 def __init__(self, f, dir='func_persist'): 55 self.__func = f 56 self.__dir = dir 57 os.makedirs(dir, exist_ok=True) 58 self.__doc__ = '%s%s%s' % ( 59 f.__name__, 60 inspect.formatargspec(*inspect.getargs(f.__code__)), 61 f.__doc__) 62 63 def __call__(self, *args, **kwds): 64 key = (tuple(args), tuple(kwds.items())) 65 h = hash(key) 66 name = '%s/%s_%s.sobj' % (self.__dir, self.__func.__name__, h) 67 68 if os.path.exists(name): 69 key2, val = persist.load(name) 70 if key == key2: 71 # We save and test equality of keys to avoid 72 # the (extremely remote) possibility of a hash 73 # collision. Correctness is crucial in mathematics. 74 return val 75 76 val = self.__func(*args, **kwds) 77 persist.save((key, val), name) 78 return val 79 [end of src/sage/misc/func_persist.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sage/misc/func_persist.py b/src/sage/misc/func_persist.py --- a/src/sage/misc/func_persist.py +++ b/src/sage/misc/func_persist.py @@ -45,7 +45,6 @@ from . import persist - class func_persist: r""" Put ``@func_persist`` right before your function @@ -57,7 +56,7 @@ os.makedirs(dir, exist_ok=True) self.__doc__ = '%s%s%s' % ( f.__name__, - inspect.formatargspec(*inspect.getargs(f.__code__)), + inspect.signature(f), f.__doc__) def __call__(self, *args, **kwds):
{"golden_diff": "diff --git a/src/sage/misc/func_persist.py b/src/sage/misc/func_persist.py\n--- a/src/sage/misc/func_persist.py\n+++ b/src/sage/misc/func_persist.py\n@@ -45,7 +45,6 @@\n \n from . import persist\n \n-\n class func_persist:\n r\"\"\"\n Put ``@func_persist`` right before your function\n@@ -57,7 +56,7 @@\n os.makedirs(dir, exist_ok=True)\n self.__doc__ = '%s%s%s' % (\n f.__name__,\n- inspect.formatargspec(*inspect.getargs(f.__code__)),\n+ inspect.signature(f),\n f.__doc__)\n \n def __call__(self, *args, **kwds):\n", "issue": "`func_persist` fails with Python 3.11\n```sage\n@func_persist\ndef test(n):\n return n*n\n```\n\nfails with\n\n```python\nAttributeError Traceback (most recent call last)\nCell In[1], line 1\n----> 1 @func_persist\n 2 def test(n):\n 3 return n*n\n\nFile /usr/lib/python3.11/site-packages/sage/misc/func_persist.py:60, in func_persist.__init__(self, f, dir)\n 56 self.__dir = dir\n 57 os.makedirs(dir, exist_ok=True)\n 58 self.__doc__ = '%s%s%s' % (\n 59 f.__name__,\n---> 60 inspect.formatargspec(*inspect.getargs(f.__code__)),\n 61 f.__doc__)\n\nAttributeError: module 'inspect' has no attribute 'formatargspec'\n```\n\nThis is because `inspect.formatargspec` has been [deprecated in Python 3.5 and removed in 3.11](https://docs.python.org/3/whatsnew/3.11.html#removed). Probably related to #33842.\n\n", "before_files": [{"content": "r\"\"\"\nSupport for persistent functions in .sage files\n\nPersistent functions are functions whose values are stored on disk\nso they do not have to be recomputed.\n\nThe inputs to the function must be hashable (so lists are not\nallowed). Though a hash is used, in the incredibly unlikely event\nthat a hash collision occurs, your function will not return an\nincorrect result because of this (though the cache might not be\nused either).\n\nThis is meant to be used from ``.sage`` files, not from\nlibrary ``.py`` files.\n\nTo use this disk caching mechanism, just put\n``@func_persist`` right before your function\ndefinition. For example,\n\n::\n\n @func_persist\n def bern(n):\n \"Return the n-th Bernoulli number, caching the result to disk.\"\n return bernoulli(n)\n\nYou can then use the function ``bern`` as usual, except\nit will almost instantly return values that have already been\ncomputed, even if you quit and restart.\n\nThe disk cache files are stored by default in the subdirectory\n``func_persist`` of the current working directory,\nwith one file for each evaluation of the function.\n\"\"\"\n########################################################################\n# Copyright (C) 2006 William Stein <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# https://www.gnu.org/licenses/\n########################################################################\n\nimport inspect\nimport os\n\nfrom . 
import persist\n\n\nclass func_persist:\n r\"\"\"\n Put ``@func_persist`` right before your function\n definition to cache values it computes to disk.\n \"\"\"\n def __init__(self, f, dir='func_persist'):\n self.__func = f\n self.__dir = dir\n os.makedirs(dir, exist_ok=True)\n self.__doc__ = '%s%s%s' % (\n f.__name__,\n inspect.formatargspec(*inspect.getargs(f.__code__)),\n f.__doc__)\n\n def __call__(self, *args, **kwds):\n key = (tuple(args), tuple(kwds.items()))\n h = hash(key)\n name = '%s/%s_%s.sobj' % (self.__dir, self.__func.__name__, h)\n\n if os.path.exists(name):\n key2, val = persist.load(name)\n if key == key2:\n # We save and test equality of keys to avoid\n # the (extremely remote) possibility of a hash\n # collision. Correctness is crucial in mathematics.\n return val\n\n val = self.__func(*args, **kwds)\n persist.save((key, val), name)\n return val\n", "path": "src/sage/misc/func_persist.py"}]}
1,543
169
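A standalone check of the replacement API the diff uses; `bern` here is a toy function, not Sage's:

```python
import inspect


def bern(n, algorithm="default"):
    """Toy stand-in for a disk-cached Sage function."""
    return n * n


# Removed in 3.11: inspect.formatargspec(*inspect.getargs(bern.__code__))
# inspect.signature(), available since Python 3.3, renders the same
# parenthesised parameter list when interpolated with %s.
doc = "%s%s%s" % (bern.__name__, inspect.signature(bern), bern.__doc__)
print(doc)
# bern(n, algorithm='default')Toy stand-in for a disk-cached Sage function.
```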
gh_patches_debug_30376
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-230
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement filters for schema list API **Problem** <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> Users should be able to: - get schemas associated with a given database key from the schema list API. - filter schemas by name. **Proposed solution** <!-- A clear and concise description of your proposed solution or feature. --> Schema list API should take name and database filter parameters. **Additional context** <!-- Add any other context or screenshots about the feature request here.--> We should use `django-filter` for this. </issue> <code> [start of mathesar/views/api.py] 1 from rest_framework import status, viewsets 2 from rest_framework.exceptions import NotFound, ValidationError 3 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin 4 from rest_framework.response import Response 5 from django_filters import rest_framework as filters 6 7 from mathesar.database.utils import get_non_default_database_keys 8 from mathesar.models import Table, Schema, DataFile 9 from mathesar.pagination import DefaultLimitOffsetPagination, TableLimitOffsetPagination 10 from mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer 11 from mathesar.utils.schemas import create_schema_and_object 12 from mathesar.utils.api import create_table_from_datafile 13 from mathesar.filters import TableFilter 14 15 16 class SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin): 17 queryset = Schema.objects.all().order_by('-created_at') 18 serializer_class = SchemaSerializer 19 pagination_class = DefaultLimitOffsetPagination 20 21 def create(self, request): 22 schema = create_schema_and_object(request.data['name'], request.data['database']) 23 serializer = SchemaSerializer(schema) 24 return Response(serializer.data, status=status.HTTP_201_CREATED) 25 26 27 class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, 28 CreateModelMixin): 29 queryset = Table.objects.all().order_by('-created_at') 30 serializer_class = TableSerializer 31 pagination_class = DefaultLimitOffsetPagination 32 filter_backends = (filters.DjangoFilterBackend,) 33 filterset_class = TableFilter 34 35 def create(self, request): 36 serializer = TableSerializer(data=request.data, 37 context={'request': request}) 38 if serializer.is_valid(): 39 return create_table_from_datafile(request, serializer.data) 40 else: 41 raise ValidationError(serializer.errors) 42 43 44 class RecordViewSet(viewsets.ViewSet): 45 # There is no "update" method. 46 # We're not supporting PUT requests because there aren't a lot of use cases 47 # where the entire record needs to be replaced, PATCH suffices for updates. 
48 queryset = Table.objects.all().order_by('-created_at') 49 50 def list(self, request, table_pk=None): 51 paginator = TableLimitOffsetPagination() 52 records = paginator.paginate_queryset(self.queryset, request, table_pk) 53 serializer = RecordSerializer(records, many=True) 54 return paginator.get_paginated_response(serializer.data) 55 56 def retrieve(self, request, pk=None, table_pk=None): 57 table = Table.objects.get(id=table_pk) 58 record = table.get_record(pk) 59 if not record: 60 raise NotFound 61 serializer = RecordSerializer(record) 62 return Response(serializer.data) 63 64 def create(self, request, table_pk=None): 65 table = Table.objects.get(id=table_pk) 66 # We only support adding a single record through the API. 67 assert isinstance((request.data), dict) 68 record = table.create_record_or_records(request.data) 69 serializer = RecordSerializer(record) 70 return Response(serializer.data, status=status.HTTP_201_CREATED) 71 72 def partial_update(self, request, pk=None, table_pk=None): 73 table = Table.objects.get(id=table_pk) 74 record = table.update_record(pk, request.data) 75 serializer = RecordSerializer(record) 76 return Response(serializer.data) 77 78 def destroy(self, request, pk=None, table_pk=None): 79 table = Table.objects.get(id=table_pk) 80 table.delete_record(pk) 81 return Response(status=status.HTTP_204_NO_CONTENT) 82 83 84 class DatabaseKeyViewSet(viewsets.ViewSet): 85 def list(self, request): 86 return Response(get_non_default_database_keys()) 87 88 89 class DataFileViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, CreateModelMixin): 90 queryset = DataFile.objects.all().order_by('-created_at') 91 serializer_class = DataFileSerializer 92 pagination_class = DefaultLimitOffsetPagination 93 [end of mathesar/views/api.py] [start of mathesar/filters.py] 1 from django_filters import rest_framework as filters 2 3 from mathesar.models import Table 4 5 6 class CharInFilter(filters.BaseInFilter, filters.CharFilter): 7 pass 8 9 10 class TableFilter(filters.FilterSet): 11 name = CharInFilter(field_name='name', lookup_expr='in') 12 13 class Meta: 14 model = Table 15 fields = ['name'] 16 [end of mathesar/filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/filters.py b/mathesar/filters.py --- a/mathesar/filters.py +++ b/mathesar/filters.py @@ -1,12 +1,21 @@ from django_filters import rest_framework as filters -from mathesar.models import Table +from mathesar.models import Schema, Table class CharInFilter(filters.BaseInFilter, filters.CharFilter): pass +class SchemaFilter(filters.FilterSet): + name = CharInFilter(field_name='name', lookup_expr='in') + database = CharInFilter(field_name='database', lookup_expr='in') + + class Meta: + model = Schema + fields = ['name', 'database'] + + class TableFilter(filters.FilterSet): name = CharInFilter(field_name='name', lookup_expr='in') diff --git a/mathesar/views/api.py b/mathesar/views/api.py --- a/mathesar/views/api.py +++ b/mathesar/views/api.py @@ -10,13 +10,15 @@ from mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer from mathesar.utils.schemas import create_schema_and_object from mathesar.utils.api import create_table_from_datafile -from mathesar.filters import TableFilter +from mathesar.filters import SchemaFilter, TableFilter class SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin): queryset = Schema.objects.all().order_by('-created_at') serializer_class = SchemaSerializer pagination_class = DefaultLimitOffsetPagination + filter_backends = (filters.DjangoFilterBackend,) + filterset_class = SchemaFilter def create(self, request): schema = create_schema_and_object(request.data['name'], request.data['database'])
{"golden_diff": "diff --git a/mathesar/filters.py b/mathesar/filters.py\n--- a/mathesar/filters.py\n+++ b/mathesar/filters.py\n@@ -1,12 +1,21 @@\n from django_filters import rest_framework as filters\n \n-from mathesar.models import Table\n+from mathesar.models import Schema, Table\n \n \n class CharInFilter(filters.BaseInFilter, filters.CharFilter):\n pass\n \n \n+class SchemaFilter(filters.FilterSet):\n+ name = CharInFilter(field_name='name', lookup_expr='in')\n+ database = CharInFilter(field_name='database', lookup_expr='in')\n+\n+ class Meta:\n+ model = Schema\n+ fields = ['name', 'database']\n+\n+\n class TableFilter(filters.FilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n \ndiff --git a/mathesar/views/api.py b/mathesar/views/api.py\n--- a/mathesar/views/api.py\n+++ b/mathesar/views/api.py\n@@ -10,13 +10,15 @@\n from mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer\n from mathesar.utils.schemas import create_schema_and_object\n from mathesar.utils.api import create_table_from_datafile\n-from mathesar.filters import TableFilter\n+from mathesar.filters import SchemaFilter, TableFilter\n \n \n class SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n queryset = Schema.objects.all().order_by('-created_at')\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n+ filter_backends = (filters.DjangoFilterBackend,)\n+ filterset_class = SchemaFilter\n \n def create(self, request):\n schema = create_schema_and_object(request.data['name'], request.data['database'])\n", "issue": "Implement filters for schema list API\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers should be able to:\r\n- get schemas associated with a given database key from the schema list API.\r\n- filter schemas by name.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. 
-->\r\nSchema list API should take name and database filter parameters.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\nWe should use `django-filter` for this.\n", "before_files": [{"content": "from rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin\nfrom rest_framework.response import Response\nfrom django_filters import rest_framework as filters\n\nfrom mathesar.database.utils import get_non_default_database_keys\nfrom mathesar.models import Table, Schema, DataFile\nfrom mathesar.pagination import DefaultLimitOffsetPagination, TableLimitOffsetPagination\nfrom mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer\nfrom mathesar.utils.schemas import create_schema_and_object\nfrom mathesar.utils.api import create_table_from_datafile\nfrom mathesar.filters import TableFilter\n\n\nclass SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n queryset = Schema.objects.all().order_by('-created_at')\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n\n def create(self, request):\n schema = create_schema_and_object(request.data['name'], request.data['database'])\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin,\n CreateModelMixin):\n queryset = Table.objects.all().order_by('-created_at')\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def create(self, request):\n serializer = TableSerializer(data=request.data,\n context={'request': request})\n if serializer.is_valid():\n return create_table_from_datafile(request, serializer.data)\n else:\n raise ValidationError(serializer.errors)\n\n\nclass RecordViewSet(viewsets.ViewSet):\n # There is no \"update\" method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n queryset = Table.objects.all().order_by('-created_at')\n\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n records = paginator.paginate_queryset(self.queryset, request, table_pk)\n serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.get_record(pk)\n if not record:\n raise NotFound\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = Table.objects.get(id=table_pk)\n # We only support adding a single record through the API.\n assert isinstance((request.data), dict)\n record = table.create_record_or_records(request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.update_record(pk, request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = 
Table.objects.get(id=table_pk)\n table.delete_record(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass DatabaseKeyViewSet(viewsets.ViewSet):\n def list(self, request):\n return Response(get_non_default_database_keys())\n\n\nclass DataFileViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, CreateModelMixin):\n queryset = DataFile.objects.all().order_by('-created_at')\n serializer_class = DataFileSerializer\n pagination_class = DefaultLimitOffsetPagination\n", "path": "mathesar/views/api.py"}, {"content": "from django_filters import rest_framework as filters\n\nfrom mathesar.models import Table\n\n\nclass CharInFilter(filters.BaseInFilter, filters.CharFilter):\n pass\n\n\nclass TableFilter(filters.FilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n\n class Meta:\n model = Table\n fields = ['name']\n", "path": "mathesar/filters.py"}]}
1,740
380
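The FilterSet itself is in the diff; what it buys at the HTTP layer is comma-separated `in` lookups on the schema list endpoint. The parsing step, distilled to plain runnable Python (django_filters' BaseInFilter does essentially this via its CSV widget):

```python
def parse_in_param(raw):
    """Split a comma-separated query parameter into a value list."""
    return [v for v in raw.split(",") if v]


names = parse_in_param("public,sales")
print(names)  # ['public', 'sales']
# SchemaFilter then builds: Schema.objects.filter(name__in=names)
# so a request like GET /schemas/?name=public,sales&database=db1
# narrows results by both name and database key.
```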
gh_patches_debug_21093
rasdani/github-patches
git_diff
liqd__adhocracy4-294
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> IntegrityError in ratings See: - https://sentry.liqd.net/sentry/debattenportalspdde/issues/84/ - https://sentry.liqd.net/sentry/meinberlin-prod/issues/5/ </issue> <code> [start of adhocracy4/ratings/api.py] 1 from django.conf import settings 2 3 from django_filters import rest_framework as filters 4 from rest_framework import mixins, viewsets 5 from rest_framework.response import Response 6 7 from adhocracy4.api.mixins import ContentTypeMixin 8 from adhocracy4.api.permissions import ViewSetRulesPermission 9 10 from .models import Rating 11 from .serializers import RatingSerializer 12 13 14 class RatingViewSet(mixins.CreateModelMixin, 15 mixins.UpdateModelMixin, 16 ContentTypeMixin, 17 viewsets.GenericViewSet): 18 19 queryset = Rating.objects.all() 20 serializer_class = RatingSerializer 21 permission_classes = (ViewSetRulesPermission,) 22 filter_backends = (filters.DjangoFilterBackend,) 23 filter_fields = ('object_pk', 'content_type') 24 content_type_filter = settings.A4_RATEABLES 25 26 def perform_create(self, serializer): 27 serializer.save( 28 content_object=self.content_object, 29 creator=self.request.user 30 ) 31 32 def get_permission_object(self): 33 return self.content_object 34 35 @property 36 def rules_method_map(self): 37 return ViewSetRulesPermission.default_rules_method_map._replace( 38 POST='{app_label}.rate_{model}'.format( 39 app_label=self.content_type.app_label, 40 model=self.content_type.model 41 ) 42 ) 43 44 def destroy(self, request, content_type, object_pk, pk=None): 45 """ 46 Sets value to zero 47 NOTE: Rating is NOT deleted. 48 """ 49 rating = self.get_object() 50 rating.update(0) 51 serializer = self.get_serializer(rating) 52 return Response(serializer.data) 53 [end of adhocracy4/ratings/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/adhocracy4/ratings/api.py b/adhocracy4/ratings/api.py --- a/adhocracy4/ratings/api.py +++ b/adhocracy4/ratings/api.py @@ -3,6 +3,7 @@ from django_filters import rest_framework as filters from rest_framework import mixins, viewsets from rest_framework.response import Response +from rest_framework.serializers import ValidationError from adhocracy4.api.mixins import ContentTypeMixin from adhocracy4.api.permissions import ViewSetRulesPermission @@ -24,6 +25,11 @@ content_type_filter = settings.A4_RATEABLES def perform_create(self, serializer): + queryset = Rating.objects.filter(content_type_id=self.content_type.pk, + creator=self.request.user, + object_pk=self.content_object.pk) + if queryset.exists(): + raise ValidationError(queryset[0].pk) serializer.save( content_object=self.content_object, creator=self.request.user
{"golden_diff": "diff --git a/adhocracy4/ratings/api.py b/adhocracy4/ratings/api.py\n--- a/adhocracy4/ratings/api.py\n+++ b/adhocracy4/ratings/api.py\n@@ -3,6 +3,7 @@\n from django_filters import rest_framework as filters\n from rest_framework import mixins, viewsets\n from rest_framework.response import Response\n+from rest_framework.serializers import ValidationError\n \n from adhocracy4.api.mixins import ContentTypeMixin\n from adhocracy4.api.permissions import ViewSetRulesPermission\n@@ -24,6 +25,11 @@\n content_type_filter = settings.A4_RATEABLES\n \n def perform_create(self, serializer):\n+ queryset = Rating.objects.filter(content_type_id=self.content_type.pk,\n+ creator=self.request.user,\n+ object_pk=self.content_object.pk)\n+ if queryset.exists():\n+ raise ValidationError(queryset[0].pk)\n serializer.save(\n content_object=self.content_object,\n creator=self.request.user\n", "issue": "IntegrityError in ratings\nSee:\r\n- https://sentry.liqd.net/sentry/debattenportalspdde/issues/84/\r\n- https://sentry.liqd.net/sentry/meinberlin-prod/issues/5/\n", "before_files": [{"content": "from django.conf import settings\n\nfrom django_filters import rest_framework as filters\nfrom rest_framework import mixins, viewsets\nfrom rest_framework.response import Response\n\nfrom adhocracy4.api.mixins import ContentTypeMixin\nfrom adhocracy4.api.permissions import ViewSetRulesPermission\n\nfrom .models import Rating\nfrom .serializers import RatingSerializer\n\n\nclass RatingViewSet(mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n ContentTypeMixin,\n viewsets.GenericViewSet):\n\n queryset = Rating.objects.all()\n serializer_class = RatingSerializer\n permission_classes = (ViewSetRulesPermission,)\n filter_backends = (filters.DjangoFilterBackend,)\n filter_fields = ('object_pk', 'content_type')\n content_type_filter = settings.A4_RATEABLES\n\n def perform_create(self, serializer):\n serializer.save(\n content_object=self.content_object,\n creator=self.request.user\n )\n\n def get_permission_object(self):\n return self.content_object\n\n @property\n def rules_method_map(self):\n return ViewSetRulesPermission.default_rules_method_map._replace(\n POST='{app_label}.rate_{model}'.format(\n app_label=self.content_type.app_label,\n model=self.content_type.model\n )\n )\n\n def destroy(self, request, content_type, object_pk, pk=None):\n \"\"\"\n Sets value to zero\n NOTE: Rating is NOT deleted.\n \"\"\"\n rating = self.get_object()\n rating.update(0)\n serializer = self.get_serializer(rating)\n return Response(serializer.data)\n", "path": "adhocracy4/ratings/api.py"}]}
1,012
212
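The guard the diff adds is check-then-act: it prevents the common single-client double-submit behind the Sentry 500s, but a database unique constraint is what closes the race completely under concurrent POSTs. The pattern, with an in-memory store standing in for the Rating table:

```python
class AlreadyRated(Exception):
    pass


ratings = {}  # (user_id, object_pk) -> rating value


def create_rating(user_id, object_pk, value):
    key = (user_id, object_pk)
    if key in ratings:
        # Mirrors the diff: reject the duplicate and point the client
        # at the existing record so it can PATCH instead of POST.
        raise AlreadyRated(f"rating already exists for {key}")
    ratings[key] = value


create_rating(1, 42, +1)
try:
    create_rating(1, 42, -1)  # second POST from the same user
except AlreadyRated as exc:
    print(exc)
```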
gh_patches_debug_24915
rasdani/github-patches
git_diff
saleor__saleor-3848
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> GraphQL error: Expected a value of type "WeightUnitsEnum" but received: kg ### Steps to reproduce the problem 1. Change the default language in the storefront to Portuguese. 2. Go to Dashboard 2.0. The homepage runs a query which fetches `defaultWeightUnit` field. This GraphQL request returns this error: `GraphQL error: Expected a value of type "WeightUnitsEnum" but received: kg` </issue> <code> [start of saleor/core/weight.py] 1 """In Saleor we are using 'weight' instead of a 'mass'. 2 3 For those of us who are earth-bound, weight is what we usually experience. 4 Mass is a theoretical construct. 5 Unless we are dealing with inertia and momentum, we are encountering 6 the attractive force between ourselves and the earth, 7 the isolated effects of mass alone being a little more esoteric. 8 9 So even though mass is more fundamental, most people think 10 in terms of weight. 11 12 In the end, it does not really matter unless you travel between 13 different planets. 14 """ 15 from decimal import Decimal 16 from enum import Enum 17 18 from django import forms 19 from django.contrib.sites.models import Site 20 from django.core.validators import MinValueValidator 21 from django.template.loader import render_to_string 22 from django.utils.translation import pgettext_lazy 23 from measurement.measures import Weight 24 25 26 class WeightUnits: 27 KILOGRAM = 'kg' 28 POUND = 'lb' 29 OUNCE = 'oz' 30 GRAM = 'g' 31 32 CHOICES = [ 33 (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')), 34 (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')), 35 (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')), 36 (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))] 37 38 39 WeightUnitsEnum = Enum( 40 'WeightUnitsEnum', 41 {unit: unit for unit in WeightUnits.CHOICES}) 42 43 44 def zero_weight(): 45 """Represent the zero weight value.""" 46 return Weight(kg=0) 47 48 49 def convert_weight(weight, unit): 50 # Weight amount from the Weight instance can be retrived in serveral units 51 # via its properties. eg. 
Weight(lb=10).kg 52 converted_weight = getattr(weight, unit) 53 return Weight(**{unit: converted_weight}) 54 55 56 def get_default_weight_unit(): 57 site = Site.objects.get_current() 58 return site.settings.default_weight_unit 59 60 61 class WeightInput(forms.TextInput): 62 template = 'dashboard/shipping/weight_widget.html' 63 input_type = 'number' 64 65 def format_value(self, value): 66 if isinstance(value, Weight): 67 unit = get_default_weight_unit() 68 if value.unit != unit: 69 value = convert_weight(value, unit) 70 return value.value 71 return value 72 73 def render(self, name, value, attrs=None, renderer=None): 74 widget = super().render(name, value, attrs=attrs, renderer=renderer) 75 unit = get_default_weight_unit() 76 translated_unit = dict(WeightUnits.CHOICES)[unit] 77 return render_to_string( 78 self.template, 79 {'widget': widget, 'value': value, 'unit': translated_unit}) 80 81 82 class WeightField(forms.FloatField): 83 def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs): 84 if isinstance(widget, type): 85 widget = widget(attrs={'type': 'number', 'step': 'any'}) 86 super().__init__(*args, widget=widget, **kwargs) 87 if min_value is not None: 88 self.validators.append(MinValueValidator(min_value)) 89 90 def to_python(self, value): 91 value = super().to_python(value) 92 if value is None: 93 return value 94 unit = get_default_weight_unit() 95 return Weight(**{unit: value}) 96 97 def validate(self, weight): 98 if weight is None or weight in self.empty_values: 99 super().validate(weight) 100 else: 101 unit = get_default_weight_unit() 102 if not isinstance(weight, Weight): 103 raise Exception( 104 '%r is not a valid weight.' % (weight,)) 105 if weight.unit != unit: 106 raise forms.ValidationError( 107 'Invalid unit: %r (expected %r).' % ( 108 weight.unit, unit)) 109 super().validate(weight.value) 110 111 def clean(self, value): 112 value = value_to_be_validated = self.to_python(value) 113 self.validate(value_to_be_validated) 114 if isinstance(value, Weight): 115 value_to_be_validated = Decimal(value.value) 116 # default decimal validators can be used for Weight's value only 117 self.run_validators(value_to_be_validated) 118 return value 119 [end of saleor/core/weight.py] [start of saleor/graphql/core/enums.py] 1 import graphene 2 3 from ...core import TaxRateType as CoreTaxRateType, weight 4 from ...core.permissions import MODELS_PERMISSIONS 5 from .utils import str_to_enum 6 7 8 class ReportingPeriod(graphene.Enum): 9 TODAY = 'TODAY' 10 THIS_MONTH = 'THIS_MONTH' 11 12 13 TaxRateType = graphene.Enum( 14 'TaxRateType', 15 [(str_to_enum(rate[0]), rate[0]) for rate in CoreTaxRateType.CHOICES]) 16 17 18 PermissionEnum = graphene.Enum( 19 'PermissionEnum', [ 20 (str_to_enum(codename.split('.')[1]), codename) 21 for codename in MODELS_PERMISSIONS]) 22 23 24 WeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum) 25 [end of saleor/graphql/core/enums.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/core/weight.py b/saleor/core/weight.py --- a/saleor/core/weight.py +++ b/saleor/core/weight.py @@ -13,7 +13,6 @@ different planets. """ from decimal import Decimal -from enum import Enum from django import forms from django.contrib.sites.models import Site @@ -36,11 +35,6 @@ (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))] -WeightUnitsEnum = Enum( - 'WeightUnitsEnum', - {unit: unit for unit in WeightUnits.CHOICES}) - - def zero_weight(): """Represent the zero weight value.""" return Weight(kg=0) diff --git a/saleor/graphql/core/enums.py b/saleor/graphql/core/enums.py --- a/saleor/graphql/core/enums.py +++ b/saleor/graphql/core/enums.py @@ -1,6 +1,7 @@ import graphene -from ...core import TaxRateType as CoreTaxRateType, weight +from ...core import TaxRateType as CoreTaxRateType +from ...core.weight import WeightUnits from ...core.permissions import MODELS_PERMISSIONS from .utils import str_to_enum @@ -21,4 +22,6 @@ for codename in MODELS_PERMISSIONS]) -WeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum) +WeightUnitsEnum = graphene.Enum( + 'WeightUnitsEnum', + [(str_to_enum(unit[0]), unit[0]) for unit in WeightUnits.CHOICES])
{"golden_diff": "diff --git a/saleor/core/weight.py b/saleor/core/weight.py\n--- a/saleor/core/weight.py\n+++ b/saleor/core/weight.py\n@@ -13,7 +13,6 @@\n different planets.\n \"\"\"\n from decimal import Decimal\n-from enum import Enum\n \n from django import forms\n from django.contrib.sites.models import Site\n@@ -36,11 +35,6 @@\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n \n \n-WeightUnitsEnum = Enum(\n- 'WeightUnitsEnum',\n- {unit: unit for unit in WeightUnits.CHOICES})\n-\n-\n def zero_weight():\n \"\"\"Represent the zero weight value.\"\"\"\n return Weight(kg=0)\ndiff --git a/saleor/graphql/core/enums.py b/saleor/graphql/core/enums.py\n--- a/saleor/graphql/core/enums.py\n+++ b/saleor/graphql/core/enums.py\n@@ -1,6 +1,7 @@\n import graphene\n \n-from ...core import TaxRateType as CoreTaxRateType, weight\n+from ...core import TaxRateType as CoreTaxRateType\n+from ...core.weight import WeightUnits\n from ...core.permissions import MODELS_PERMISSIONS\n from .utils import str_to_enum\n \n@@ -21,4 +22,6 @@\n for codename in MODELS_PERMISSIONS])\n \n \n-WeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum)\n+WeightUnitsEnum = graphene.Enum(\n+ 'WeightUnitsEnum',\n+ [(str_to_enum(unit[0]), unit[0]) for unit in WeightUnits.CHOICES])\n", "issue": "GraphQL error: Expected a value of type \"WeightUnitsEnum\" but received: kg\n### Steps to reproduce the problem\r\n1. Change the default language in the storefront to Portuguese.\r\n2. Go to Dashboard 2.0. The homepage runs a query which fetches `defaultWeightUnit` field.\r\nThis GraphQL request returns this error: `GraphQL error: Expected a value of type \"WeightUnitsEnum\" but received: kg`\r\n\n", "before_files": [{"content": "\"\"\"In Saleor we are using 'weight' instead of a 'mass'.\n\nFor those of us who are earth-bound, weight is what we usually experience.\nMass is a theoretical construct.\nUnless we are dealing with inertia and momentum, we are encountering\nthe attractive force between ourselves and the earth,\nthe isolated effects of mass alone being a little more esoteric.\n\nSo even though mass is more fundamental, most people think\nin terms of weight.\n\nIn the end, it does not really matter unless you travel between\ndifferent planets.\n\"\"\"\nfrom decimal import Decimal\nfrom enum import Enum\n\nfrom django import forms\nfrom django.contrib.sites.models import Site\nfrom django.core.validators import MinValueValidator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import pgettext_lazy\nfrom measurement.measures import Weight\n\n\nclass WeightUnits:\n KILOGRAM = 'kg'\n POUND = 'lb'\n OUNCE = 'oz'\n GRAM = 'g'\n\n CHOICES = [\n (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),\n (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),\n (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n\n\nWeightUnitsEnum = Enum(\n 'WeightUnitsEnum',\n {unit: unit for unit in WeightUnits.CHOICES})\n\n\ndef zero_weight():\n \"\"\"Represent the zero weight value.\"\"\"\n return Weight(kg=0)\n\n\ndef convert_weight(weight, unit):\n # Weight amount from the Weight instance can be retrived in serveral units\n # via its properties. eg. 
Weight(lb=10).kg\n converted_weight = getattr(weight, unit)\n return Weight(**{unit: converted_weight})\n\n\ndef get_default_weight_unit():\n site = Site.objects.get_current()\n return site.settings.default_weight_unit\n\n\nclass WeightInput(forms.TextInput):\n template = 'dashboard/shipping/weight_widget.html'\n input_type = 'number'\n\n def format_value(self, value):\n if isinstance(value, Weight):\n unit = get_default_weight_unit()\n if value.unit != unit:\n value = convert_weight(value, unit)\n return value.value\n return value\n\n def render(self, name, value, attrs=None, renderer=None):\n widget = super().render(name, value, attrs=attrs, renderer=renderer)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n self.template,\n {'widget': widget, 'value': value, 'unit': translated_unit})\n\n\nclass WeightField(forms.FloatField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n super().__init__(*args, widget=widget, **kwargs)\n if min_value is not None:\n self.validators.append(MinValueValidator(min_value))\n\n def to_python(self, value):\n value = super().to_python(value)\n if value is None:\n return value\n unit = get_default_weight_unit()\n return Weight(**{unit: value})\n\n def validate(self, weight):\n if weight is None or weight in self.empty_values:\n super().validate(weight)\n else:\n unit = get_default_weight_unit()\n if not isinstance(weight, Weight):\n raise Exception(\n '%r is not a valid weight.' % (weight,))\n if weight.unit != unit:\n raise forms.ValidationError(\n 'Invalid unit: %r (expected %r).' % (\n weight.unit, unit))\n super().validate(weight.value)\n\n def clean(self, value):\n value = value_to_be_validated = self.to_python(value)\n self.validate(value_to_be_validated)\n if isinstance(value, Weight):\n value_to_be_validated = Decimal(value.value)\n # default decimal validators can be used for Weight's value only\n self.run_validators(value_to_be_validated)\n return value\n", "path": "saleor/core/weight.py"}, {"content": "import graphene\n\nfrom ...core import TaxRateType as CoreTaxRateType, weight\nfrom ...core.permissions import MODELS_PERMISSIONS\nfrom .utils import str_to_enum\n\n\nclass ReportingPeriod(graphene.Enum):\n TODAY = 'TODAY'\n THIS_MONTH = 'THIS_MONTH'\n\n\nTaxRateType = graphene.Enum(\n 'TaxRateType',\n [(str_to_enum(rate[0]), rate[0]) for rate in CoreTaxRateType.CHOICES])\n\n\nPermissionEnum = graphene.Enum(\n 'PermissionEnum', [\n (str_to_enum(codename.split('.')[1]), codename)\n for codename in MODELS_PERMISSIONS])\n\n\nWeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum)\n", "path": "saleor/graphql/core/enums.py"}]}
1986
351
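The saleor fix above works by replacing a stdlib `Enum` that enumerated whole `(value, label)` choice tuples with a `graphene.Enum` built from the stored unit strings, so GraphQL can match a persisted `kg` against an enum value. A minimal standalone sketch of that pattern; `str_to_enum` is an assumed stand-in for the helper the diff imports:

```python
import graphene

# Django-style choices: (stored value, human-readable label) pairs.
CHOICES = [("kg", "kg"), ("lb", "lb"), ("oz", "oz"), ("g", "g")]

def str_to_enum(value: str) -> str:
    # Assumed behaviour: derive a valid GraphQL enum name, e.g. "kg" -> "KG".
    return value.replace(" ", "_").upper()

# Build the enum from the stored strings themselves; the buggy version
# wrapped a Python Enum whose members never equalled the plain string "kg".
WeightUnitsEnum = graphene.Enum(
    "WeightUnitsEnum",
    [(str_to_enum(value), value) for value, _label in CHOICES],
)
```

With members named `KG`, `LB`, `OZ`, `G` and plain string values to match, serializing a stored `"kg"` no longer raises the `Expected a value of type "WeightUnitsEnum"` error.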
gh_patches_debug_6778
rasdani/github-patches
git_diff
flairNLP__flair-160
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Flair depends on AWS CLI (boto library etc) --- should be removed/optional I assume that the usual Flair user does not need any AWS dependency, therefore the hard AWS CLI dependency (defined in `setup.py`) should be removed or at least made optional. </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 3 setup( 4 name='flair', 5 version='0.3.0', 6 description='A very simple framework for state-of-the-art NLP', 7 long_description=open("README.md", encoding='utf-8').read(), 8 long_description_content_type="text/markdown", 9 author='Alan Akbik', 10 author_email='[email protected]', 11 url='https://github.com/zalandoresearch/flair', 12 packages=find_packages(exclude='test'), # same as name 13 license='MIT', 14 install_requires=[ 15 'torch==0.4.1', 16 'awscli==1.14.32', 17 'gensim==3.4.0', 18 'typing==3.6.4', 19 'tqdm==4.23.4', 20 'segtok==1.5.6', 21 'matplotlib==3.0.0', 22 'mpld3==0.3', 23 'jinja2==2.10', 24 'sklearn', 25 'sqlitedict==1.6.0', 26 ], 27 include_package_data=True, 28 python_requires='>=3.6', 29 ) 30 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -13,14 +13,12 @@ license='MIT', install_requires=[ 'torch==0.4.1', - 'awscli==1.14.32', 'gensim==3.4.0', 'typing==3.6.4', 'tqdm==4.23.4', 'segtok==1.5.6', 'matplotlib==3.0.0', 'mpld3==0.3', - 'jinja2==2.10', 'sklearn', 'sqlitedict==1.6.0', ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,14 +13,12 @@\n license='MIT',\n install_requires=[\n 'torch==0.4.1',\n- 'awscli==1.14.32',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.23.4',\n 'segtok==1.5.6',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n- 'jinja2==2.10',\n 'sklearn',\n 'sqlitedict==1.6.0',\n ],\n", "issue": "Flair depends on AWS CLI (boto library etc) --- should be removed/optional\nI assume that the usual Flair user does not need any AWS dependency, therefore the hard AWS CLI dependency (defined in `setup.py`) should be removed or at least made optional.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.3.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch==0.4.1',\n 'awscli==1.14.32',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.23.4',\n 'segtok==1.5.6',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n 'jinja2==2.10',\n 'sklearn',\n 'sqlitedict==1.6.0',\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]}
899
165
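The flair fix is pure deletion: `awscli` and `jinja2` leave `install_requires`. When a heavy dependency still serves some users, the usual middle ground is an extra rather than outright removal; a hedged sketch of that alternative, reusing the pins from the setup.py above:

```python
from setuptools import setup, find_packages

setup(
    name="flair",
    version="0.3.0",
    packages=find_packages(exclude="test"),
    # Core runtime dependencies only.
    install_requires=[
        "torch==0.4.1",
        "gensim==3.4.0",
    ],
    # Opt-in via `pip install flair[aws]`; plain installs skip boto entirely.
    extras_require={
        "aws": ["awscli==1.14.32"],
    },
)
```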
gh_patches_debug_25887
rasdani/github-patches
git_diff
opendatacube__datacube-core-362
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop Support for Python 2 I'd like to propose that future development on Data Cube Core stop supporting Python 2 after the beginning of February 2018. We can continue to support Python 2 for bug fix releases of version 1.5.x, but beyond that it is an ongoing drain on the limited developer resources. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 import versioneer 4 from setuptools import setup, find_packages 5 6 tests_require = [ 7 'compliance-checker', 8 'hypothesis', 9 'mock', 10 'objgraph', 11 'pycodestyle', 12 'pylint', 13 'pytest', 14 'pytest-cov', 15 'pytest-timeout', 16 ] 17 18 extras_require = { 19 'performance': ['ciso8601', 'bottleneck'], 20 'interactive': ['matplotlib', 'fiona'], 21 'distributed': ['distributed', 'dask[distributed]'], 22 'analytics': ['scipy', 'pyparsing', 'numexpr'], 23 'doc': ['Sphinx', 'setuptools'], 24 'replicas': ['paramiko', 'sshtunnel', 'tqdm'], 25 'celery': ['celery>=4', 'redis'], 26 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'], 27 'test': tests_require, 28 } 29 # An 'all' option, following ipython naming conventions. 30 extras_require['all'] = sorted(set(sum(extras_require.values(), []))) 31 32 setup( 33 name='datacube', 34 version=versioneer.get_version(), 35 cmdclass=versioneer.get_cmdclass(), 36 37 url='https://github.com/opendatacube/datacube-core', 38 author='AGDC Collaboration', 39 maintainer='AGDC Collaboration', 40 maintainer_email='', 41 description='An analysis environment for satellite and other earth observation data', 42 long_description=open('README.rst').read(), 43 license='Apache License 2.0', 44 classifiers=[ 45 "Development Status :: 4 - Beta", 46 "Intended Audience :: Developers", 47 "Intended Audience :: Science/Research", 48 "License :: OSI Approved :: Apache Software License", 49 "Natural Language :: English", 50 "Operating System :: MacOS :: MacOS X", 51 "Operating System :: POSIX", 52 "Operating System :: POSIX :: BSD", 53 "Operating System :: POSIX :: Linux", 54 "Operating System :: Microsoft :: Windows", 55 "Programming Language :: Python", 56 "Programming Language :: Python :: 2", 57 "Programming Language :: Python :: 2.7", 58 "Programming Language :: Python :: 3", 59 "Programming Language :: Python :: 3.5", 60 "Topic :: Scientific/Engineering :: GIS", 61 "Topic :: Scientific/Engineering :: Information Analysis", 62 ], 63 64 packages=find_packages( 65 exclude=('tests', 'tests.*', 66 'integration_tests', 'integration_tests.*') 67 ), 68 package_data={ 69 '': ['*.yaml', '*/*.yaml'], 70 }, 71 scripts=[ 72 'datacube_apps/scripts/pbs_helpers.sh' 73 ], 74 setup_requires=[ 75 'pytest-runner' 76 ], 77 install_requires=[ 78 'affine', 79 'cachetools', 80 'click>=5.0', 81 'cloudpickle>=0.4', 82 'dask[array]', 83 'gdal>=1.9', 84 'jsonschema', 85 'netcdf4', 86 'numpy', 87 'pathlib;python_version<"3"', 88 'psycopg2', 89 'pypeg2', 90 'python-dateutil', 91 'pyyaml', 92 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems 93 'singledispatch', 94 'sqlalchemy', 95 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost 96 ], 97 extras_require=extras_require, 98 tests_require=tests_require, 99 100 entry_points={ 101 'console_scripts': [ 102 'datacube = datacube.scripts.cli_app:cli', 103 'datacube-search = datacube.scripts.search_tool:cli', 104 'datacube-stacker = datacube_apps.stacker:main', 105 'datacube-worker = datacube.execution.worker:main', 106 
'datacube-fixer = datacube_apps.stacker:fixer_main', 107 'datacube-ncml = datacube_apps.ncml:ncml_app', 108 'pixeldrill = datacube_apps.pixeldrill:main [interactive]', 109 'movie_generator = datacube_apps.movie_generator:main', 110 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]' 111 ], 112 'datacube.plugins.io.read': [ 113 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init', 114 's3aio = datacube.drivers.s3.driver:reader_driver_init', 115 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init' 116 ], 117 'datacube.plugins.io.write': [ 118 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init', 119 's3aio = datacube.drivers.s3.driver:writer_driver_init', 120 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init', 121 ], 122 'datacube.plugins.index': [ 123 'default = datacube.index.index:index_driver_init', 124 's3aio_index = datacube.drivers.s3aio_index:index_driver_init', 125 ], 126 }, 127 ) 128 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ name='datacube', version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), + python_requires='>3.5.2', url='https://github.com/opendatacube/datacube-core', author='AGDC Collaboration', @@ -53,10 +54,9 @@ "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", "Topic :: Scientific/Engineering :: GIS", "Topic :: Scientific/Engineering :: Information Analysis", ], @@ -84,7 +84,6 @@ 'jsonschema', 'netcdf4', 'numpy', - 'pathlib;python_version<"3"', 'psycopg2', 'pypeg2', 'python-dateutil',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,6 +33,7 @@\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n+ python_requires='>3.5.2',\n \n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n@@ -53,10 +54,9 @@\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n- \"Programming Language :: Python :: 2\",\n- \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n+ \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n@@ -84,7 +84,6 @@\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n- 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n", "issue": "Drop Support for Python 2\nI'd like to propose that future development on Data Cube Core stop supporting Python 2 after the beginning of February 2018.\r\n\r\nWe can continue to support Python 2 for bug fix releases of version 1.5.x, but beyond that it is an ongoing drain on the limited developer resources.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker',\n 'hypothesis',\n 'mock',\n 'objgraph',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n 
install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n 's3aio = datacube.drivers.s3.driver:reader_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init'\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n 's3aio = datacube.drivers.s3.driver:writer_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init',\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n 's3aio_index = datacube.drivers.s3aio_index:index_driver_init',\n ],\n },\n)\n", "path": "setup.py"}]}
1987
269
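Dropping Python 2 in the datacube record takes three coordinated edits: a `python_requires` gate, classifier updates, and deleting the `pathlib` backport that only existed for the `python_version<"3"` environment marker. A compact sketch of how the gate and a marker-guarded backport interact (package name hypothetical):

```python
from setuptools import setup

setup(
    name="example-drops-py2",  # hypothetical name for illustration
    # pip now refuses to install this release on Python 2 (or <= 3.5.2)
    # instead of failing later at import time.
    python_requires=">3.5.2",
    install_requires=[
        "numpy",
        # Before the drop, a stdlib backport needed an environment marker:
        #   'pathlib;python_version<"3"'
        # With python_requires excluding Python 2, the line is simply deleted.
    ],
)
```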
gh_patches_debug_32556
rasdani/github-patches
git_diff
streamlink__streamlink-121
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Plugin LivecodingTV fails to load on Python 3.6.0b2 on Windows 10 x64 Just running streamlink raises the following error on my system with a fresh install: ``` C:\WINDOWS\system32>streamlink Failed to load plugin livecodingtv: File "c:\program files\python36\lib\imp.py", line 234, in load_module return load_source(name, filename, file) File "c:\program files\python36\lib\imp.py", line 172, in load_source module = _load(spec) File "<frozen importlib._bootstrap>", line 675, in _load File "<frozen importlib._bootstrap>", line 655, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 677, in exec_module File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed File "c:\program files\python36\lib\site-packages\streamlink\plugins\livecodingtv.py", line 9, in <module> _url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv') File "c:\program files\python36\lib\re.py", line 233, in compile return _compile(pattern, flags) File "c:\program files\python36\lib\re.py", line 301, in _compile p = sre_compile.compile(pattern, flags) File "c:\program files\python36\lib\sre_compile.py", line 562, in compile p = sre_parse.parse(p, flags) File "c:\program files\python36\lib\sre_parse.py", line 856, in parse p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, False) File "c:\program files\python36\lib\sre_parse.py", line 415, in _parse_sub itemsappend(_parse(source, state, verbose)) File "c:\program files\python36\lib\sre_parse.py", line 501, in _parse code = _escape(source, this, state) File "c:\program files\python36\lib\sre_parse.py", line 401, in _escape raise source.error("bad escape %s" % escape, len(escape)) sre_constants.error: bad escape \l at position 20 usage: streamlink [OPTIONS] [URL] [STREAM] Use -h/--help to see the available options or read the manual at http://docs.streamlink.io/ C:\WINDOWS\system32>python --version Python 3.6.0b2 ``` </issue> <code> [start of src/streamlink/plugins/livecodingtv.py] 1 import re 2 from streamlink.plugin import Plugin 3 from streamlink.stream import RTMPStream, HTTPStream 4 from streamlink.plugin.api import http 5 6 7 _vod_re = re.compile('\"(http(s)?://.*\.mp4\?t=.*)\"') 8 _rtmp_re = re.compile('rtmp://[^"]+/(?P<channel>\w+)+[^/"]+') 9 _url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv') 10 11 12 class LivecodingTV(Plugin): 13 @classmethod 14 def can_handle_url(cls, url): 15 return _url_re.match(url) 16 17 def _get_streams(self): 18 res = http.get(self.url) 19 match = _rtmp_re.search(res.content.decode('utf-8')) 20 if match: 21 params = { 22 "rtmp": match.group(0), 23 "pageUrl": self.url, 24 "live": True, 25 } 26 yield 'live', RTMPStream(self.session, params) 27 return 28 29 match = _vod_re.search(res.content.decode('utf-8')) 30 if match: 31 yield 'vod', HTTPStream(self.session, match.group(1)) 32 33 __plugin__ = LivecodingTV 34 [end of src/streamlink/plugins/livecodingtv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/livecodingtv.py b/src/streamlink/plugins/livecodingtv.py --- a/src/streamlink/plugins/livecodingtv.py +++ b/src/streamlink/plugins/livecodingtv.py @@ -1,12 +1,20 @@ import re from streamlink.plugin import Plugin +from streamlink.stream import HLSStream from streamlink.stream import RTMPStream, HTTPStream from streamlink.plugin.api import http -_vod_re = re.compile('\"(http(s)?://.*\.mp4\?t=.*)\"') -_rtmp_re = re.compile('rtmp://[^"]+/(?P<channel>\w+)+[^/"]+') -_url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv') +_streams_re = re.compile(r""" + src:\s+"( + rtmp://.*?\?t=.*?| # RTMP stream + https?://.*?playlist.m3u8.*?\?t=.*?| # HLS stream + https?://.*?manifest.mpd.*?\?t=.*?| # DASH stream + https?://.*?.mp4\?t=.*? # HTTP stream + )".*? + type:\s+"(.*?)" # which stream type it is + """, re.M | re.DOTALL | re.VERBOSE) +_url_re = re.compile(r"http(s)?://(?:\w+\.)?livecoding\.tv") class LivecodingTV(Plugin): @@ -16,18 +24,19 @@ def _get_streams(self): res = http.get(self.url) - match = _rtmp_re.search(res.content.decode('utf-8')) - if match: - params = { - "rtmp": match.group(0), - "pageUrl": self.url, - "live": True, - } - yield 'live', RTMPStream(self.session, params) - return - - match = _vod_re.search(res.content.decode('utf-8')) - if match: - yield 'vod', HTTPStream(self.session, match.group(1)) + match = _streams_re.findall(res.content.decode('utf-8')) + for url, stream_type in match: + if stream_type == "rtmp/mp4" and RTMPStream.is_usable(self.session): + params = { + "rtmp": url, + "pageUrl": self.url, + "live": True, + } + yield 'live', RTMPStream(self.session, params) + elif stream_type == "application/x-mpegURL": + for s in HLSStream.parse_variant_playlist(self.session, url).items(): + yield s + elif stream_type == "video/mp4": + yield 'vod', HTTPStream(self.session, url) __plugin__ = LivecodingTV
{"golden_diff": "diff --git a/src/streamlink/plugins/livecodingtv.py b/src/streamlink/plugins/livecodingtv.py\n--- a/src/streamlink/plugins/livecodingtv.py\n+++ b/src/streamlink/plugins/livecodingtv.py\n@@ -1,12 +1,20 @@\n import re\n from streamlink.plugin import Plugin\n+from streamlink.stream import HLSStream\n from streamlink.stream import RTMPStream, HTTPStream\n from streamlink.plugin.api import http\n \n \n-_vod_re = re.compile('\\\"(http(s)?://.*\\.mp4\\?t=.*)\\\"')\n-_rtmp_re = re.compile('rtmp://[^\"]+/(?P<channel>\\w+)+[^/\"]+')\n-_url_re = re.compile('http(s)?://(?:\\w+.)?\\livecoding\\.tv')\n+_streams_re = re.compile(r\"\"\"\n+ src:\\s+\"(\n+ rtmp://.*?\\?t=.*?| # RTMP stream\n+ https?://.*?playlist.m3u8.*?\\?t=.*?| # HLS stream\n+ https?://.*?manifest.mpd.*?\\?t=.*?| # DASH stream\n+ https?://.*?.mp4\\?t=.*? # HTTP stream\n+ )\".*?\n+ type:\\s+\"(.*?)\" # which stream type it is\n+ \"\"\", re.M | re.DOTALL | re.VERBOSE)\n+_url_re = re.compile(r\"http(s)?://(?:\\w+\\.)?livecoding\\.tv\")\n \n \n class LivecodingTV(Plugin):\n@@ -16,18 +24,19 @@\n \n def _get_streams(self):\n res = http.get(self.url)\n- match = _rtmp_re.search(res.content.decode('utf-8'))\n- if match:\n- params = {\n- \"rtmp\": match.group(0),\n- \"pageUrl\": self.url,\n- \"live\": True,\n- }\n- yield 'live', RTMPStream(self.session, params)\n- return\n-\n- match = _vod_re.search(res.content.decode('utf-8'))\n- if match:\n- yield 'vod', HTTPStream(self.session, match.group(1))\n+ match = _streams_re.findall(res.content.decode('utf-8'))\n+ for url, stream_type in match:\n+ if stream_type == \"rtmp/mp4\" and RTMPStream.is_usable(self.session):\n+ params = {\n+ \"rtmp\": url,\n+ \"pageUrl\": self.url,\n+ \"live\": True,\n+ }\n+ yield 'live', RTMPStream(self.session, params)\n+ elif stream_type == \"application/x-mpegURL\":\n+ for s in HLSStream.parse_variant_playlist(self.session, url).items():\n+ yield s\n+ elif stream_type == \"video/mp4\":\n+ yield 'vod', HTTPStream(self.session, url)\n \n __plugin__ = LivecodingTV\n", "issue": "Plugin LivecodingTV fails to load on Python 3.6.0b2 on Windows 10 x64\nJust running streamlink raises the following error on my system with a fresh install:\n\n```\nC:\\WINDOWS\\system32>streamlink\nFailed to load plugin livecodingtv:\n File \"c:\\program files\\python36\\lib\\imp.py\", line 234, in load_module\n return load_source(name, filename, file)\n File \"c:\\program files\\python36\\lib\\imp.py\", line 172, in load_source\n module = _load(spec)\n File \"<frozen importlib._bootstrap>\", line 675, in _load\n File \"<frozen importlib._bootstrap>\", line 655, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 677, in exec_module\n File \"<frozen importlib._bootstrap>\", line 205, in _call_with_frames_removed\n File \"c:\\program files\\python36\\lib\\site-packages\\streamlink\\plugins\\livecodingtv.py\", line 9, in <module>\n _url_re = re.compile('http(s)?://(?:\\w+.)?\\livecoding\\.tv')\n File \"c:\\program files\\python36\\lib\\re.py\", line 233, in compile\n return _compile(pattern, flags)\n File \"c:\\program files\\python36\\lib\\re.py\", line 301, in _compile\n p = sre_compile.compile(pattern, flags)\n File \"c:\\program files\\python36\\lib\\sre_compile.py\", line 562, in compile\n p = sre_parse.parse(p, flags)\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 856, in parse\n p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, False)\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 415, in _parse_sub\n 
itemsappend(_parse(source, state, verbose))\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 501, in _parse\n code = _escape(source, this, state)\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 401, in _escape\n raise source.error(\"bad escape %s\" % escape, len(escape))\nsre_constants.error: bad escape \\l at position 20\n\nusage: streamlink [OPTIONS] [URL] [STREAM]\n\nUse -h/--help to see the available options or read the manual at http://docs.streamlink.io/\n\nC:\\WINDOWS\\system32>python --version\nPython 3.6.0b2\n```\n\n", "before_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import RTMPStream, HTTPStream\nfrom streamlink.plugin.api import http\n\n\n_vod_re = re.compile('\\\"(http(s)?://.*\\.mp4\\?t=.*)\\\"')\n_rtmp_re = re.compile('rtmp://[^\"]+/(?P<channel>\\w+)+[^/\"]+')\n_url_re = re.compile('http(s)?://(?:\\w+.)?\\livecoding\\.tv')\n\n\nclass LivecodingTV(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url)\n match = _rtmp_re.search(res.content.decode('utf-8'))\n if match:\n params = {\n \"rtmp\": match.group(0),\n \"pageUrl\": self.url,\n \"live\": True,\n }\n yield 'live', RTMPStream(self.session, params)\n return\n\n match = _vod_re.search(res.content.decode('utf-8'))\n if match:\n yield 'vod', HTTPStream(self.session, match.group(1))\n\n__plugin__ = LivecodingTV\n", "path": "src/streamlink/plugins/livecodingtv.py"}]}
1492
648
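The streamlink failure is a string-literal problem rather than a regex-engine one: in `'http(s)?://(?:\w+.)?\livecoding\.tv'` the `\l` reaches `re.compile` intact, and Python 3.6 started rejecting unknown escapes instead of silently reading them as a literal `l`. A small reproduction next to the raw-string form the golden diff adopts:

```python
import re

# Non-raw literal: "\l" survives string parsing, then the regex engine
# rejects it with "bad escape \l" on Python 3.6+.
try:
    re.compile('http(s)?://(?:\w+.)?\livecoding\.tv')
except re.error as exc:
    print("compile failed:", exc)

# Raw string: every backslash is deliberate and the stray "\l" is gone.
pattern = re.compile(r"http(s)?://(?:\w+\.)?livecoding\.tv")
print(bool(pattern.match("https://www.livecoding.tv")))  # True
```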
gh_patches_debug_651
rasdani/github-patches
git_diff
DataDog__integrations-extras-1031
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Collect clock_time_seconds metric from cert-manager cert-manager v1.5+ exposes a `clock_time` metric which reports the current seconds since the Unix Epoch See: https://github.com/jetstack/cert-manager/pull/4105 It would be useful to collect this metric in DataDog so we can alert on seconds until a given certificate expires </issue> <code> [start of cert_manager/datadog_checks/cert_manager/metrics.py] 1 # (C) Datadog, Inc. 2019-present 2 # All rights reserved 3 # Licensed under a 3-clause BSD style license (see LICENSE) 4 5 CERT_METRICS = { 6 'certmanager_certificate_ready_status': 'certificate.ready_status', 7 'certmanager_certificate_expiration_timestamp_seconds': 'certificate.expiration_timestamp', 8 } 9 10 CONTROLLER_METRICS = { 11 'certmanager_controller_sync_call_count': 'controller.sync_call.count', 12 } 13 14 ACME_METRICS = { 15 'certmanager_http_acme_client_request_count': 'http_acme_client.request.count', 16 'certmanager_http_acme_client_request_duration_seconds': 'http_acme_client.request.duration', 17 } 18 [end of cert_manager/datadog_checks/cert_manager/metrics.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cert_manager/datadog_checks/cert_manager/metrics.py b/cert_manager/datadog_checks/cert_manager/metrics.py --- a/cert_manager/datadog_checks/cert_manager/metrics.py +++ b/cert_manager/datadog_checks/cert_manager/metrics.py @@ -8,6 +8,7 @@ } CONTROLLER_METRICS = { + 'certmanager_clock_time_seconds': 'clock_time', 'certmanager_controller_sync_call_count': 'controller.sync_call.count', }
{"golden_diff": "diff --git a/cert_manager/datadog_checks/cert_manager/metrics.py b/cert_manager/datadog_checks/cert_manager/metrics.py\n--- a/cert_manager/datadog_checks/cert_manager/metrics.py\n+++ b/cert_manager/datadog_checks/cert_manager/metrics.py\n@@ -8,6 +8,7 @@\n }\n \n CONTROLLER_METRICS = {\n+ 'certmanager_clock_time_seconds': 'clock_time',\n 'certmanager_controller_sync_call_count': 'controller.sync_call.count',\n }\n", "issue": "Collect clock_time_seconds metric from cert-manager\ncert-manager v1.5+ exposes a `clock_time` metric which reports the current seconds since the Unix Epoch\r\nSee: https://github.com/jetstack/cert-manager/pull/4105\r\n\r\nIt would be useful to collect this metric in DataDog so we can alert on seconds until a given certificate expires\r\n\n", "before_files": [{"content": "# (C) Datadog, Inc. 2019-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\n\nCERT_METRICS = {\n 'certmanager_certificate_ready_status': 'certificate.ready_status',\n 'certmanager_certificate_expiration_timestamp_seconds': 'certificate.expiration_timestamp',\n}\n\nCONTROLLER_METRICS = {\n 'certmanager_controller_sync_call_count': 'controller.sync_call.count',\n}\n\nACME_METRICS = {\n 'certmanager_http_acme_client_request_count': 'http_acme_client.request.count',\n 'certmanager_http_acme_client_request_duration_seconds': 'http_acme_client.request.duration',\n}\n", "path": "cert_manager/datadog_checks/cert_manager/metrics.py"}]}
797
112
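Once `clock_time` is collected next to `certificate.expiration_timestamp`, seconds-until-expiry is a subtraction of two Unix-epoch gauges, which is exactly what the issue wants to alert on. A back-of-the-envelope sketch of the monitor arithmetic, with invented sample values:

```python
# Both gauges report seconds since the Unix epoch, per the mapped metrics.
clock_time = 1_700_000_000.0   # certmanager_clock_time_seconds
expiration = 1_702_592_000.0   # certmanager_certificate_expiration_timestamp_seconds

seconds_left = expiration - clock_time
days_left = seconds_left / 86_400  # 86400 seconds per day

# e.g. page someone when fewer than 14 days remain.
print(f"{days_left:.1f} days until expiry; alert={days_left < 14}")
```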
gh_patches_debug_3393
rasdani/github-patches
git_diff
aio-libs__aiohttp-2794
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Chat demo not working in aiohttp 3 ## Long story short Want to learn websockets with example code. There's a lack of simple and runnable example code on the net demonstrating websockets in python. ## Expected behaviour Server runs and page loads ## Actual behaviour Server runs but page fails to load with following error: ``` ~/work/aiohttp/demos/chat$ ~/venv3/bin/python aiohttpdemo_chat/main.py DEBUG:asyncio:Using selector: EpollSelector ======== Running on http://0.0.0.0:8080 ======== (Press CTRL+C to quit) ERROR:aiohttp.server:Error handling request Traceback (most recent call last): File "/home/robin/venv3/lib/python3.6/site-packages/aiohttp/web_protocol.py", line 381, in start resp = await self._request_handler(request) File "/home/robin/venv3/lib/python3.6/site-packages/aiohttp/web_app.py", line 310, in _handle resp = await handler(request) File "/home/robin/venv3/lib/python3.6/site-packages/aiohttpdemo_chat-0.0.1-py3.6.egg/aiohttpdemo_chat/views.py", line 15, in index ok, protocol = resp.can_start(request) AttributeError: 'WebSocketResponse' object has no attribute 'can_start' ``` ## Steps to reproduce Just run `aiohttpdemo_chat/main.py` ## Your environment archlinux, aiohttp 3.0.5 server? </issue> <code> [start of demos/chat/aiohttpdemo_chat/views.py] 1 import json 2 import logging 3 import random 4 import string 5 6 import aiohttp_jinja2 7 from aiohttp import web 8 9 10 log = logging.getLogger(__name__) 11 12 13 async def index(request): 14 resp = web.WebSocketResponse() 15 ok, protocol = resp.can_start(request) 16 if not ok: 17 return aiohttp_jinja2.render_template('index.html', request, {}) 18 19 await resp.prepare(request) 20 name = (random.choice(string.ascii_uppercase) + 21 ''.join(random.sample(string.ascii_lowercase*10, 10))) 22 log.info('%s joined.', name) 23 await resp.send_str(json.dumps({'action': 'connect', 24 'name': name})) 25 for ws in request.app['sockets'].values(): 26 await ws.send_str(json.dumps({'action': 'join', 27 'name': name})) 28 request.app['sockets'][name] = resp 29 30 while True: 31 msg = await resp.receive() 32 33 if msg.type == web.MsgType.text: 34 for ws in request.app['sockets'].values(): 35 if ws is not resp: 36 await ws.send_str(json.dumps({'action': 'sent', 37 'name': name, 38 'text': msg.data})) 39 else: 40 break 41 42 del request.app['sockets'][name] 43 log.info('%s disconnected.', name) 44 for ws in request.app['sockets'].values(): 45 await ws.send_str(json.dumps({'action': 'disconnect', 46 'name': name})) 47 return resp 48 49 50 def setup(app): 51 app.router.add_get('/', index) 52 [end of demos/chat/aiohttpdemo_chat/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/demos/chat/aiohttpdemo_chat/views.py b/demos/chat/aiohttpdemo_chat/views.py --- a/demos/chat/aiohttpdemo_chat/views.py +++ b/demos/chat/aiohttpdemo_chat/views.py @@ -12,8 +12,8 @@ async def index(request): resp = web.WebSocketResponse() - ok, protocol = resp.can_start(request) - if not ok: + is_ws = resp.can_prepare(request) + if not is_ws: return aiohttp_jinja2.render_template('index.html', request, {}) await resp.prepare(request)
{"golden_diff": "diff --git a/demos/chat/aiohttpdemo_chat/views.py b/demos/chat/aiohttpdemo_chat/views.py\n--- a/demos/chat/aiohttpdemo_chat/views.py\n+++ b/demos/chat/aiohttpdemo_chat/views.py\n@@ -12,8 +12,8 @@\n \n async def index(request):\n resp = web.WebSocketResponse()\n- ok, protocol = resp.can_start(request)\n- if not ok:\n+ is_ws = resp.can_prepare(request)\n+ if not is_ws:\n return aiohttp_jinja2.render_template('index.html', request, {})\n \n await resp.prepare(request)\n", "issue": "Chat demo not working in aiohttp 3\n## Long story short\r\n\r\nWant to learn websockets with example code. There's a lack of simple and runnable example code on the net demonstrating websockets in python.\r\n\r\n## Expected behaviour\r\n\r\nServer runs and page loads\r\n\r\n## Actual behaviour\r\n\r\nServer runs but page fails to load with following error:\r\n```\r\n~/work/aiohttp/demos/chat$ ~/venv3/bin/python aiohttpdemo_chat/main.py \r\nDEBUG:asyncio:Using selector: EpollSelector\r\n======== Running on http://0.0.0.0:8080 ========\r\n(Press CTRL+C to quit)\r\nERROR:aiohttp.server:Error handling request\r\nTraceback (most recent call last):\r\n File \"/home/robin/venv3/lib/python3.6/site-packages/aiohttp/web_protocol.py\", line 381, in start\r\n resp = await self._request_handler(request)\r\n File \"/home/robin/venv3/lib/python3.6/site-packages/aiohttp/web_app.py\", line 310, in _handle\r\n resp = await handler(request)\r\n File \"/home/robin/venv3/lib/python3.6/site-packages/aiohttpdemo_chat-0.0.1-py3.6.egg/aiohttpdemo_chat/views.py\", line 15, in index\r\n ok, protocol = resp.can_start(request)\r\nAttributeError: 'WebSocketResponse' object has no attribute 'can_start'\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nJust run `aiohttpdemo_chat/main.py`\r\n\r\n## Your environment\r\n\r\narchlinux, aiohttp 3.0.5 server?\r\n\n", "before_files": [{"content": "import json\nimport logging\nimport random\nimport string\n\nimport aiohttp_jinja2\nfrom aiohttp import web\n\n\nlog = logging.getLogger(__name__)\n\n\nasync def index(request):\n resp = web.WebSocketResponse()\n ok, protocol = resp.can_start(request)\n if not ok:\n return aiohttp_jinja2.render_template('index.html', request, {})\n\n await resp.prepare(request)\n name = (random.choice(string.ascii_uppercase) +\n ''.join(random.sample(string.ascii_lowercase*10, 10)))\n log.info('%s joined.', name)\n await resp.send_str(json.dumps({'action': 'connect',\n 'name': name}))\n for ws in request.app['sockets'].values():\n await ws.send_str(json.dumps({'action': 'join',\n 'name': name}))\n request.app['sockets'][name] = resp\n\n while True:\n msg = await resp.receive()\n\n if msg.type == web.MsgType.text:\n for ws in request.app['sockets'].values():\n if ws is not resp:\n await ws.send_str(json.dumps({'action': 'sent',\n 'name': name,\n 'text': msg.data}))\n else:\n break\n\n del request.app['sockets'][name]\n log.info('%s disconnected.', name)\n for ws in request.app['sockets'].values():\n await ws.send_str(json.dumps({'action': 'disconnect',\n 'name': name}))\n return resp\n\n\ndef setup(app):\n app.router.add_get('/', index)\n", "path": "demos/chat/aiohttpdemo_chat/views.py"}]}
1313
136
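The `AttributeError` in the aiohttp record is an aiohttp 2-to-3 API change: `WebSocketResponse.can_start()`, which returned an `(ok, protocol)` tuple, became `can_prepare()`, which returns a single truthiness-testable object, and the golden diff applies exactly that rename. A stripped-down 3.x-style handler (the plain-text response stands in for the template render):

```python
import aiohttp
from aiohttp import web

async def index(request):
    ws = web.WebSocketResponse()
    # aiohttp 3.x: can_prepare() replaces the removed can_start().
    if not ws.can_prepare(request):
        # Ordinary HTTP GET: serve the page instead of upgrading.
        return web.Response(text="chat page placeholder")

    await ws.prepare(request)
    async for msg in ws:
        if msg.type == aiohttp.WSMsgType.TEXT:
            await ws.send_str(msg.data)  # echo; the real demo broadcasts

    return ws

app = web.Application()
app.router.add_get("/", index)
```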
gh_patches_debug_69
rasdani/github-patches
git_diff
d2l-ai__d2l-en-2256
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ModuleNotFoundError when running the official pytorch colab notebook ![image](https://user-images.githubusercontent.com/33608782/184685789-951935c7-1725-4a23-944d-8d48b32bf76c.png) I can replicate the error at multiple official pytorch colab notebooks, e.g. https://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 import d2l 3 4 requirements = [ 5 'jupyter', 6 'numpy', 7 'matplotlib', 8 'requests', 9 'pandas', 10 'gym' 11 ] 12 13 setup( 14 name='d2l', 15 version=d2l.__version__, 16 python_requires='>=3.5', 17 author='D2L Developers', 18 author_email='[email protected]', 19 url='https://d2l.ai', 20 description='Dive into Deep Learning', 21 license='MIT-0', 22 packages=find_packages(), 23 zip_safe=True, 24 install_requires=requirements, 25 ) 26 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -2,6 +2,7 @@ import d2l requirements = [ + 'ipython>=7.23', 'jupyter', 'numpy', 'matplotlib',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,6 +2,7 @@\n import d2l\n \n requirements = [\n+ 'ipython>=7.23',\n 'jupyter',\n 'numpy',\n 'matplotlib',\n", "issue": "ModuleNotFoundError when running the official pytorch colab notebook\n![image](https://user-images.githubusercontent.com/33608782/184685789-951935c7-1725-4a23-944d-8d48b32bf76c.png)\r\n\r\nI can replicate the error at multiple official pytorch colab notebooks, e.g. \r\n\r\nhttps://colab.research.google.com/github/d2l-ai/d2l-pytorch-colab/blob/master/chapter_linear-classification/image-classification-dataset.ipynb#scrollTo=ee445cce\r\n\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]}
853
63
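The d2l fix is a single added version floor. The diff does not say why `7.23` specifically; a plausible reading is that `d2l` imports IPython display machinery at runtime that the Colab-bundled version predated, so declaring the minimum lets `pip install d2l` upgrade it. The resulting list, with that rationale flagged as an assumption:

```python
requirements = [
    "ipython>=7.23",  # floor taken from the golden diff; reason above is assumed
    "jupyter",
    "numpy",
    "matplotlib",
    "requests",
    "pandas",
    "gym",
]
```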
gh_patches_debug_49036
rasdani/github-patches
git_diff
facebookresearch__hydra-1960
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [CI] `mypy` fails for optuna sweeper on main Turns out it was caused by a newly released version of `alembic` and it has been fixed in optuna https://github.com/optuna/optuna/pull/2887 </issue> <code> [start of plugins/hydra_optuna_sweeper/setup.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 # type: ignore 3 from pathlib import Path 4 5 from read_version import read_version 6 from setuptools import find_namespace_packages, setup 7 8 setup( 9 name="hydra-optuna-sweeper", 10 version=read_version("hydra_plugins/hydra_optuna_sweeper", "__init__.py"), 11 author="Toshihiko Yanase, Hiroyuki Vincent Yamazaki", 12 author_email="[email protected], [email protected]", 13 description="Hydra Optuna Sweeper plugin", 14 long_description=(Path(__file__).parent / "README.md").read_text(), 15 long_description_content_type="text/markdown", 16 url="https://github.com/facebookresearch/hydra/", 17 packages=find_namespace_packages(include=["hydra_plugins.*"]), 18 classifiers=[ 19 "License :: OSI Approved :: MIT License", 20 "Programming Language :: Python :: 3.6", 21 "Programming Language :: Python :: 3.7", 22 "Programming Language :: Python :: 3.8", 23 "Programming Language :: Python :: 3.9", 24 "Operating System :: POSIX :: Linux", 25 "Operating System :: MacOS", 26 "Development Status :: 4 - Beta", 27 ], 28 install_requires=[ 29 "hydra-core>=1.1.0.dev7", 30 "optuna>=2.5.0", 31 "alembic<1.7.0", # https://github.com/facebookresearch/hydra/issues/1806 32 ], 33 include_package_data=True, 34 ) 35 [end of plugins/hydra_optuna_sweeper/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/hydra_optuna_sweeper/setup.py b/plugins/hydra_optuna_sweeper/setup.py --- a/plugins/hydra_optuna_sweeper/setup.py +++ b/plugins/hydra_optuna_sweeper/setup.py @@ -27,8 +27,7 @@ ], install_requires=[ "hydra-core>=1.1.0.dev7", - "optuna>=2.5.0", - "alembic<1.7.0", # https://github.com/facebookresearch/hydra/issues/1806 + "optuna>=2.10.0", ], include_package_data=True, )
{"golden_diff": "diff --git a/plugins/hydra_optuna_sweeper/setup.py b/plugins/hydra_optuna_sweeper/setup.py\n--- a/plugins/hydra_optuna_sweeper/setup.py\n+++ b/plugins/hydra_optuna_sweeper/setup.py\n@@ -27,8 +27,7 @@\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n- \"optuna>=2.5.0\",\n- \"alembic<1.7.0\", # https://github.com/facebookresearch/hydra/issues/1806\n+ \"optuna>=2.10.0\",\n ],\n include_package_data=True,\n )\n", "issue": "[CI] `mypy` fails for optuna sweeper on main\nTurns out it was caused by a newly released version of `alembic` and it has been fixed in optuna https://github.com/optuna/optuna/pull/2887\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-optuna-sweeper\",\n version=read_version(\"hydra_plugins/hydra_optuna_sweeper\", \"__init__.py\"),\n author=\"Toshihiko Yanase, Hiroyuki Vincent Yamazaki\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Optuna Sweeper plugin\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n \"optuna>=2.5.0\",\n \"alembic<1.7.0\", # https://github.com/facebookresearch/hydra/issues/1806\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_optuna_sweeper/setup.py"}]}
1012
154
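The hydra fix trades a workaround for the cure: `alembic<1.7.0` had pinned a transitive dependency around the breakage, and with the fix merged upstream (optuna/optuna#2887) the diff can require `optuna>=2.10.0` and drop the pin. Side by side, as plain requirement lists:

```python
# Before: dodge the break by pinning a *transitive* dependency.
workaround = [
    "hydra-core>=1.1.0.dev7",
    "optuna>=2.5.0",
    "alembic<1.7.0",  # https://github.com/facebookresearch/hydra/issues/1806
]

# After: require the direct dependency's fixed release, drop the pin.
fixed = [
    "hydra-core>=1.1.0.dev7",
    "optuna>=2.10.0",
]
```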
gh_patches_debug_14477
rasdani/github-patches
git_diff
Kinto__kinto-891
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add user principals to hello view Along `user` info, show the list of user principals (for groups notably) </issue> <code> [start of kinto/core/views/hello.py] 1 from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated 2 3 from kinto.core import Service 4 5 hello = Service(name="hello", path='/', description="Welcome") 6 7 8 @hello.get(permission=NO_PERMISSION_REQUIRED) 9 def get_hello(request): 10 """Return information regarding the current instance.""" 11 settings = request.registry.settings 12 project_name = settings['project_name'] 13 project_version = settings['project_version'] 14 data = dict( 15 project_name=project_name, 16 project_version=project_version, 17 http_api_version=settings['http_api_version'], 18 project_docs=settings['project_docs'], 19 url=request.route_url(hello.name) 20 ) 21 22 eos = get_eos(request) 23 if eos: 24 data['eos'] = eos 25 26 data['settings'] = {} 27 public_settings = request.registry.public_settings 28 for setting in list(public_settings): 29 data['settings'][setting] = settings[setting] 30 31 # If current user is authenticated, add user info: 32 # (Note: this will call authenticated_userid() with multiauth+groupfinder) 33 if Authenticated in request.effective_principals: 34 data['user'] = request.get_user_info() 35 36 # Application can register and expose arbitrary capabilities. 37 data['capabilities'] = request.registry.api_capabilities 38 39 return data 40 41 42 def get_eos(request): 43 return request.registry.settings['eos'] 44 [end of kinto/core/views/hello.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/core/views/hello.py b/kinto/core/views/hello.py --- a/kinto/core/views/hello.py +++ b/kinto/core/views/hello.py @@ -32,6 +32,12 @@ # (Note: this will call authenticated_userid() with multiauth+groupfinder) if Authenticated in request.effective_principals: data['user'] = request.get_user_info() + principals = request.effective_principals + [request.prefixed_userid] + prefix, user_id = request.prefixed_userid.split(':', 1) + # Remove unprefixed user id on effective_principals to avoid conflicts. + if user_id in principals: + principals.remove(user_id) + data['user']['principals'] = principals # Application can register and expose arbitrary capabilities. data['capabilities'] = request.registry.api_capabilities
{"golden_diff": "diff --git a/kinto/core/views/hello.py b/kinto/core/views/hello.py\n--- a/kinto/core/views/hello.py\n+++ b/kinto/core/views/hello.py\n@@ -32,6 +32,12 @@\n # (Note: this will call authenticated_userid() with multiauth+groupfinder)\n if Authenticated in request.effective_principals:\n data['user'] = request.get_user_info()\n+ principals = request.effective_principals + [request.prefixed_userid]\n+ prefix, user_id = request.prefixed_userid.split(':', 1)\n+ # Remove unprefixed user id on effective_principals to avoid conflicts.\n+ if user_id in principals:\n+ principals.remove(user_id)\n+ data['user']['principals'] = principals\n \n # Application can register and expose arbitrary capabilities.\n data['capabilities'] = request.registry.api_capabilities\n", "issue": "Add user principals to hello view\nAlong `user` info, show the list of user principals (for groups notably)\n\n", "before_files": [{"content": "from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom kinto.core import Service\n\nhello = Service(name=\"hello\", path='/', description=\"Welcome\")\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef get_hello(request):\n \"\"\"Return information regarding the current instance.\"\"\"\n settings = request.registry.settings\n project_name = settings['project_name']\n project_version = settings['project_version']\n data = dict(\n project_name=project_name,\n project_version=project_version,\n http_api_version=settings['http_api_version'],\n project_docs=settings['project_docs'],\n url=request.route_url(hello.name)\n )\n\n eos = get_eos(request)\n if eos:\n data['eos'] = eos\n\n data['settings'] = {}\n public_settings = request.registry.public_settings\n for setting in list(public_settings):\n data['settings'][setting] = settings[setting]\n\n # If current user is authenticated, add user info:\n # (Note: this will call authenticated_userid() with multiauth+groupfinder)\n if Authenticated in request.effective_principals:\n data['user'] = request.get_user_info()\n\n # Application can register and expose arbitrary capabilities.\n data['capabilities'] = request.registry.api_capabilities\n\n return data\n\n\ndef get_eos(request):\n return request.registry.settings['eos']\n", "path": "kinto/core/views/hello.py"}]}
931
196
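The kinto diff builds the principal list by appending the prefixed user id and then stripping the bare, unprefixed id so the two forms cannot conflict. The logic is easy to exercise on its own; a self-contained sketch with hypothetical principal values:

```python
def build_user_principals(effective_principals, prefixed_userid):
    # Mirrors the hello-view logic in the diff above.
    principals = list(effective_principals) + [prefixed_userid]
    _prefix, user_id = prefixed_userid.split(":", 1)
    # Remove the unprefixed user id to avoid conflicts.
    if user_id in principals:
        principals.remove(user_id)
    return principals

print(build_user_principals(
    ["system.Everyone", "system.Authenticated", "abc123", "/buckets/b/groups/g"],
    "basicauth:abc123",  # hypothetical prefixed id
))
# ['system.Everyone', 'system.Authenticated', '/buckets/b/groups/g', 'basicauth:abc123']
```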
gh_patches_debug_16034
rasdani/github-patches
git_diff
dask__distributed-636
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot serialize datetime64/timedelta64 numpy arrays ``` distributed.core - INFO - Unserializable Message: {'finalize-c29ed21fd1e79d8eb6b3ff89cf2ae354': <Serialize: ['2010-01-01T00:00:00.000000000' '2010-01-02T00:00:00.000000000' '2010-01-03T00:00:00.000000000']>} distributed.core - ERROR - cannot include dtype 'M' in a buffer Traceback (most recent call last): File "/Users/shoyer/dev/distributed/distributed/core.py", line 228, in write frames = protocol.dumps(msg) File "/Users/shoyer/dev/distributed/distributed/protocol/core.py", line 43, in dumps for key, value in data.items() File "/Users/shoyer/dev/distributed/distributed/protocol/core.py", line 44, in <dictcomp> if type(value) is Serialize} File "/Users/shoyer/dev/distributed/distributed/protocol/serialize.py", line 103, in serialize header, frames = serializers[name](x) File "/Users/shoyer/dev/distributed/distributed/protocol/numpy.py", line 58, in serialize_numpy_ndarray frames = [x.data] ValueError: cannot include dtype 'M' in a buffer ``` The buffer protocol fails on these (sigh). You can view as int64, but then you'll need to keep track of the original type (including time units) separately. This came up in some of my integration tests with xarray. </issue> <code> [start of distributed/protocol/numpy.py] 1 from __future__ import print_function, division, absolute_import 2 3 import sys 4 5 import numpy as np 6 7 try: 8 import blosc 9 n = blosc.set_nthreads(2) 10 except ImportError: 11 blosc = False 12 13 from .utils import frame_split_size 14 from .serialize import register_serialization 15 from . import pickle 16 17 from ..utils import log_errors, ensure_bytes 18 19 20 def itemsize(dt): 21 """ Itemsize of dtype 22 23 Try to return the itemsize of the base element, return 8 as a fallback 24 """ 25 result = dt.base.itemsize 26 if result > 255: 27 result = 8 28 return result 29 30 31 def serialize_numpy_ndarray(x): 32 if x.dtype.hasobject: 33 header = {'pickle': True} 34 frames = [pickle.dumps(x)] 35 return header, frames 36 37 size = itemsize(x.dtype) 38 39 if x.dtype.kind == 'V': 40 dt = x.dtype.descr 41 else: 42 dt = x.dtype.str 43 44 x = np.ascontiguousarray(x) # np.frombuffer requires this 45 46 header = {'dtype': dt, 47 'strides': x.strides, 48 'shape': x.shape} 49 50 if blosc: 51 frames = frame_split_size([x.data]) 52 if sys.version_info.major == 2: 53 frames = [ensure_bytes(frame) for frame in frames] 54 frames = [blosc.compress(frame, typesize=size, 55 cname='lz4', clevel=5) for frame in frames] 56 header['compression'] = ['blosc'] * len(frames) 57 else: 58 frames = [x.data] 59 60 header['lengths'] = [x.nbytes] 61 62 return header, frames 63 64 65 def deserialize_numpy_ndarray(header, frames): 66 with log_errors(): 67 assert len(frames) == 1 68 69 if header.get('pickle'): 70 return pickle.loads(frames[0]) 71 72 dt = header['dtype'] 73 if isinstance(dt, tuple): 74 dt = list(dt) 75 dt = np.dtype(dt) 76 77 buffer = frames[0] 78 79 x = np.frombuffer(buffer, dt) 80 x = np.lib.stride_tricks.as_strided(x, header['shape'], header['strides']) 81 82 return x 83 84 85 register_serialization(np.ndarray, serialize_numpy_ndarray, deserialize_numpy_ndarray) 86 [end of distributed/protocol/numpy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/distributed/protocol/numpy.py b/distributed/protocol/numpy.py --- a/distributed/protocol/numpy.py +++ b/distributed/protocol/numpy.py @@ -47,15 +47,17 @@ 'strides': x.strides, 'shape': x.shape} + data = x.view('u1').data + if blosc: - frames = frame_split_size([x.data]) + frames = frame_split_size([data]) if sys.version_info.major == 2: frames = [ensure_bytes(frame) for frame in frames] frames = [blosc.compress(frame, typesize=size, cname='lz4', clevel=5) for frame in frames] header['compression'] = ['blosc'] * len(frames) else: - frames = [x.data] + frames = [data] header['lengths'] = [x.nbytes]
{"golden_diff": "diff --git a/distributed/protocol/numpy.py b/distributed/protocol/numpy.py\n--- a/distributed/protocol/numpy.py\n+++ b/distributed/protocol/numpy.py\n@@ -47,15 +47,17 @@\n 'strides': x.strides,\n 'shape': x.shape}\n \n+ data = x.view('u1').data\n+\n if blosc:\n- frames = frame_split_size([x.data])\n+ frames = frame_split_size([data])\n if sys.version_info.major == 2:\n frames = [ensure_bytes(frame) for frame in frames]\n frames = [blosc.compress(frame, typesize=size,\n cname='lz4', clevel=5) for frame in frames]\n header['compression'] = ['blosc'] * len(frames)\n else:\n- frames = [x.data]\n+ frames = [data]\n \n header['lengths'] = [x.nbytes]\n", "issue": "Cannot serialize datetime64/timedelta64 numpy arrays\n```\r\ndistributed.core - INFO - Unserializable Message: {'finalize-c29ed21fd1e79d8eb6b3ff89cf2ae354': <Serialize: ['2010-01-01T00:00:00.000000000' '2010-01-02T00:00:00.000000000'\r\n '2010-01-03T00:00:00.000000000']>}\r\ndistributed.core - ERROR - cannot include dtype 'M' in a buffer\r\nTraceback (most recent call last):\r\n File \"/Users/shoyer/dev/distributed/distributed/core.py\", line 228, in write\r\n frames = protocol.dumps(msg)\r\n File \"/Users/shoyer/dev/distributed/distributed/protocol/core.py\", line 43, in dumps\r\n for key, value in data.items()\r\n File \"/Users/shoyer/dev/distributed/distributed/protocol/core.py\", line 44, in <dictcomp>\r\n if type(value) is Serialize}\r\n File \"/Users/shoyer/dev/distributed/distributed/protocol/serialize.py\", line 103, in serialize\r\n header, frames = serializers[name](x)\r\n File \"/Users/shoyer/dev/distributed/distributed/protocol/numpy.py\", line 58, in serialize_numpy_ndarray\r\n frames = [x.data]\r\nValueError: cannot include dtype 'M' in a buffer\r\n```\r\n\r\nThe buffer protocol fails on these (sigh). You can view as int64, but then you'll need to keep track of the original type (including time units) separately.\r\n\r\nThis came up in some of my integration tests with xarray.\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport sys\n\nimport numpy as np\n\ntry:\n import blosc\n n = blosc.set_nthreads(2)\nexcept ImportError:\n blosc = False\n\nfrom .utils import frame_split_size\nfrom .serialize import register_serialization\nfrom . 
import pickle\n\nfrom ..utils import log_errors, ensure_bytes\n\n\ndef itemsize(dt):\n \"\"\" Itemsize of dtype\n\n Try to return the itemsize of the base element, return 8 as a fallback\n \"\"\"\n result = dt.base.itemsize\n if result > 255:\n result = 8\n return result\n\n\ndef serialize_numpy_ndarray(x):\n if x.dtype.hasobject:\n header = {'pickle': True}\n frames = [pickle.dumps(x)]\n return header, frames\n\n size = itemsize(x.dtype)\n\n if x.dtype.kind == 'V':\n dt = x.dtype.descr\n else:\n dt = x.dtype.str\n\n x = np.ascontiguousarray(x) # np.frombuffer requires this\n\n header = {'dtype': dt,\n 'strides': x.strides,\n 'shape': x.shape}\n\n if blosc:\n frames = frame_split_size([x.data])\n if sys.version_info.major == 2:\n frames = [ensure_bytes(frame) for frame in frames]\n frames = [blosc.compress(frame, typesize=size,\n cname='lz4', clevel=5) for frame in frames]\n header['compression'] = ['blosc'] * len(frames)\n else:\n frames = [x.data]\n\n header['lengths'] = [x.nbytes]\n\n return header, frames\n\n\ndef deserialize_numpy_ndarray(header, frames):\n with log_errors():\n assert len(frames) == 1\n\n if header.get('pickle'):\n return pickle.loads(frames[0])\n\n dt = header['dtype']\n if isinstance(dt, tuple):\n dt = list(dt)\n dt = np.dtype(dt)\n\n buffer = frames[0]\n\n x = np.frombuffer(buffer, dt)\n x = np.lib.stride_tricks.as_strided(x, header['shape'], header['strides'])\n\n return x\n\n\nregister_serialization(np.ndarray, serialize_numpy_ndarray, deserialize_numpy_ndarray)\n", "path": "distributed/protocol/numpy.py"}]}
1,625
211
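The essential trick in the golden diff above is `x.view('u1').data`: reinterpreting the array's raw bytes as `uint8` satisfies the buffer protocol (which rejects dtype kind `'M'`/`'m'`), while the dtype string recorded in the header restores the original type on the other side. A minimal round-trip sketch of that idea, assuming a C-contiguous 1-D array and ignoring the strides and compression handling in the real serializer:

```python
import numpy as np

def serialize(x):
    # The buffer protocol rejects datetime64/timedelta64, but a uint8
    # view of the same memory is always acceptable; the header keeps
    # the real dtype so deserialization can undo the reinterpretation.
    header = {"dtype": x.dtype.str, "shape": x.shape}
    frames = [x.view("u1").data]
    return header, frames

def deserialize(header, frames):
    # Rebuild from raw bytes, then reapply the original dtype and shape.
    raw = np.frombuffer(frames[0], dtype="u1")
    return raw.view(header["dtype"]).reshape(header["shape"])

arr = np.array(["2010-01-01", "2010-01-02"], dtype="datetime64[ns]")
header, frames = serialize(arr)
assert (deserialize(header, frames) == arr).all()
```

Viewing as `u1` (rather than casting to `int64`) sidesteps the bookkeeping the issue reporter worried about: the time unit travels inside the dtype string, so nothing has to be tracked separately.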
gh_patches_debug_14829
rasdani/github-patches
git_diff
mozilla__pontoon-2330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [notifications] Notify project Translators when suggestions are submitted *This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).* ## [Bug 1398861](https://bugzilla.mozilla.org/show_bug.cgi?id=1398861) Bug Reporter: @mathjazz CC: @MikkCZ, [email protected] See also: https://bugzilla.mozilla.org/show_bug.cgi?id=1625102 We should probably do that periodically and aggregate notifications. </issue> <code> [start of pontoon/projects/management/commands/send_suggestion_notifications.py] 1 import calendar 2 3 from collections import defaultdict 4 from datetime import timedelta 5 from functools import cached_property 6 7 from django.conf import settings 8 from django.contrib.auth.models import User 9 from django.core.management.base import BaseCommand, CommandError 10 from django.db.models import Prefetch, Q 11 from django.template.loader import render_to_string 12 from django.utils import timezone 13 from notifications.signals import notify 14 15 from pontoon.base.models import Comment, Locale, ProjectLocale, Translation 16 17 18 class Command(BaseCommand): 19 help = "Notify contributors about newly added unreviewed suggestions" 20 21 def add_arguments(self, parser): 22 parser.add_argument( 23 "--force", 24 action="store_true", 25 dest="force", 26 default=False, 27 help="Force run command, regardless of what day of the week it is", 28 ) 29 30 @cached_property 31 def locale_reviewers(self): 32 locales = Locale.objects.prefetch_related( 33 Prefetch("managers_group__user_set", to_attr="fetched_managers"), 34 Prefetch("translators_group__user_set", to_attr="fetched_translators"), 35 ) 36 37 locale_reviewers = {} 38 39 for locale in locales: 40 managers = locale.managers_group.fetched_managers 41 translators = locale.translators_group.fetched_translators 42 locale_reviewers[locale] = managers + translators 43 44 return locale_reviewers 45 46 def extract_notifications_data(self, data, suggestion): 47 locale = suggestion.locale 48 entity = suggestion.entity 49 project = entity.resource.project 50 project_locale = ProjectLocale.objects.get(project=project, locale=locale) 51 52 translations = Translation.objects.filter(entity=entity, locale=locale) 53 recipients = set() 54 55 # Users with permission to review suggestions 56 recipients = recipients.union(self.locale_reviewers[locale]) 57 58 # Authors of previous translations of the same string 59 recipients = recipients.union(User.objects.filter(translation__in=translations)) 60 61 # Authors of comments of previous translations 62 translations_comments = Comment.objects.filter(translation__in=translations) 63 recipients = recipients.union( 64 User.objects.filter(comment__in=translations_comments) 65 ) 66 67 # Authors of team comments of the same string 68 team_comments = Comment.objects.filter(entity=entity, locale=locale) 69 recipients = recipients.union(User.objects.filter(comment__in=team_comments)) 70 71 for recipient in recipients: 72 data[recipient].add(project_locale) 73 74 def get_suggestions(self): 75 start = timezone.now() - timedelta(days=7) 76 77 return Translation.objects.filter( 78 approved=False, rejected=False, fuzzy=False 79 ).filter( 80 Q(date__gt=start) 81 | Q(unapproved_date__gt=start) 82 | Q(unrejected_date__gt=start) 83 ) 84 85 def handle(self, *args, **options): 86 """ 87 This command sends notifications about newly created unreviewed suggestions that 88 were submitted, unapproved or 
unrejected in the last 7 days. Recipients of 89 notifications are users with permission to review them, as well as authors of 90 previous translations or comments of the same string. 91 92 The command is designed to run on a weekly basis. 93 """ 94 self.stdout.write("Sending suggestion notifications.") 95 96 today = calendar.day_name[timezone.datetime.today().weekday()] 97 day = calendar.day_name[settings.SUGGESTION_NOTIFICATIONS_DAY] 98 99 if today != day and not options["force"]: 100 raise CommandError(f"Skipping. Command runs every {day}. Today is {today}.") 101 102 suggestions = self.get_suggestions() 103 104 data = defaultdict(set) 105 106 for suggestion in suggestions: 107 self.extract_notifications_data(data, suggestion) 108 109 for recipient, project_locales in data.items(): 110 verb = render_to_string( 111 "projects/suggestion_notification.jinja", 112 {"project_locales": project_locales}, 113 ) 114 115 notify.send(recipient, recipient=recipient, verb=verb) 116 117 self.stdout.write("Suggestion notifications sent.") 118 [end of pontoon/projects/management/commands/send_suggestion_notifications.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pontoon/projects/management/commands/send_suggestion_notifications.py b/pontoon/projects/management/commands/send_suggestion_notifications.py --- a/pontoon/projects/management/commands/send_suggestion_notifications.py +++ b/pontoon/projects/management/commands/send_suggestion_notifications.py @@ -107,11 +107,13 @@ self.extract_notifications_data(data, suggestion) for recipient, project_locales in data.items(): - verb = render_to_string( + description = render_to_string( "projects/suggestion_notification.jinja", {"project_locales": project_locales}, ) - notify.send(recipient, recipient=recipient, verb=verb) + notify.send( + recipient, recipient=recipient, verb="", description=description + ) - self.stdout.write("Suggestion notifications sent.") + self.stdout.write(f"Suggestion notifications sent to {len(data)} users.")
{"golden_diff": "diff --git a/pontoon/projects/management/commands/send_suggestion_notifications.py b/pontoon/projects/management/commands/send_suggestion_notifications.py\n--- a/pontoon/projects/management/commands/send_suggestion_notifications.py\n+++ b/pontoon/projects/management/commands/send_suggestion_notifications.py\n@@ -107,11 +107,13 @@\n self.extract_notifications_data(data, suggestion)\n \n for recipient, project_locales in data.items():\n- verb = render_to_string(\n+ description = render_to_string(\n \"projects/suggestion_notification.jinja\",\n {\"project_locales\": project_locales},\n )\n \n- notify.send(recipient, recipient=recipient, verb=verb)\n+ notify.send(\n+ recipient, recipient=recipient, verb=\"\", description=description\n+ )\n \n- self.stdout.write(\"Suggestion notifications sent.\")\n+ self.stdout.write(f\"Suggestion notifications sent to {len(data)} users.\")\n", "issue": "[notifications] Notify project Translators when suggestions are submitted\n*This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).*\n## [Bug 1398861](https://bugzilla.mozilla.org/show_bug.cgi?id=1398861)\nBug Reporter: @mathjazz\nCC: @MikkCZ, [email protected]\nSee also: https://bugzilla.mozilla.org/show_bug.cgi?id=1625102\n\nWe should probably do that periodically and aggregate notifications.\n", "before_files": [{"content": "import calendar\n\nfrom collections import defaultdict\nfrom datetime import timedelta\nfrom functools import cached_property\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.models import Prefetch, Q\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom notifications.signals import notify\n\nfrom pontoon.base.models import Comment, Locale, ProjectLocale, Translation\n\n\nclass Command(BaseCommand):\n help = \"Notify contributors about newly added unreviewed suggestions\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--force\",\n action=\"store_true\",\n dest=\"force\",\n default=False,\n help=\"Force run command, regardless of what day of the week it is\",\n )\n\n @cached_property\n def locale_reviewers(self):\n locales = Locale.objects.prefetch_related(\n Prefetch(\"managers_group__user_set\", to_attr=\"fetched_managers\"),\n Prefetch(\"translators_group__user_set\", to_attr=\"fetched_translators\"),\n )\n\n locale_reviewers = {}\n\n for locale in locales:\n managers = locale.managers_group.fetched_managers\n translators = locale.translators_group.fetched_translators\n locale_reviewers[locale] = managers + translators\n\n return locale_reviewers\n\n def extract_notifications_data(self, data, suggestion):\n locale = suggestion.locale\n entity = suggestion.entity\n project = entity.resource.project\n project_locale = ProjectLocale.objects.get(project=project, locale=locale)\n\n translations = Translation.objects.filter(entity=entity, locale=locale)\n recipients = set()\n\n # Users with permission to review suggestions\n recipients = recipients.union(self.locale_reviewers[locale])\n\n # Authors of previous translations of the same string\n recipients = recipients.union(User.objects.filter(translation__in=translations))\n\n # Authors of comments of previous translations\n translations_comments = Comment.objects.filter(translation__in=translations)\n recipients = recipients.union(\n User.objects.filter(comment__in=translations_comments)\n )\n\n # Authors of team 
comments of the same string\n team_comments = Comment.objects.filter(entity=entity, locale=locale)\n recipients = recipients.union(User.objects.filter(comment__in=team_comments))\n\n for recipient in recipients:\n data[recipient].add(project_locale)\n\n def get_suggestions(self):\n start = timezone.now() - timedelta(days=7)\n\n return Translation.objects.filter(\n approved=False, rejected=False, fuzzy=False\n ).filter(\n Q(date__gt=start)\n | Q(unapproved_date__gt=start)\n | Q(unrejected_date__gt=start)\n )\n\n def handle(self, *args, **options):\n \"\"\"\n This command sends notifications about newly created unreviewed suggestions that\n were submitted, unapproved or unrejected in the last 7 days. Recipients of\n notifications are users with permission to review them, as well as authors of\n previous translations or comments of the same string.\n\n The command is designed to run on a weekly basis.\n \"\"\"\n self.stdout.write(\"Sending suggestion notifications.\")\n\n today = calendar.day_name[timezone.datetime.today().weekday()]\n day = calendar.day_name[settings.SUGGESTION_NOTIFICATIONS_DAY]\n\n if today != day and not options[\"force\"]:\n raise CommandError(f\"Skipping. Command runs every {day}. Today is {today}.\")\n\n suggestions = self.get_suggestions()\n\n data = defaultdict(set)\n\n for suggestion in suggestions:\n self.extract_notifications_data(data, suggestion)\n\n for recipient, project_locales in data.items():\n verb = render_to_string(\n \"projects/suggestion_notification.jinja\",\n {\"project_locales\": project_locales},\n )\n\n notify.send(recipient, recipient=recipient, verb=verb)\n\n self.stdout.write(\"Suggestion notifications sent.\")\n", "path": "pontoon/projects/management/commands/send_suggestion_notifications.py"}]}
1,759
206
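Beyond switching `verb` to `description` in `notify.send`, the command's core pattern is aggregating recipients so each user receives one weekly digest rather than one notification per suggestion. A toy, ORM-free sketch of that aggregation (the suggestion and reviewer data here is invented for illustration):

```python
from collections import defaultdict

# Hypothetical stand-ins for the Translation/ProjectLocale ORM rows.
suggestions = [
    {"project_locale": "firefox/fr", "reviewers": ["alice", "bob"]},
    {"project_locale": "fenix/fr", "reviewers": ["alice"]},
]

# One set of project/locales per recipient -> one notification per user.
data = defaultdict(set)
for s in suggestions:
    for recipient in s["reviewers"]:
        data[recipient].add(s["project_locale"])

for recipient, project_locales in data.items():
    # The patch passes the rendered text as `description` and leaves
    # `verb` empty, matching how django-notifications renders messages.
    print(f"notify {recipient}: {sorted(project_locales)}")
```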
gh_patches_debug_9202
rasdani/github-patches
git_diff
speechbrain__speechbrain-2494
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PyPI install incorrectly ships a `tests` package ### Describe the bug If you check the `.whl` file of SB 1.0, for instance, you will notice that there is an empty `tests/` (with only the `__init__.py`). This is not a huge deal, but it has the unintended consequence that the user now has a `tests` package at their root level. ### Expected behaviour That package should not exist on normal or editable installs. ### To Reproduce ```python import tests ``` ```bash $ cat env/lib/python3.xx/speechbrain-xxx.dist-info/top_level.txt speechbrain tests ``` ### Environment Details _No response_ ### Relevant Log Output _No response_ ### Additional Context This is probably due to the way packages are specified in the `setup.py`. Does not seem like it would be linked to #2484 but it's worth investigating anyway. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 import os 3 import sys 4 import site 5 import setuptools 6 from distutils.core import setup 7 8 9 # Editable install in user site directory can be allowed with this hack: 10 # https://github.com/pypa/pip/issues/7953. 11 site.ENABLE_USER_SITE = "--user" in sys.argv[1:] 12 13 with open("README.md") as f: 14 long_description = f.read() 15 16 with open(os.path.join("speechbrain", "version.txt")) as f: 17 version = f.read().strip() 18 19 setup( 20 name="speechbrain", 21 version=version, 22 description="All-in-one speech toolkit in pure Python and Pytorch", 23 long_description=long_description, 24 long_description_content_type="text/markdown", 25 author="Mirco Ravanelli & Others", 26 author_email="[email protected]", 27 classifiers=[ 28 "Programming Language :: Python :: 3", 29 "License :: OSI Approved :: Apache Software License", 30 ], 31 packages=setuptools.find_packages(), 32 package_data={"speechbrain": ["version.txt", "log-config.yaml"]}, 33 install_requires=[ 34 "hyperpyyaml", 35 "joblib", 36 "numpy", 37 "packaging", 38 "scipy", 39 "sentencepiece", 40 "torch>=1.9", 41 "torchaudio", 42 "tqdm", 43 "huggingface_hub", 44 ], 45 python_requires=">=3.8", 46 url="https://speechbrain.github.io/", 47 ) 48 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,7 +28,9 @@ "Programming Language :: Python :: 3", "License :: OSI Approved :: Apache Software License", ], - packages=setuptools.find_packages(), + # we don't want to ship the tests package. for future proofing, also + # exclude any tests subpackage (if we ever define __init__.py there) + packages=setuptools.find_packages(exclude=["tests", "tests.*"]), package_data={"speechbrain": ["version.txt", "log-config.yaml"]}, install_requires=[ "hyperpyyaml",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,9 @@\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n- packages=setuptools.find_packages(),\n+ # we don't want to ship the tests package. for future proofing, also\n+ # exclude any tests subpackage (if we ever define __init__.py there)\n+ packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"speechbrain\": [\"version.txt\", \"log-config.yaml\"]},\n install_requires=[\n \"hyperpyyaml\",\n", "issue": "PyPI install incorrectly ships a `tests` package\n### Describe the bug\r\n\r\nIf you check the `.whl` file of SB 1.0, for instance, you will notice that there is an empty `tests/` (with only the `__init__.py`). This is not a huge deal, but it has the unintended consequence that the user now has a `tests` package at their root level.\r\n\r\n### Expected behaviour\r\n\r\nThat package should not exist on normal or editable installs.\r\n\r\n### To Reproduce\r\n\r\n```python\r\nimport tests\r\n```\r\n\r\n```bash\r\n$ cat env/lib/python3.xx/speechbrain-xxx.dist-info/top_level.txt \r\nspeechbrain\r\ntests\r\n```\r\n\r\n### Environment Details\r\n\r\n_No response_\r\n\r\n### Relevant Log Output\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\nThis is probably due to the way packages are specified in the `setup.py`.\r\n\r\nDoes not seem like it would be linked to #2484 but it's worth investigating anyway.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport site\nimport setuptools\nfrom distutils.core import setup\n\n\n# Editable install in user site directory can be allowed with this hack:\n# https://github.com/pypa/pip/issues/7953.\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nwith open(os.path.join(\"speechbrain\", \"version.txt\")) as f:\n version = f.read().strip()\n\nsetup(\n name=\"speechbrain\",\n version=version,\n description=\"All-in-one speech toolkit in pure Python and Pytorch\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Mirco Ravanelli & Others\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n packages=setuptools.find_packages(),\n package_data={\"speechbrain\": [\"version.txt\", \"log-config.yaml\"]},\n install_requires=[\n \"hyperpyyaml\",\n \"joblib\",\n \"numpy\",\n \"packaging\",\n \"scipy\",\n \"sentencepiece\",\n \"torch>=1.9\",\n \"torchaudio\",\n \"tqdm\",\n \"huggingface_hub\",\n ],\n python_requires=\">=3.8\",\n url=\"https://speechbrain.github.io/\",\n)\n", "path": "setup.py"}]}
1,132
146
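The fix is a one-argument change to `find_packages`. Running the following from a checkout root (a sketch; the printed list depends on your working directory) shows the effect: without the `exclude`, any top-level `tests/__init__.py` lands in the wheel's `top_level.txt`.

```python
import setuptools

# Excluding "tests" and any "tests.*" subpackages keeps the test tree
# out of the wheel, so `import tests` no longer resolves after install.
packages = setuptools.find_packages(exclude=["tests", "tests.*"])
print(packages)  # expect only speechbrain* entries, never "tests"
```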
gh_patches_debug_12383
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-1523
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Performance drop when activating gradient clipping Hello all, I experienced a substantial drop in computation time when activating gradient clipping (by passing a non-zero value to the keyword argument `gradient_clip_val` when initializing the Trainer). I noticed that in the current implementation of the `clipping_gradient` method in pytorch-lightning/trainer/training_tricks.py redundant computations are made by first computing the 2-norm and second squaring this result, which could be shortened by computing the sum of squares directly. This saves one square root and squaring operation per parameter set. Best, Jonas ### Environment ``` cuda: GPU: available: False version: None packages: numpy: 1.18.1 pyTorch_debug: False pyTorch_version: 1.4.0 pytorch-lightning: 0.7.4-dev tensorboard: 2.2.1 tqdm: 4.45.0 system: OS: Darwin architecture: 64bit processor: i386 python: 3.8.2 version: Darwin Kernel Version 19.4.0: Wed Mar 4 22:28:40 PST 2020; root:xnu-6153.101.6~15/RELEASE_X86_64 ``` ### Additional context I trained a relatively small (two-layered) MLP on MNIST; perhaps this performance drop does not become that apparent when training on larger network architectures. </issue> <code> [start of pytorch_lightning/trainer/training_tricks.py] 1 import math 2 import sys 3 from abc import ABC, abstractmethod 4 5 import torch 6 from torch import Tensor 7 8 from pytorch_lightning import _logger as log 9 from pytorch_lightning.callbacks import GradientAccumulationScheduler 10 11 EPSILON = 1e-6 12 EPSILON_FP16 = 1e-5 13 14 15 class TrainerTrainingTricksMixin(ABC): 16 17 # this is just a summary on variables used in this abstract class, 18 # the proper values/initialisation should be done in child class 19 gradient_clip_val: ... 20 precision: ... 21 22 @abstractmethod 23 def get_model(self): 24 """Warning: this is just empty shell for code implemented in other class.""" 25 26 def clip_gradients(self): 27 # this code is a modification of torch.nn.utils.clip_grad_norm_ 28 # with TPU support based on https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md 29 if self.gradient_clip_val > 0: 30 model = self.get_model() 31 parameters = model.parameters() 32 max_norm = float(self.gradient_clip_val) 33 norm_type = float(2.0) 34 if isinstance(parameters, torch.Tensor): 35 parameters = [parameters] 36 parameters = list(filter(lambda p: p.grad is not None, parameters)) 37 if norm_type == math.inf: 38 total_norm = max(p.grad.data.abs().max() for p in parameters) 39 else: 40 device = parameters[0].device 41 total_norm = torch.zeros([], device=device if parameters else None) 42 for p in parameters: 43 param_norm = p.grad.data.norm(norm_type) ** norm_type 44 total_norm.add_(param_norm) 45 total_norm = (total_norm ** (1. 
/ norm_type)) 46 eps = EPSILON_FP16 if self.precision == 16 else EPSILON 47 clip_coef = torch.tensor(max_norm, device=device) / (total_norm + eps) 48 for p in parameters: 49 p.grad.data.mul_(torch.where(clip_coef < 1, clip_coef, torch.tensor(1., device=device))) 50 51 def print_nan_gradients(self) -> None: 52 model = self.get_model() 53 for param in model.parameters(): 54 if (param.grad is not None) and torch.isnan(param.grad.float()).any(): 55 log.info(param, param.grad) 56 57 def detect_nan_tensors(self, loss: Tensor) -> None: 58 model = self.get_model() 59 60 # check if loss is nan 61 if not torch.isfinite(loss).all(): 62 raise ValueError( 63 'The loss returned in `training_step` is nan or inf.' 64 ) 65 # check if a network weight is nan 66 for name, param in model.named_parameters(): 67 if not torch.isfinite(param).all(): 68 self.print_nan_gradients() 69 raise ValueError( 70 f'Detected nan and/or inf values in `{name}`.' 71 ' Check your forward pass for numerically unstable operations.' 72 ) 73 74 def configure_accumulated_gradients(self, accumulate_grad_batches): 75 if isinstance(accumulate_grad_batches, dict): 76 self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches) 77 elif isinstance(accumulate_grad_batches, int): 78 schedule = {1: accumulate_grad_batches} 79 self.accumulation_scheduler = GradientAccumulationScheduler(schedule) 80 else: 81 raise TypeError("Gradient accumulation supports only int and dict types") 82 [end of pytorch_lightning/trainer/training_tricks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pytorch_lightning/trainer/training_tricks.py b/pytorch_lightning/trainer/training_tricks.py --- a/pytorch_lightning/trainer/training_tricks.py +++ b/pytorch_lightning/trainer/training_tricks.py @@ -40,7 +40,7 @@ device = parameters[0].device total_norm = torch.zeros([], device=device if parameters else None) for p in parameters: - param_norm = p.grad.data.norm(norm_type) ** norm_type + param_norm = p.grad.data.pow(norm_type).sum() total_norm.add_(param_norm) total_norm = (total_norm ** (1. / norm_type)) eps = EPSILON_FP16 if self.precision == 16 else EPSILON
{"golden_diff": "diff --git a/pytorch_lightning/trainer/training_tricks.py b/pytorch_lightning/trainer/training_tricks.py\n--- a/pytorch_lightning/trainer/training_tricks.py\n+++ b/pytorch_lightning/trainer/training_tricks.py\n@@ -40,7 +40,7 @@\n device = parameters[0].device\n total_norm = torch.zeros([], device=device if parameters else None)\n for p in parameters:\n- param_norm = p.grad.data.norm(norm_type) ** norm_type\n+ param_norm = p.grad.data.pow(norm_type).sum()\n total_norm.add_(param_norm)\n total_norm = (total_norm ** (1. / norm_type))\n eps = EPSILON_FP16 if self.precision == 16 else EPSILON\n", "issue": "Performance drop when activating gradient clipping\nHello all,\r\n\r\nI experienced a substantial drop in computation time when activating gradient clipping (by passing a non-zero value to the keyword argument `gradient_clip_val` when initializing the Trainer).\r\n\r\nI noticed that in the current implementation of the `clipping_gradient` method in pytorch-lightning/trainer/training_tricks.py redundant computations are made by first computing the 2-norm and second squaring this result, which could be shortened by computing the sum of squares directly. This saves one square root and squaring operation per parameter set.\r\n\r\nBest,\r\nJonas\r\n\r\n### Environment\r\n\r\n```\r\ncuda:\r\n\tGPU:\r\n\tavailable: False\r\n\tversion: None\r\npackages:\r\n\tnumpy: 1.18.1\r\n\tpyTorch_debug: False\r\n\tpyTorch_version: 1.4.0\r\n\tpytorch-lightning: 0.7.4-dev\r\n\ttensorboard: 2.2.1\r\n\ttqdm: 4.45.0\r\nsystem:\r\n\tOS: Darwin\r\n\tarchitecture:\r\n\t\t64bit\r\n\t\t\r\n\tprocessor: i386\r\n\tpython: 3.8.2\r\n\tversion: Darwin Kernel Version 19.4.0: Wed Mar 4 22:28:40 PST 2020; root:xnu-6153.101.6~15/RELEASE_X86_64\r\n```\r\n\r\n### Additional context\r\n\r\nI trained a relatively small (two-layered) MLP on MNIST; perhaps this performance drop does not become that apparent when training on larger network architectures.\n", "before_files": [{"content": "import math\nimport sys\nfrom abc import ABC, abstractmethod\n\nimport torch\nfrom torch import Tensor\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks import GradientAccumulationScheduler\n\nEPSILON = 1e-6\nEPSILON_FP16 = 1e-5\n\n\nclass TrainerTrainingTricksMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n gradient_clip_val: ...\n precision: ...\n\n @abstractmethod\n def get_model(self):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def clip_gradients(self):\n # this code is a modification of torch.nn.utils.clip_grad_norm_\n # with TPU support based on https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md\n if self.gradient_clip_val > 0:\n model = self.get_model()\n parameters = model.parameters()\n max_norm = float(self.gradient_clip_val)\n norm_type = float(2.0)\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n if norm_type == math.inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n device = parameters[0].device\n total_norm = torch.zeros([], device=device if parameters else None)\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type) ** norm_type\n total_norm.add_(param_norm)\n total_norm = (total_norm ** (1. 
/ norm_type))\n eps = EPSILON_FP16 if self.precision == 16 else EPSILON\n clip_coef = torch.tensor(max_norm, device=device) / (total_norm + eps)\n for p in parameters:\n p.grad.data.mul_(torch.where(clip_coef < 1, clip_coef, torch.tensor(1., device=device)))\n\n def print_nan_gradients(self) -> None:\n model = self.get_model()\n for param in model.parameters():\n if (param.grad is not None) and torch.isnan(param.grad.float()).any():\n log.info(param, param.grad)\n\n def detect_nan_tensors(self, loss: Tensor) -> None:\n model = self.get_model()\n\n # check if loss is nan\n if not torch.isfinite(loss).all():\n raise ValueError(\n 'The loss returned in `training_step` is nan or inf.'\n )\n # check if a network weight is nan\n for name, param in model.named_parameters():\n if not torch.isfinite(param).all():\n self.print_nan_gradients()\n raise ValueError(\n f'Detected nan and/or inf values in `{name}`.'\n ' Check your forward pass for numerically unstable operations.'\n )\n\n def configure_accumulated_gradients(self, accumulate_grad_batches):\n if isinstance(accumulate_grad_batches, dict):\n self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)\n elif isinstance(accumulate_grad_batches, int):\n schedule = {1: accumulate_grad_batches}\n self.accumulation_scheduler = GradientAccumulationScheduler(schedule)\n else:\n raise TypeError(\"Gradient accumulation supports only int and dict types\")\n", "path": "pytorch_lightning/trainer/training_tricks.py"}]}
1,764
170
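The performance claim in the issue is easy to check in isolation: `p.norm(norm_type) ** norm_type` computes a root and immediately undoes it, while `p.pow(norm_type).sum()` is a single reduction. A small sketch with random tensors standing in for gradients (these are not real `.grad` values):

```python
import torch

params = [torch.randn(1000, 1000) for _ in range(4)]
norm_type = 2.0

# Before: per-tensor norm (includes a sqrt), then raise back to power 2.
slow = sum(p.norm(norm_type) ** norm_type for p in params)

# After: sum of |p|^2 directly, one reduction, no sqrt/pow round trip.
fast = sum(p.pow(norm_type).sum() for p in params)

total_norm = fast ** (1.0 / norm_type)  # the single root happens once, here
assert torch.allclose(slow, fast)
```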
gh_patches_debug_5057
rasdani/github-patches
git_diff
pypa__pip-2308
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pip6.0.4 can not work under python3.4, windows7 64bit. use" pip list" command, met a error: ``` python Traceback (most recent call last): File "C:\Python34\Scripts\pip-script.py", line 9, in <module> load_entry_point('pip==6.0.4', 'console_scripts', 'pip')() File "C:\Python34\lib\site-packages\pip-6.0.4-py3.4.egg\pip\__init__.py", line 217, in main return command.main(cmd_args) File "C:\Python34\lib\site-packages\pip-6.0.4-py3.4.egg\pip\basecommand.py", l ine 122, in main write_debug_log = check_path_owner(debug_log_path, os.geteuid()) AttributeError: 'module' object has no attribute 'geteuid' ``` other commands can not work also. </issue> <code> [start of pip/utils/filesystem.py] 1 import os.path 2 3 from pip.compat import get_path_uid 4 5 6 def check_path_owner(path, uid): 7 previous = None 8 while path != previous: 9 if os.path.lexists(path): 10 # Actually do the ownership check 11 try: 12 if get_path_uid(path) != os.geteuid(): 13 return False 14 except OSError: 15 return False 16 return True 17 else: 18 previous, path = path, os.path.dirname(path) 19 [end of pip/utils/filesystem.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py --- a/pip/utils/filesystem.py +++ b/pip/utils/filesystem.py @@ -1,9 +1,15 @@ +import os import os.path from pip.compat import get_path_uid def check_path_owner(path, uid): + # If we don't have a way to check the effective uid of this process, then + # we'll just assume that we own the directory. + if not hasattr(os, "geteuid"): + return True + previous = None while path != previous: if os.path.lexists(path):
{"golden_diff": "diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py\n--- a/pip/utils/filesystem.py\n+++ b/pip/utils/filesystem.py\n@@ -1,9 +1,15 @@\n+import os\n import os.path\n \n from pip.compat import get_path_uid\n \n \n def check_path_owner(path, uid):\n+ # If we don't have a way to check the effective uid of this process, then\n+ # we'll just assume that we own the directory.\n+ if not hasattr(os, \"geteuid\"):\n+ return True\n+\n previous = None\n while path != previous:\n if os.path.lexists(path):\n", "issue": "pip6.0.4 can not work under python3.4, windows7 64bit.\nuse\" pip list\" command, met a error:\n\n``` python\nTraceback (most recent call last):\n File \"C:\\Python34\\Scripts\\pip-script.py\", line 9, in <module>\n load_entry_point('pip==6.0.4', 'console_scripts', 'pip')()\n File \"C:\\Python34\\lib\\site-packages\\pip-6.0.4-py3.4.egg\\pip\\__init__.py\", line\n 217, in main\n return command.main(cmd_args)\n File \"C:\\Python34\\lib\\site-packages\\pip-6.0.4-py3.4.egg\\pip\\basecommand.py\", l\nine 122, in main\n write_debug_log = check_path_owner(debug_log_path, os.geteuid())\nAttributeError: 'module' object has no attribute 'geteuid'\n```\n\nother commands can not work also.\n\n", "before_files": [{"content": "import os.path\n\nfrom pip.compat import get_path_uid\n\n\ndef check_path_owner(path, uid):\n previous = None\n while path != previous:\n if os.path.lexists(path):\n # Actually do the ownership check\n try:\n if get_path_uid(path) != os.geteuid():\n return False\n except OSError:\n return False\n return True\n else:\n previous, path = path, os.path.dirname(path)\n", "path": "pip/utils/filesystem.py"}]}
890
144
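The crash is simply that `os.geteuid` does not exist on Windows, so the patch short-circuits the ownership check there. A simplified, runnable version of the guarded check (the real function also walks up past nonexistent path components):

```python
import os

def check_path_owner(path):
    # os.geteuid is POSIX-only; on Windows, assume we own the path
    # instead of raising AttributeError as in the traceback above.
    if not hasattr(os, "geteuid"):
        return True
    return os.stat(path).st_uid == os.geteuid()

print(check_path_owner("."))
```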
gh_patches_debug_16838
rasdani/github-patches
git_diff
facebookresearch__hydra-252
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Hydra client API Create Hydra client API to allow jobs to interact with Hydra. one immediate need is to allow executed code to find out what is the original working directory is was executed from. </issue> <code> [start of hydra/utils.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 import logging.config 3 4 from omegaconf import OmegaConf 5 6 # pylint: disable=C0103 7 log = logging.getLogger(__name__) 8 9 10 def get_method(path): 11 return get_class(path) 12 13 14 def get_class(path): 15 try: 16 from importlib import import_module 17 18 module_path, _, class_name = path.rpartition(".") 19 mod = import_module(module_path) 20 try: 21 klass = getattr(mod, class_name) 22 except AttributeError: 23 raise ImportError( 24 "Class {} is not in module {}".format(class_name, module_path) 25 ) 26 return klass 27 except ValueError as e: 28 log.error("Error initializing class " + path) 29 raise e 30 31 32 def get_static_method(full_method_name): 33 try: 34 spl = full_method_name.split(".") 35 method_name = spl.pop() 36 class_name = ".".join(spl) 37 clz = get_class(class_name) 38 return getattr(clz, method_name) 39 except Exception as e: 40 log.error("Error getting static method {} : {}".format(full_method_name, e)) 41 raise e 42 43 44 def instantiate(config, *args, **kwargs): 45 assert config is not None, "Input config is None" 46 try: 47 clazz = get_class(config["class"]) 48 params = config.params if "params" in config else OmegaConf.create() 49 params.merge_with(OmegaConf.create(kwargs)) 50 return clazz(*args, **params) 51 except Exception as e: 52 log.error("Error instantiating {} : {}".format(config["class"], e)) 53 raise e 54 [end of hydra/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hydra/utils.py b/hydra/utils.py --- a/hydra/utils.py +++ b/hydra/utils.py @@ -3,6 +3,9 @@ from omegaconf import OmegaConf +from hydra._internal.pathlib import Path +from hydra.plugins.common.utils import HydraConfig + # pylint: disable=C0103 log = logging.getLogger(__name__) @@ -51,3 +54,23 @@ except Exception as e: log.error("Error instantiating {} : {}".format(config["class"], e)) raise e + + +def get_original_cwd(): + return HydraConfig().hydra.runtime.cwd + + +def to_absolute_path(path): + """ + converts the specified path to be absolute path. + if the input path is relative, it's interpreted as relative to the original working directory + if it's absolute, it's returned as is + :param path: + :return: + """ + path = Path(path) + if path.is_absolute(): + ret = path + else: + ret = Path(get_original_cwd()) / path + return str(ret)
{"golden_diff": "diff --git a/hydra/utils.py b/hydra/utils.py\n--- a/hydra/utils.py\n+++ b/hydra/utils.py\n@@ -3,6 +3,9 @@\n \n from omegaconf import OmegaConf\n \n+from hydra._internal.pathlib import Path\n+from hydra.plugins.common.utils import HydraConfig\n+\n # pylint: disable=C0103\n log = logging.getLogger(__name__)\n \n@@ -51,3 +54,23 @@\n except Exception as e:\n log.error(\"Error instantiating {} : {}\".format(config[\"class\"], e))\n raise e\n+\n+\n+def get_original_cwd():\n+ return HydraConfig().hydra.runtime.cwd\n+\n+\n+def to_absolute_path(path):\n+ \"\"\"\n+ converts the specified path to be absolute path.\n+ if the input path is relative, it's interpreted as relative to the original working directory\n+ if it's absolute, it's returned as is\n+ :param path:\n+ :return:\n+ \"\"\"\n+ path = Path(path)\n+ if path.is_absolute():\n+ ret = path\n+ else:\n+ ret = Path(get_original_cwd()) / path\n+ return str(ret)\n", "issue": "Hydra client API\nCreate Hydra client API to allow jobs to interact with Hydra.\r\none immediate need is to allow executed code to find out what is the original working directory is was executed from.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging.config\n\nfrom omegaconf import OmegaConf\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef get_method(path):\n return get_class(path)\n\n\ndef get_class(path):\n try:\n from importlib import import_module\n\n module_path, _, class_name = path.rpartition(\".\")\n mod = import_module(module_path)\n try:\n klass = getattr(mod, class_name)\n except AttributeError:\n raise ImportError(\n \"Class {} is not in module {}\".format(class_name, module_path)\n )\n return klass\n except ValueError as e:\n log.error(\"Error initializing class \" + path)\n raise e\n\n\ndef get_static_method(full_method_name):\n try:\n spl = full_method_name.split(\".\")\n method_name = spl.pop()\n class_name = \".\".join(spl)\n clz = get_class(class_name)\n return getattr(clz, method_name)\n except Exception as e:\n log.error(\"Error getting static method {} : {}\".format(full_method_name, e))\n raise e\n\n\ndef instantiate(config, *args, **kwargs):\n assert config is not None, \"Input config is None\"\n try:\n clazz = get_class(config[\"class\"])\n params = config.params if \"params\" in config else OmegaConf.create()\n params.merge_with(OmegaConf.create(kwargs))\n return clazz(*args, **params)\n except Exception as e:\n log.error(\"Error instantiating {} : {}\".format(config[\"class\"], e))\n raise e\n", "path": "hydra/utils.py"}]}
1,019
265
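The new client API boils down to one piece of state (the original working directory, which Hydra changes per run) plus one path rule. A dependency-free sketch with the original cwd passed explicitly instead of read from `HydraConfig`, using POSIX-style paths:

```python
from pathlib import Path

def to_absolute_path(path, original_cwd):
    # Relative inputs are resolved against the directory the job was
    # launched from; absolute inputs pass through unchanged.
    p = Path(path)
    return str(p if p.is_absolute() else Path(original_cwd) / p)

print(to_absolute_path("data/train.csv", "/home/user/project"))
# -> /home/user/project/data/train.csv
print(to_absolute_path("/tmp/cache", "/home/user/project"))
# -> /tmp/cache
```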
gh_patches_debug_33267
rasdani/github-patches
git_diff
kserve__kserve-2673
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Numpy 1.24 breaking changes /kind bug **What steps did you take and what happened:** [A clear and concise description of what the bug is.] numpy 1.24 was released on 12/18 https://pypi.org/project/numpy/1.24.0/ https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations ``` kserve/test/test_v1beta1_transformer_spec.py - AttributeError: module 'numpy' has no attribute 'float' [1927](https://github.com/kserve/kserve/actions/runs/3731363145/jobs/6329505081#step:5:1928) ERROR kserve/test/test_v1beta1_transformers_config.py - AttributeError: module 'numpy' has no attribute 'float' [1928](https://github.com/kserve/kserve/actions/runs/3731363145/jobs/6329505081#step:5:1929) ERROR kserve/test/test_v1beta1_triton_spec.py - AttributeError: module 'numpy' has no attribute 'float' [1929](https://github.com/kserve/kserve/actions/runs/3731363145/jobs/6329505081#step:5:1930) ERROR kserve/test/test_v1beta1_xg_boost_spec.py - AttributeError: module 'numpy' has no attribute 'float' ``` **What did you expect to happen:** **What's the InferenceService yaml:** [To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output] **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] **Environment:** - Istio Version: - Knative Version: - KServe Version: - Kubeflow version: - Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm] - Minikube/Kind version: - Kubernetes version: (use `kubectl version`): - OS (e.g. from `/etc/os-release`): </issue> <code> [start of python/kserve/kserve/api/watch.py] 1 # Copyright 2021 The KServe Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import time 16 from kubernetes import client 17 from kubernetes import watch as k8s_watch 18 from table_logger import TableLogger 19 20 from ..constants import constants 21 from ..utils import utils 22 23 24 def isvc_watch(name=None, namespace=None, timeout_seconds=600, generation=0): 25 """Watch the created or patched InferenceService in the specified namespace""" 26 27 if namespace is None: 28 namespace = utils.get_default_target_namespace() 29 30 tbl = TableLogger( 31 columns='NAME,READY,PREV,LATEST,URL', 32 colwidth={'NAME': 20, 'READY': 10, 'PREV': 25, 'LATEST': 25, 'URL': 65}, 33 border=False) 34 35 stream = k8s_watch.Watch().stream( 36 client.CustomObjectsApi().list_namespaced_custom_object, 37 constants.KSERVE_GROUP, 38 constants.KSERVE_V1BETA1_VERSION, 39 namespace, 40 constants.KSERVE_PLURAL, 41 timeout_seconds=timeout_seconds) 42 43 for event in stream: 44 isvc = event['object'] 45 isvc_name = isvc['metadata']['name'] 46 if name and name != isvc_name: 47 continue 48 else: 49 status = 'Unknown' 50 if isvc.get('status', ''): 51 url = isvc['status'].get('url', '') 52 traffic = isvc['status'].get('components', {}).get( 53 'predictor', {}).get('traffic', []) 54 traffic_percent = 100 55 if constants.OBSERVED_GENERATION in isvc['status']: 56 observed_generation = isvc['status'][constants.OBSERVED_GENERATION] 57 for t in traffic: 58 if t["latestRevision"]: 59 traffic_percent = t["percent"] 60 61 if generation != 0 and observed_generation != generation: 62 continue 63 for condition in isvc['status'].get('conditions', {}): 64 if condition.get('type', '') == 'Ready': 65 status = condition.get('status', 'Unknown') 66 tbl(isvc_name, status, 100-traffic_percent, traffic_percent, url) 67 if status == 'True': 68 break 69 70 else: 71 tbl(isvc_name, status, '', '', '') 72 # Sleep 2 to avoid status section is not generated within a very short time. 73 time.sleep(2) 74 continue 75 [end of python/kserve/kserve/api/watch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/kserve/kserve/api/watch.py b/python/kserve/kserve/api/watch.py --- a/python/kserve/kserve/api/watch.py +++ b/python/kserve/kserve/api/watch.py @@ -13,9 +13,10 @@ # limitations under the License. import time + from kubernetes import client from kubernetes import watch as k8s_watch -from table_logger import TableLogger +from tabulate import tabulate from ..constants import constants from ..utils import utils @@ -27,10 +28,8 @@ if namespace is None: namespace = utils.get_default_target_namespace() - tbl = TableLogger( - columns='NAME,READY,PREV,LATEST,URL', - colwidth={'NAME': 20, 'READY': 10, 'PREV': 25, 'LATEST': 25, 'URL': 65}, - border=False) + headers = ['NAME', 'READY', 'PREV', 'LATEST', 'URL'] + table_fmt = 'plain' stream = k8s_watch.Watch().stream( client.CustomObjectsApi().list_namespaced_custom_object, @@ -63,12 +62,13 @@ for condition in isvc['status'].get('conditions', {}): if condition.get('type', '') == 'Ready': status = condition.get('status', 'Unknown') - tbl(isvc_name, status, 100-traffic_percent, traffic_percent, url) + print(tabulate([[isvc_name, status, 100 - traffic_percent, traffic_percent, url]], + headers=headers, tablefmt=table_fmt)) if status == 'True': break else: - tbl(isvc_name, status, '', '', '') + print(tabulate([[isvc_name, status, '', '', '']], headers=headers, tablefmt=table_fmt)) # Sleep 2 to avoid status section is not generated within a very short time. time.sleep(2) continue
{"golden_diff": "diff --git a/python/kserve/kserve/api/watch.py b/python/kserve/kserve/api/watch.py\n--- a/python/kserve/kserve/api/watch.py\n+++ b/python/kserve/kserve/api/watch.py\n@@ -13,9 +13,10 @@\n # limitations under the License.\n \n import time\n+\n from kubernetes import client\n from kubernetes import watch as k8s_watch\n-from table_logger import TableLogger\n+from tabulate import tabulate\n \n from ..constants import constants\n from ..utils import utils\n@@ -27,10 +28,8 @@\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n \n- tbl = TableLogger(\n- columns='NAME,READY,PREV,LATEST,URL',\n- colwidth={'NAME': 20, 'READY': 10, 'PREV': 25, 'LATEST': 25, 'URL': 65},\n- border=False)\n+ headers = ['NAME', 'READY', 'PREV', 'LATEST', 'URL']\n+ table_fmt = 'plain'\n \n stream = k8s_watch.Watch().stream(\n client.CustomObjectsApi().list_namespaced_custom_object,\n@@ -63,12 +62,13 @@\n for condition in isvc['status'].get('conditions', {}):\n if condition.get('type', '') == 'Ready':\n status = condition.get('status', 'Unknown')\n- tbl(isvc_name, status, 100-traffic_percent, traffic_percent, url)\n+ print(tabulate([[isvc_name, status, 100 - traffic_percent, traffic_percent, url]],\n+ headers=headers, tablefmt=table_fmt))\n if status == 'True':\n break\n \n else:\n- tbl(isvc_name, status, '', '', '')\n+ print(tabulate([[isvc_name, status, '', '', '']], headers=headers, tablefmt=table_fmt))\n # Sleep 2 to avoid status section is not generated within a very short time.\n time.sleep(2)\n continue\n", "issue": "Numpy 1.24 breaking changes \n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nnumpy 1.24 was released on 12/18 https://pypi.org/project/numpy/1.24.0/\r\n\r\nhttps://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\r\n```\r\nkserve/test/test_v1beta1_transformer_spec.py - AttributeError: module 'numpy' has no attribute 'float'\r\n[1927](https://github.com/kserve/kserve/actions/runs/3731363145/jobs/6329505081#step:5:1928)\r\nERROR kserve/test/test_v1beta1_transformers_config.py - AttributeError: module 'numpy' has no attribute 'float'\r\n[1928](https://github.com/kserve/kserve/actions/runs/3731363145/jobs/6329505081#step:5:1929)\r\nERROR kserve/test/test_v1beta1_triton_spec.py - AttributeError: module 'numpy' has no attribute 'float'\r\n[1929](https://github.com/kserve/kserve/actions/runs/3731363145/jobs/6329505081#step:5:1930)\r\nERROR kserve/test/test_v1beta1_xg_boost_spec.py - AttributeError: module 'numpy' has no attribute 'float'\r\n```\r\n\r\n**What did you expect to happen:**\r\n\r\n\r\n**What's the InferenceService yaml:**\r\n[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KServe Version:\r\n- Kubeflow version:\r\n- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube/Kind version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. 
from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom kubernetes import client\nfrom kubernetes import watch as k8s_watch\nfrom table_logger import TableLogger\n\nfrom ..constants import constants\nfrom ..utils import utils\n\n\ndef isvc_watch(name=None, namespace=None, timeout_seconds=600, generation=0):\n \"\"\"Watch the created or patched InferenceService in the specified namespace\"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n tbl = TableLogger(\n columns='NAME,READY,PREV,LATEST,URL',\n colwidth={'NAME': 20, 'READY': 10, 'PREV': 25, 'LATEST': 25, 'URL': 65},\n border=False)\n\n stream = k8s_watch.Watch().stream(\n client.CustomObjectsApi().list_namespaced_custom_object,\n constants.KSERVE_GROUP,\n constants.KSERVE_V1BETA1_VERSION,\n namespace,\n constants.KSERVE_PLURAL,\n timeout_seconds=timeout_seconds)\n\n for event in stream:\n isvc = event['object']\n isvc_name = isvc['metadata']['name']\n if name and name != isvc_name:\n continue\n else:\n status = 'Unknown'\n if isvc.get('status', ''):\n url = isvc['status'].get('url', '')\n traffic = isvc['status'].get('components', {}).get(\n 'predictor', {}).get('traffic', [])\n traffic_percent = 100\n if constants.OBSERVED_GENERATION in isvc['status']:\n observed_generation = isvc['status'][constants.OBSERVED_GENERATION]\n for t in traffic:\n if t[\"latestRevision\"]:\n traffic_percent = t[\"percent\"]\n\n if generation != 0 and observed_generation != generation:\n continue\n for condition in isvc['status'].get('conditions', {}):\n if condition.get('type', '') == 'Ready':\n status = condition.get('status', 'Unknown')\n tbl(isvc_name, status, 100-traffic_percent, traffic_percent, url)\n if status == 'True':\n break\n\n else:\n tbl(isvc_name, status, '', '', '')\n # Sleep 2 to avoid status section is not generated within a very short time.\n time.sleep(2)\n continue\n", "path": "python/kserve/kserve/api/watch.py"}]}
1,831
451
gh_patches_debug_20397
rasdani/github-patches
git_diff
CTFd__CTFd-1824
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Empty and null prerequisites can be added from admin UI **Environment**: - CTFd Version/Commit: 843546b (tip of master) - Operating System: Linux - Web Browser and Version: Firefox **What happened?** Adding a prerequisite to a challenge without selecting a valid challenge entry in the drop down results in either `""` (selecting the "--" item in the drop down) or `null` (not selecting anything in the dropdown). This also breaks rendering of any previously added prerequisites when the UI is reloaded. **What did you expect to happen?** The backend should reject invalid prerequisites (probably via the `ChallengeSchema`?) and the frontend also shouldn't make it easy to submit them. **How to reproduce your issue** 1. Create two challenges A and B 2. VIsit the admin UI page to edit B and click on the "Requirements" tab 3. Select A in the dropdown and click "Add Prerequisite" 4. Select the "--" entry in the dropdown and click "Add Prerequisite" again - observe rendering breakage 5. Solve A in either user or teams mode 6. Query `/api/v1/challenges` and observe that B is not present 7. Visit the admin UI again and select nothing in the requirements dropdown (it will remain blank), then click "Add Prerequisite" once more 8. Check the requirements for the B in the DB/from the ORM. They will be `[<ID of A>, "", None]` **Any associated stack traces or error logs** </issue> <code> [start of CTFd/schemas/challenges.py] 1 from marshmallow import validate 2 from marshmallow_sqlalchemy import field_for 3 4 from CTFd.models import Challenges, ma 5 6 7 class ChallengeSchema(ma.ModelSchema): 8 class Meta: 9 model = Challenges 10 include_fk = True 11 dump_only = ("id",) 12 13 name = field_for( 14 Challenges, 15 "name", 16 validate=[ 17 validate.Length( 18 min=0, 19 max=80, 20 error="Challenge could not be saved. Challenge name too long", 21 ) 22 ], 23 ) 24 25 category = field_for( 26 Challenges, 27 "category", 28 validate=[ 29 validate.Length( 30 min=0, 31 max=80, 32 error="Challenge could not be saved. Challenge category too long", 33 ) 34 ], 35 ) 36 37 description = field_for( 38 Challenges, 39 "description", 40 allow_none=True, 41 validate=[ 42 validate.Length( 43 min=0, 44 max=65535, 45 error="Challenge could not be saved. Challenge description too long", 46 ) 47 ], 48 ) 49 [end of CTFd/schemas/challenges.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/schemas/challenges.py b/CTFd/schemas/challenges.py --- a/CTFd/schemas/challenges.py +++ b/CTFd/schemas/challenges.py @@ -1,9 +1,29 @@ from marshmallow import validate +from marshmallow.exceptions import ValidationError from marshmallow_sqlalchemy import field_for from CTFd.models import Challenges, ma +class ChallengeRequirementsValidator(validate.Validator): + default_message = "Error parsing challenge requirements" + + def __init__(self, error=None): + self.error = error or self.default_message + + def __call__(self, value): + if isinstance(value, dict) is False: + raise ValidationError(self.default_message) + + prereqs = value.get("prerequisites", []) + if all(prereqs) is False: + raise ValidationError( + "Challenge requirements cannot have a null prerequisite" + ) + + return value + + class ChallengeSchema(ma.ModelSchema): class Meta: model = Challenges @@ -46,3 +66,7 @@ ) ], ) + + requirements = field_for( + Challenges, "requirements", validate=[ChallengeRequirementsValidator()], + )
{"golden_diff": "diff --git a/CTFd/schemas/challenges.py b/CTFd/schemas/challenges.py\n--- a/CTFd/schemas/challenges.py\n+++ b/CTFd/schemas/challenges.py\n@@ -1,9 +1,29 @@\n from marshmallow import validate\n+from marshmallow.exceptions import ValidationError\n from marshmallow_sqlalchemy import field_for\n \n from CTFd.models import Challenges, ma\n \n \n+class ChallengeRequirementsValidator(validate.Validator):\n+ default_message = \"Error parsing challenge requirements\"\n+\n+ def __init__(self, error=None):\n+ self.error = error or self.default_message\n+\n+ def __call__(self, value):\n+ if isinstance(value, dict) is False:\n+ raise ValidationError(self.default_message)\n+\n+ prereqs = value.get(\"prerequisites\", [])\n+ if all(prereqs) is False:\n+ raise ValidationError(\n+ \"Challenge requirements cannot have a null prerequisite\"\n+ )\n+\n+ return value\n+\n+\n class ChallengeSchema(ma.ModelSchema):\n class Meta:\n model = Challenges\n@@ -46,3 +66,7 @@\n )\n ],\n )\n+\n+ requirements = field_for(\n+ Challenges, \"requirements\", validate=[ChallengeRequirementsValidator()],\n+ )\n", "issue": "Empty and null prerequisites can be added from admin UI\n**Environment**:\r\n\r\n- CTFd Version/Commit: 843546b (tip of master)\r\n- Operating System: Linux\r\n- Web Browser and Version: Firefox\r\n\r\n**What happened?**\r\nAdding a prerequisite to a challenge without selecting a valid challenge entry in the drop down results in either `\"\"` (selecting the \"--\" item in the drop down) or `null` (not selecting anything in the dropdown). This also breaks rendering of any previously added prerequisites when the UI is reloaded.\r\n\r\n**What did you expect to happen?**\r\nThe backend should reject invalid prerequisites (probably via the `ChallengeSchema`?) and the frontend also shouldn't make it easy to submit them.\r\n\r\n**How to reproduce your issue**\r\n1. Create two challenges A and B\r\n2. VIsit the admin UI page to edit B and click on the \"Requirements\" tab\r\n3. Select A in the dropdown and click \"Add Prerequisite\"\r\n4. Select the \"--\" entry in the dropdown and click \"Add Prerequisite\" again - observe rendering breakage\r\n5. Solve A in either user or teams mode\r\n6. Query `/api/v1/challenges` and observe that B is not present\r\n7. Visit the admin UI again and select nothing in the requirements dropdown (it will remain blank), then click \"Add Prerequisite\" once more\r\n8. Check the requirements for the B in the DB/from the ORM. They will be `[<ID of A>, \"\", None]`\r\n\r\n**Any associated stack traces or error logs**\r\n\n", "before_files": [{"content": "from marshmallow import validate\nfrom marshmallow_sqlalchemy import field_for\n\nfrom CTFd.models import Challenges, ma\n\n\nclass ChallengeSchema(ma.ModelSchema):\n class Meta:\n model = Challenges\n include_fk = True\n dump_only = (\"id\",)\n\n name = field_for(\n Challenges,\n \"name\",\n validate=[\n validate.Length(\n min=0,\n max=80,\n error=\"Challenge could not be saved. Challenge name too long\",\n )\n ],\n )\n\n category = field_for(\n Challenges,\n \"category\",\n validate=[\n validate.Length(\n min=0,\n max=80,\n error=\"Challenge could not be saved. Challenge category too long\",\n )\n ],\n )\n\n description = field_for(\n Challenges,\n \"description\",\n allow_none=True,\n validate=[\n validate.Length(\n min=0,\n max=65535,\n error=\"Challenge could not be saved. Challenge description too long\",\n )\n ],\n )\n", "path": "CTFd/schemas/challenges.py"}]}
1,182
273
gh_patches_debug_26513
rasdani/github-patches
git_diff
getsentry__sentry-5804
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Issues resolved via UI do not appear in releases overview To reproduce: * Navigate to an issue * Click "Resolve in current release (abc123)" * Navigate to that release (abc123) * Issue does not appear under "New Issues in this Release" </issue> <code> [start of src/sentry/api/endpoints/issues_resolved_in_release.py] 1 from __future__ import absolute_import 2 3 from rest_framework.response import Response 4 5 from sentry.api.base import DocSection 6 from sentry.api.bases.project import ProjectEndpoint, ProjectPermission 7 from sentry.api.exceptions import ResourceDoesNotExist 8 from sentry.api.serializers import serialize 9 from sentry.api.serializers.models.group import StreamGroupSerializer 10 from sentry.models import ( 11 Group, 12 GroupCommitResolution, 13 Release, 14 ReleaseCommit, 15 ) 16 17 18 class IssuesResolvedInReleaseEndpoint(ProjectEndpoint): 19 doc_section = DocSection.RELEASES 20 permission_classes = (ProjectPermission, ) 21 22 def get(self, request, project, version): 23 """ 24 List issues to be resolved in a particular release 25 ```````````````````````` 26 27 Retrieve a list of issues to be resolved in a given release. 28 29 :pparam string organization_slug: the slug of the organization the 30 release belongs to. 31 :pparam string project_slug: the slug of the project associated with the release. 32 :pparam string version: the version identifier of the release. 33 :auth: required 34 """ 35 try: 36 release = Release.objects.get(version=version, organization=project.organization) 37 except Release.DoesNotExist: 38 raise ResourceDoesNotExist 39 40 groups = Group.objects.filter( 41 project=project, 42 id__in=GroupCommitResolution.objects.filter( 43 commit_id__in=ReleaseCommit.objects.filter( 44 release=release, 45 ).values_list('commit_id', flat=True), 46 ).values_list('group_id', flat=True), 47 ) 48 49 context = serialize(list(groups), request.user, StreamGroupSerializer(stats_period=None)) 50 return Response(context) 51 [end of src/sentry/api/endpoints/issues_resolved_in_release.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sentry/api/endpoints/issues_resolved_in_release.py b/src/sentry/api/endpoints/issues_resolved_in_release.py --- a/src/sentry/api/endpoints/issues_resolved_in_release.py +++ b/src/sentry/api/endpoints/issues_resolved_in_release.py @@ -10,6 +10,7 @@ from sentry.models import ( Group, GroupCommitResolution, + GroupResolution, Release, ReleaseCommit, ) @@ -37,14 +38,27 @@ except Release.DoesNotExist: raise ResourceDoesNotExist - groups = Group.objects.filter( - project=project, - id__in=GroupCommitResolution.objects.filter( + group_ids = set() + group_ids |= set( + GroupResolution.objects.filter( + release=release, + ).values_list('group_id', flat=True) + ) + group_ids |= set( + GroupCommitResolution.objects.filter( commit_id__in=ReleaseCommit.objects.filter( release=release, - ).values_list('commit_id', flat=True), - ).values_list('group_id', flat=True), + ).values_list( + 'commit_id', + flat=True, + ) + ).values_list( + 'group_id', + flat=True, + ) ) + groups = Group.objects.filter(project=project, id__in=group_ids) + context = serialize(list(groups), request.user, StreamGroupSerializer(stats_period=None)) return Response(context)
{"golden_diff": "diff --git a/src/sentry/api/endpoints/issues_resolved_in_release.py b/src/sentry/api/endpoints/issues_resolved_in_release.py\n--- a/src/sentry/api/endpoints/issues_resolved_in_release.py\n+++ b/src/sentry/api/endpoints/issues_resolved_in_release.py\n@@ -10,6 +10,7 @@\n from sentry.models import (\n Group,\n GroupCommitResolution,\n+ GroupResolution,\n Release,\n ReleaseCommit,\n )\n@@ -37,14 +38,27 @@\n except Release.DoesNotExist:\n raise ResourceDoesNotExist\n \n- groups = Group.objects.filter(\n- project=project,\n- id__in=GroupCommitResolution.objects.filter(\n+ group_ids = set()\n+ group_ids |= set(\n+ GroupResolution.objects.filter(\n+ release=release,\n+ ).values_list('group_id', flat=True)\n+ )\n+ group_ids |= set(\n+ GroupCommitResolution.objects.filter(\n commit_id__in=ReleaseCommit.objects.filter(\n release=release,\n- ).values_list('commit_id', flat=True),\n- ).values_list('group_id', flat=True),\n+ ).values_list(\n+ 'commit_id',\n+ flat=True,\n+ )\n+ ).values_list(\n+ 'group_id',\n+ flat=True,\n+ )\n )\n \n+ groups = Group.objects.filter(project=project, id__in=group_ids)\n+\n context = serialize(list(groups), request.user, StreamGroupSerializer(stats_period=None))\n return Response(context)\n", "issue": "Issues resolved via UI do not appear in releases overview\nTo reproduce:\r\n\r\n* Navigate to an issue\r\n* Click \"Resolve in current release (abc123)\"\r\n* Navigate to that release (abc123)\r\n* Issue does not appear under \"New Issues in this Release\"\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom rest_framework.response import Response\n\nfrom sentry.api.base import DocSection\nfrom sentry.api.bases.project import ProjectEndpoint, ProjectPermission\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.serializers import serialize\nfrom sentry.api.serializers.models.group import StreamGroupSerializer\nfrom sentry.models import (\n Group,\n GroupCommitResolution,\n Release,\n ReleaseCommit,\n)\n\n\nclass IssuesResolvedInReleaseEndpoint(ProjectEndpoint):\n doc_section = DocSection.RELEASES\n permission_classes = (ProjectPermission, )\n\n def get(self, request, project, version):\n \"\"\"\n List issues to be resolved in a particular release\n ````````````````````````\n\n Retrieve a list of issues to be resolved in a given release.\n\n :pparam string organization_slug: the slug of the organization the\n release belongs to.\n :pparam string project_slug: the slug of the project associated with the release.\n :pparam string version: the version identifier of the release.\n :auth: required\n \"\"\"\n try:\n release = Release.objects.get(version=version, organization=project.organization)\n except Release.DoesNotExist:\n raise ResourceDoesNotExist\n\n groups = Group.objects.filter(\n project=project,\n id__in=GroupCommitResolution.objects.filter(\n commit_id__in=ReleaseCommit.objects.filter(\n release=release,\n ).values_list('commit_id', flat=True),\n ).values_list('group_id', flat=True),\n )\n\n context = serialize(list(groups), request.user, StreamGroupSerializer(stats_period=None))\n return Response(context)\n", "path": "src/sentry/api/endpoints/issues_resolved_in_release.py"}]}
1,057
332
gh_patches_debug_23241
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-3361
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [crash] bank balance - when the bot doesnt have perms to send message ```Traceback (most recent call last): File "/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py", line 79, in wrapped ret = await coro(*args, **kwargs) File "/home/bb8/venv/lib/python3.7/site-packages/redbot/cogs/economy/economy.py", line 174, in balance user=user.display_name, num=humanize_number(bal), currency=currency File "/home/bb8/venv/lib/python3.7/site-packages/redbot/core/commands/context.py", line 81, in send return await super().send(content=content, **kwargs) File "/home/bb8/venv/lib/python3.7/site-packages/discord/abc.py", line 823, in send data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce) File "/home/bb8/venv/lib/python3.7/site-packages/discord/http.py", line 218, in request raise Forbidden(r, data) discord.errors.Forbidden: 403 FORBIDDEN (error code: 50013): Missing Permissions The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/bot.py", line 863, in invoke await ctx.command.invoke(ctx) File "/home/bb8/venv/lib/python3.7/site-packages/redbot/core/commands/commands.py", line 587, in invoke await super().invoke(ctx) File "/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py", line 1158, in invoke await ctx.invoked_subcommand.invoke(ctx) File "/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py", line 728, in invoke await injected(*ctx.args, **ctx.kwargs) File "/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py", line 88, in wrapped raise CommandInvokeError(exc) from exc discord.ext.commands.errors.CommandInvokeError: Command raised an exception: Forbidden: 403 FORBIDDEN (error code: 50013): Missing Permissions``` Trace taken from BB-8 </issue> <code> [start of redbot/core/global_checks.py] 1 """The checks in this module run on every command.""" 2 from . import commands 3 4 5 def init_global_checks(bot): 6 @bot.check_once 7 def actually_up(ctx): 8 """ 9 Uptime is set during the initial startup process. 10 If this hasn't been set, we should assume the bot isn't ready yet. 11 """ 12 return ctx.bot.uptime is not None 13 14 @bot.check_once 15 async def whiteblacklist_checks(ctx): 16 return await ctx.bot.allowed_by_whitelist_blacklist(ctx.author) 17 18 @bot.check_once 19 def bots(ctx): 20 """Check the user is not another bot.""" 21 return not ctx.author.bot 22 [end of redbot/core/global_checks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redbot/core/global_checks.py b/redbot/core/global_checks.py --- a/redbot/core/global_checks.py +++ b/redbot/core/global_checks.py @@ -4,7 +4,17 @@ def init_global_checks(bot): @bot.check_once - def actually_up(ctx): + def minimum_bot_perms(ctx) -> bool: + """ + Too many 403, 401, and 429 Errors can cause bots to get global'd + + It's reasonable to assume the below as a minimum amount of perms for + commands. + """ + return ctx.channel.permissions_for(ctx.me).send_messages + + @bot.check_once + def actually_up(ctx) -> bool: """ Uptime is set during the initial startup process. If this hasn't been set, we should assume the bot isn't ready yet. @@ -12,10 +22,10 @@ return ctx.bot.uptime is not None @bot.check_once - async def whiteblacklist_checks(ctx): + async def whiteblacklist_checks(ctx) -> bool: return await ctx.bot.allowed_by_whitelist_blacklist(ctx.author) @bot.check_once - def bots(ctx): + def bots(ctx) -> bool: """Check the user is not another bot.""" return not ctx.author.bot
{"golden_diff": "diff --git a/redbot/core/global_checks.py b/redbot/core/global_checks.py\n--- a/redbot/core/global_checks.py\n+++ b/redbot/core/global_checks.py\n@@ -4,7 +4,17 @@\n \n def init_global_checks(bot):\n @bot.check_once\n- def actually_up(ctx):\n+ def minimum_bot_perms(ctx) -> bool:\n+ \"\"\"\n+ Too many 403, 401, and 429 Errors can cause bots to get global'd\n+ \n+ It's reasonable to assume the below as a minimum amount of perms for\n+ commands.\n+ \"\"\"\n+ return ctx.channel.permissions_for(ctx.me).send_messages\n+\n+ @bot.check_once\n+ def actually_up(ctx) -> bool:\n \"\"\" \n Uptime is set during the initial startup process.\n If this hasn't been set, we should assume the bot isn't ready yet. \n@@ -12,10 +22,10 @@\n return ctx.bot.uptime is not None\n \n @bot.check_once\n- async def whiteblacklist_checks(ctx):\n+ async def whiteblacklist_checks(ctx) -> bool:\n return await ctx.bot.allowed_by_whitelist_blacklist(ctx.author)\n \n @bot.check_once\n- def bots(ctx):\n+ def bots(ctx) -> bool:\n \"\"\"Check the user is not another bot.\"\"\"\n return not ctx.author.bot\n", "issue": "[crash] bank balance - when the bot doesnt have perms to send message\n```Traceback (most recent call last):\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py\", line 79, in wrapped\r\n ret = await coro(*args, **kwargs)\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/redbot/cogs/economy/economy.py\", line 174, in balance\r\n user=user.display_name, num=humanize_number(bal), currency=currency\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/redbot/core/commands/context.py\", line 81, in send\r\n return await super().send(content=content, **kwargs)\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/discord/abc.py\", line 823, in send\r\n data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/discord/http.py\", line 218, in request\r\n raise Forbidden(r, data)\r\ndiscord.errors.Forbidden: 403 FORBIDDEN (error code: 50013): Missing Permissions\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/bot.py\", line 863, in invoke\r\n await ctx.command.invoke(ctx)\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/redbot/core/commands/commands.py\", line 587, in invoke\r\n await super().invoke(ctx)\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py\", line 1158, in invoke\r\n await ctx.invoked_subcommand.invoke(ctx)\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py\", line 728, in invoke\r\n await injected(*ctx.args, **ctx.kwargs)\r\n File \"/home/bb8/venv/lib/python3.7/site-packages/discord/ext/commands/core.py\", line 88, in wrapped\r\n raise CommandInvokeError(exc) from exc\r\ndiscord.ext.commands.errors.CommandInvokeError: Command raised an exception: Forbidden: 403 FORBIDDEN (error code: 50013): Missing Permissions```\r\n\r\nTrace taken from BB-8\n", "before_files": [{"content": "\"\"\"The checks in this module run on every command.\"\"\"\nfrom . import commands\n\n\ndef init_global_checks(bot):\n @bot.check_once\n def actually_up(ctx):\n \"\"\" \n Uptime is set during the initial startup process.\n If this hasn't been set, we should assume the bot isn't ready yet. 
\n \"\"\"\n return ctx.bot.uptime is not None\n\n @bot.check_once\n async def whiteblacklist_checks(ctx):\n return await ctx.bot.allowed_by_whitelist_blacklist(ctx.author)\n\n @bot.check_once\n def bots(ctx):\n \"\"\"Check the user is not another bot.\"\"\"\n return not ctx.author.bot\n", "path": "redbot/core/global_checks.py"}]}
1,289
306
gh_patches_debug_19583
rasdani/github-patches
git_diff
dotkom__onlineweb4-1455
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Group Sync when changing a User Currently the group syncer is only run when a group is updated (saved). If a user is added to (or removed from) a group, the syncer won't run. Fix: Connect to the signal sent by User.groups m2m-relation when a user is saved, like described here: https://docs.djangoproject.com/en/dev/ref/signals/#m2m-changed This is to be done in `apps/authentication/signals.py`. </issue> <code> [start of apps/authentication/signals.py] 1 # -*- coding: utf-8 -*- 2 from django.contrib.auth.models import Group 3 from django.db.models.signals import post_save 4 from django.dispatch import receiver 5 6 from apps.authentication.tasks import SynchronizeGroups 7 8 9 @receiver(post_save, sender=Group) 10 def trigger_group_syncer(sender, instance, created, **kwargs): 11 """ 12 :param sender: The model that triggered this hook 13 :param instance: The model instance triggering this hook 14 :param created: True if the instance was created, False if the instance was updated 15 16 Calls the SynchronizeGroups Task if a group is updated. (Not if it's the initial creation of a group) 17 """ 18 19 if created: 20 # If a new instance is created, we do not need to trigger group sync. 21 pass 22 else: 23 SynchronizeGroups.run() 24 [end of apps/authentication/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/authentication/signals.py b/apps/authentication/signals.py --- a/apps/authentication/signals.py +++ b/apps/authentication/signals.py @@ -1,13 +1,17 @@ # -*- coding: utf-8 -*- +from django.contrib.auth import get_user_model from django.contrib.auth.models import Group -from django.db.models.signals import post_save +from django.db.models.signals import m2m_changed, post_save from django.dispatch import receiver from apps.authentication.tasks import SynchronizeGroups +User = get_user_model() + + @receiver(post_save, sender=Group) -def trigger_group_syncer(sender, instance, created, **kwargs): +def trigger_group_syncer(sender, instance, created=False, **kwargs): """ :param sender: The model that triggered this hook :param instance: The model instance triggering this hook @@ -21,3 +25,5 @@ pass else: SynchronizeGroups.run() + +m2m_changed.connect(trigger_group_syncer, sender=User.groups.through)
{"golden_diff": "diff --git a/apps/authentication/signals.py b/apps/authentication/signals.py\n--- a/apps/authentication/signals.py\n+++ b/apps/authentication/signals.py\n@@ -1,13 +1,17 @@\n # -*- coding: utf-8 -*-\n+from django.contrib.auth import get_user_model\n from django.contrib.auth.models import Group\n-from django.db.models.signals import post_save\n+from django.db.models.signals import m2m_changed, post_save\n from django.dispatch import receiver\n \n from apps.authentication.tasks import SynchronizeGroups\n \n \n+User = get_user_model()\n+\n+\n @receiver(post_save, sender=Group)\n-def trigger_group_syncer(sender, instance, created, **kwargs):\n+def trigger_group_syncer(sender, instance, created=False, **kwargs):\n \"\"\"\n :param sender: The model that triggered this hook\n :param instance: The model instance triggering this hook\n@@ -21,3 +25,5 @@\n pass\n else:\n SynchronizeGroups.run()\n+\n+m2m_changed.connect(trigger_group_syncer, sender=User.groups.through)\n", "issue": "Group Sync when changing a User\nCurrently the group syncer is only run when a group is updated (saved). If a user is added to (or removed from) a group, the syncer won't run. \n\nFix:\nConnect to the signal sent by User.groups m2m-relation when a user is saved, like described here: https://docs.djangoproject.com/en/dev/ref/signals/#m2m-changed\n\nThis is to be done in `apps/authentication/signals.py`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.contrib.auth.models import Group\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom apps.authentication.tasks import SynchronizeGroups\n\n\n@receiver(post_save, sender=Group)\ndef trigger_group_syncer(sender, instance, created, **kwargs):\n \"\"\"\n :param sender: The model that triggered this hook\n :param instance: The model instance triggering this hook\n :param created: True if the instance was created, False if the instance was updated\n\n Calls the SynchronizeGroups Task if a group is updated. (Not if it's the initial creation of a group)\n \"\"\"\n\n if created:\n # If a new instance is created, we do not need to trigger group sync.\n pass\n else:\n SynchronizeGroups.run()\n", "path": "apps/authentication/signals.py"}]}
850
226
gh_patches_debug_11051
rasdani/github-patches
git_diff
saleor__saleor-1447
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Categories to the Sitemap Sitemap consists of Products only, we should expand it to include categories as well. </issue> <code> [start of saleor/core/sitemaps.py] 1 from __future__ import unicode_literals 2 3 from django.contrib.sitemaps import Sitemap 4 5 from ..product.models import Product 6 7 8 class ProductSitemap(Sitemap): 9 10 def items(self): 11 return Product.objects.only('id', 'name').order_by('-id') 12 13 14 sitemaps = {'products': ProductSitemap} 15 [end of saleor/core/sitemaps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/core/sitemaps.py b/saleor/core/sitemaps.py --- a/saleor/core/sitemaps.py +++ b/saleor/core/sitemaps.py @@ -2,7 +2,7 @@ from django.contrib.sitemaps import Sitemap -from ..product.models import Product +from ..product.models import Category, Product class ProductSitemap(Sitemap): @@ -11,4 +11,13 @@ return Product.objects.only('id', 'name').order_by('-id') -sitemaps = {'products': ProductSitemap} +class CategorySitemap(Sitemap): + + def items(self): + categories = Category.objects.all().order_by('id') + return categories.only('id', 'name', 'slug') + +sitemaps = { + 'categories': CategorySitemap, + 'products': ProductSitemap +}
{"golden_diff": "diff --git a/saleor/core/sitemaps.py b/saleor/core/sitemaps.py\n--- a/saleor/core/sitemaps.py\n+++ b/saleor/core/sitemaps.py\n@@ -2,7 +2,7 @@\n \n from django.contrib.sitemaps import Sitemap\n \n-from ..product.models import Product\n+from ..product.models import Category, Product\n \n \n class ProductSitemap(Sitemap):\n@@ -11,4 +11,13 @@\n return Product.objects.only('id', 'name').order_by('-id')\n \n \n-sitemaps = {'products': ProductSitemap}\n+class CategorySitemap(Sitemap):\n+\n+ def items(self):\n+ categories = Category.objects.all().order_by('id')\n+ return categories.only('id', 'name', 'slug')\n+\n+sitemaps = {\n+ 'categories': CategorySitemap,\n+ 'products': ProductSitemap\n+}\n", "issue": "Add Categories to the Sitemap\nSitemap consists of Products only, we should expand it to include categories as well.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib.sitemaps import Sitemap\n\nfrom ..product.models import Product\n\n\nclass ProductSitemap(Sitemap):\n\n def items(self):\n return Product.objects.only('id', 'name').order_by('-id')\n\n\nsitemaps = {'products': ProductSitemap}\n", "path": "saleor/core/sitemaps.py"}]}
656
200
gh_patches_debug_16716
rasdani/github-patches
git_diff
sosreport__sos-2872
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> smartcard plugin is using tools removed/not available from rhel8 and newer The pam_pkcs11 was last available in RHEL7 and is no longer available in RHEL8 and newer so there is no point for checking it and its configuration files in: https://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L21-L22 The `pcsc-tools` package above is not available in RHEL (only from EPEL). Instead, this list should contain `pcsc-lite` and `pcsc-lite-ccid` packages. Also the following commands/path do not have any significance so I propose to remove them (the `pklogin_finder` is part of pam_pkcs11`, which is not available in RHEL8 and newer). The respective replacement would be some command from sssd, but most of the sssd logs are already collected so I do not think we need a separate command: https://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L32-L33 https://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L40 The `pcsc_scan` is a tool that is shipped in `pcsc-tools` package, which is available in EPEL only so I suggest to remove it: https://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L34 Let me know if this sounds ok to you or I should open a PR with the proposed changes. Note, that these are still needed for RHEL7, but I think for RHEL7 you have a different branch. </issue> <code> [start of sos/report/plugins/smartcard.py] 1 # Copyright (C) 2007 Sadique Puthen <[email protected]> 2 3 # This file is part of the sos project: https://github.com/sosreport/sos 4 # 5 # This copyrighted material is made available to anyone wishing to use, 6 # modify, copy, or redistribute it subject to the terms and conditions of 7 # version 2 of the GNU General Public License. 8 # 9 # See the LICENSE file in the source distribution for further information. 10 11 from sos.report.plugins import Plugin, RedHatPlugin 12 13 14 class Smartcard(Plugin, RedHatPlugin): 15 16 short_desc = 'PKCS#11 smart cards' 17 18 plugin_name = 'smartcard' 19 profiles = ('security', 'identity', 'hardware') 20 21 files = ('/etc/pam_pkcs11/pam_pkcs11.conf',) 22 packages = ('pam_pkcs11', 'pcsc-tools', 'opensc') 23 24 def setup(self): 25 self.add_copy_spec([ 26 "/etc/reader.conf", 27 "/etc/reader.conf.d/", 28 "/etc/pam_pkcs11/", 29 "/etc/opensc-*.conf" 30 ]) 31 self.add_cmd_output([ 32 "pklogin_finder debug", 33 "ls -nl /usr/lib*/pam_pkcs11/", 34 "pcsc_scan", 35 "pkcs11-tool --show-info", 36 "pkcs11-tool --list-mechanisms", 37 "pkcs11-tool --list-slots", 38 "pkcs11-tool --list-objects" 39 ]) 40 self.add_forbidden_path("/etc/pam_pkcs11/nssdb/key[3-4].db") 41 42 # vim: set et ts=4 sw=4 : [end of sos/report/plugins/smartcard.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sos/report/plugins/smartcard.py b/sos/report/plugins/smartcard.py --- a/sos/report/plugins/smartcard.py +++ b/sos/report/plugins/smartcard.py @@ -19,14 +19,18 @@ profiles = ('security', 'identity', 'hardware') files = ('/etc/pam_pkcs11/pam_pkcs11.conf',) - packages = ('pam_pkcs11', 'pcsc-tools', 'opensc') + # The pam_pkcs11 is available only in RHEL7 + packages = ('pam_pkcs11', 'pcsc-tools', 'opensc', 'pcsc-lite', + 'pcsc-lite-ccid') def setup(self): self.add_copy_spec([ "/etc/reader.conf", "/etc/reader.conf.d/", "/etc/pam_pkcs11/", - "/etc/opensc-*.conf" + "/etc/opensc-*.conf", + "/etc/pkcs11/modules/*.module", + "/usr/share/p11-kit/modules/*.module" ]) self.add_cmd_output([ "pklogin_finder debug",
{"golden_diff": "diff --git a/sos/report/plugins/smartcard.py b/sos/report/plugins/smartcard.py\n--- a/sos/report/plugins/smartcard.py\n+++ b/sos/report/plugins/smartcard.py\n@@ -19,14 +19,18 @@\n profiles = ('security', 'identity', 'hardware')\n \n files = ('/etc/pam_pkcs11/pam_pkcs11.conf',)\n- packages = ('pam_pkcs11', 'pcsc-tools', 'opensc')\n+ # The pam_pkcs11 is available only in RHEL7\n+ packages = ('pam_pkcs11', 'pcsc-tools', 'opensc', 'pcsc-lite',\n+ 'pcsc-lite-ccid')\n \n def setup(self):\n self.add_copy_spec([\n \"/etc/reader.conf\",\n \"/etc/reader.conf.d/\",\n \"/etc/pam_pkcs11/\",\n- \"/etc/opensc-*.conf\"\n+ \"/etc/opensc-*.conf\",\n+ \"/etc/pkcs11/modules/*.module\",\n+ \"/usr/share/p11-kit/modules/*.module\"\n ])\n self.add_cmd_output([\n \"pklogin_finder debug\",\n", "issue": "smartcard plugin is using tools removed/not available from rhel8 and newer\nThe pam_pkcs11 was last available in RHEL7 and is no longer available in RHEL8 and newer so there is no point for checking it and its configuration files in:\r\n\r\nhttps://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L21-L22\r\n\r\nThe `pcsc-tools` package above is not available in RHEL (only from EPEL). Instead, this list should contain `pcsc-lite` and `pcsc-lite-ccid` packages.\r\n\r\nAlso the following commands/path do not have any significance so I propose to remove them (the `pklogin_finder` is part of pam_pkcs11`, which is not available in RHEL8 and newer). The respective replacement would be some command from sssd, but most of the sssd logs are already collected so I do not think we need a separate command:\r\n\r\nhttps://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L32-L33\r\nhttps://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L40\r\n\r\nThe `pcsc_scan` is a tool that is shipped in `pcsc-tools` package, which is available in EPEL only so I suggest to remove it:\r\n\r\nhttps://github.com/sosreport/sos/blob/b96926f151da984491de3082af7d06ecb07fc517/sos/report/plugins/smartcard.py#L34\r\n\r\nLet me know if this sounds ok to you or I should open a PR with the proposed changes.\r\n\r\nNote, that these are still needed for RHEL7, but I think for RHEL7 you have a different branch.\n", "before_files": [{"content": "# Copyright (C) 2007 Sadique Puthen <[email protected]>\n\n# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, RedHatPlugin\n\n\nclass Smartcard(Plugin, RedHatPlugin):\n\n short_desc = 'PKCS#11 smart cards'\n\n plugin_name = 'smartcard'\n profiles = ('security', 'identity', 'hardware')\n\n files = ('/etc/pam_pkcs11/pam_pkcs11.conf',)\n packages = ('pam_pkcs11', 'pcsc-tools', 'opensc')\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/reader.conf\",\n \"/etc/reader.conf.d/\",\n \"/etc/pam_pkcs11/\",\n \"/etc/opensc-*.conf\"\n ])\n self.add_cmd_output([\n \"pklogin_finder debug\",\n \"ls -nl /usr/lib*/pam_pkcs11/\",\n \"pcsc_scan\",\n \"pkcs11-tool --show-info\",\n \"pkcs11-tool --list-mechanisms\",\n \"pkcs11-tool --list-slots\",\n \"pkcs11-tool --list-objects\"\n ])\n 
self.add_forbidden_path(\"/etc/pam_pkcs11/nssdb/key[3-4].db\")\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/smartcard.py"}]}
1,483
266
gh_patches_debug_4924
rasdani/github-patches
git_diff
saleor__saleor-1604
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Promotional code field and button overlap each other. In the checkout process: <img width="465" alt="zrzut ekranu 2018-01-10 o 10 02 22" src="https://user-images.githubusercontent.com/1561204/34765559-24865056-f5f2-11e7-9e2a-d0ff639e7a79.png"> </issue> <code> [start of saleor/discount/forms.py] 1 from datetime import date 2 3 from django import forms 4 from django.utils.encoding import smart_text 5 from django.utils.translation import pgettext_lazy 6 7 from .models import NotApplicable, Voucher 8 9 10 class VoucherField(forms.ModelChoiceField): 11 12 default_error_messages = { 13 'invalid_choice': pgettext_lazy( 14 'voucher', pgettext_lazy( 15 'Voucher form error', 'Discount code incorrect or expired')), 16 } 17 18 19 class CheckoutDiscountForm(forms.Form): 20 21 voucher = VoucherField( 22 queryset=Voucher.objects.none(), 23 to_field_name='code', 24 label=pgettext_lazy( 25 'Checkout discount form label for voucher field', 26 'Gift card or discount code'), 27 widget=forms.TextInput) 28 29 def __init__(self, *args, **kwargs): 30 self.checkout = kwargs.pop('checkout') 31 initial = kwargs.get('initial', {}) 32 if 'voucher' not in initial: 33 initial['voucher'] = self.checkout.voucher_code 34 kwargs['initial'] = initial 35 super().__init__(*args, **kwargs) 36 self.fields['voucher'].queryset = Voucher.objects.active( 37 date=date.today()) 38 39 def clean(self): 40 cleaned_data = super().clean() 41 if 'voucher' in cleaned_data: 42 voucher = cleaned_data['voucher'] 43 try: 44 discount = voucher.get_discount_for_checkout(self.checkout) 45 cleaned_data['discount'] = discount 46 except NotApplicable as e: 47 self.add_error('voucher', smart_text(e)) 48 return cleaned_data 49 50 def apply_discount(self): 51 discount = self.cleaned_data['discount'] 52 voucher = self.cleaned_data['voucher'] 53 self.checkout.discount = discount 54 self.checkout.voucher_code = voucher.code 55 [end of saleor/discount/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/discount/forms.py b/saleor/discount/forms.py --- a/saleor/discount/forms.py +++ b/saleor/discount/forms.py @@ -21,7 +21,7 @@ voucher = VoucherField( queryset=Voucher.objects.none(), to_field_name='code', - label=pgettext_lazy( + help_text=pgettext_lazy( 'Checkout discount form label for voucher field', 'Gift card or discount code'), widget=forms.TextInput)
{"golden_diff": "diff --git a/saleor/discount/forms.py b/saleor/discount/forms.py\n--- a/saleor/discount/forms.py\n+++ b/saleor/discount/forms.py\n@@ -21,7 +21,7 @@\n voucher = VoucherField(\n queryset=Voucher.objects.none(),\n to_field_name='code',\n- label=pgettext_lazy(\n+ help_text=pgettext_lazy(\n 'Checkout discount form label for voucher field',\n 'Gift card or discount code'),\n widget=forms.TextInput)\n", "issue": "Promotional code field and button overlap each other.\nIn the checkout process:\r\n<img width=\"465\" alt=\"zrzut ekranu 2018-01-10 o 10 02 22\" src=\"https://user-images.githubusercontent.com/1561204/34765559-24865056-f5f2-11e7-9e2a-d0ff639e7a79.png\">\r\n\n", "before_files": [{"content": "from datetime import date\n\nfrom django import forms\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import pgettext_lazy\n\nfrom .models import NotApplicable, Voucher\n\n\nclass VoucherField(forms.ModelChoiceField):\n\n default_error_messages = {\n 'invalid_choice': pgettext_lazy(\n 'voucher', pgettext_lazy(\n 'Voucher form error', 'Discount code incorrect or expired')),\n }\n\n\nclass CheckoutDiscountForm(forms.Form):\n\n voucher = VoucherField(\n queryset=Voucher.objects.none(),\n to_field_name='code',\n label=pgettext_lazy(\n 'Checkout discount form label for voucher field',\n 'Gift card or discount code'),\n widget=forms.TextInput)\n\n def __init__(self, *args, **kwargs):\n self.checkout = kwargs.pop('checkout')\n initial = kwargs.get('initial', {})\n if 'voucher' not in initial:\n initial['voucher'] = self.checkout.voucher_code\n kwargs['initial'] = initial\n super().__init__(*args, **kwargs)\n self.fields['voucher'].queryset = Voucher.objects.active(\n date=date.today())\n\n def clean(self):\n cleaned_data = super().clean()\n if 'voucher' in cleaned_data:\n voucher = cleaned_data['voucher']\n try:\n discount = voucher.get_discount_for_checkout(self.checkout)\n cleaned_data['discount'] = discount\n except NotApplicable as e:\n self.add_error('voucher', smart_text(e))\n return cleaned_data\n\n def apply_discount(self):\n discount = self.cleaned_data['discount']\n voucher = self.cleaned_data['voucher']\n self.checkout.discount = discount\n self.checkout.voucher_code = voucher.code\n", "path": "saleor/discount/forms.py"}]}
1,121
113
gh_patches_debug_31936
rasdani/github-patches
git_diff
WordPress__openverse-api-210
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature] Add new Authority Type ## Problem <!-- Describe a problem solved by this feature; or delete the section entirely. --> We currently lack an authority type for curated image galleries: places like https://stocksnap.io where content is manually curated by the platform, but it isn't a site with social, user-uploaded content, or isn't a formal GLAM institution. ## Description <!-- Describe the feature and how it solves the problem. --> Our current authorities: https://github.com/WordPress/openverse-api/blob/9d0d724651f18cc9f96931e01bea92b8032bd6a0/ingestion_server/ingestion_server/authority.py#L32-L36 Should be modified to: ```diff boost = { - AuthorityTypes.CURATED: 90, + AuthorityTypes.CURATED: 87.5, + AuthorityTypes.CULTURAL_INSTITUTIONS: 90, AuthorityTypes.SOCIAL_MEDIA: 80, AuthorityTypes.DEFAULT: 85 } ``` We'll also need to re-classify the existing providers classified as `CURATED` to `CULTURAL_INSTITUTIONS` and add a line for StockSnap here (we might also want to sort these alphabetically): https://github.com/WordPress/openverse-api/blob/9d0d724651f18cc9f96931e01bea92b8032bd6a0/ingestion_server/ingestion_server/authority.py#L37-L53 ## Alternatives <!-- Describe any alternative solutions or features you have considered. How is this feature better? --> ## Additional context <!-- Add any other context about the feature here; or delete the section entirely. --> ## Implementation <!-- Replace the [ ] with [x] to check the box. --> - [ ] 🙋 I would be interested in implementing this feature. </issue> <code> [start of ingestion_server/ingestion_server/authority.py] 1 from enum import Enum, auto 2 3 4 """ 5 Authority is a ranking from 0 to 100 (with 0 being least authoritative) 6 indicating the pedigree of an image. Some examples of things that could impact 7 authority: 8 - The reputation of the website that posted an image 9 - The popularity of the uploader on a social media site in terms of number of 10 followers 11 - Whether the uploader has uploaded images that have previously been flagged for 12 copyright infringement. 13 - etc 14 15 The authority can be set from the catalog layer through the meta_data field 16 or through the ingestion layer. As of now, we are only factoring in the 17 reputation of the website as a static hand-picked list based on experience 18 and search result quality, with the intention to add more sophisticated and 19 tailored measures of authority later on. 20 21 Also note that this is just one factor in rankings, and the magnitude of the 22 boost can be adjusted at search-time. 23 """ 24 25 26 class AuthorityTypes(Enum): 27 CURATED = auto() 28 SOCIAL_MEDIA = auto() 29 DEFAULT = auto() 30 31 32 # We want to boost curated collections where each image has been vetted for 33 # cultural significance. 34 boost = { 35 AuthorityTypes.CURATED: 90, 36 AuthorityTypes.SOCIAL_MEDIA: 80, 37 AuthorityTypes.DEFAULT: 85, 38 } 39 authority_types = { 40 "flickr": AuthorityTypes.SOCIAL_MEDIA, 41 "behance": AuthorityTypes.SOCIAL_MEDIA, 42 "thingiverse": AuthorityTypes.SOCIAL_MEDIA, 43 "sketchfab": AuthorityTypes.SOCIAL_MEDIA, 44 "deviantart": AuthorityTypes.SOCIAL_MEDIA, 45 "thorvaldsensmuseum": AuthorityTypes.CURATED, 46 "svgsilh": AuthorityTypes.CURATED, 47 "smithsonian": AuthorityTypes.CURATED, 48 "rijksmuseum": AuthorityTypes.CURATED, 49 "museumsvictoria": AuthorityTypes.CURATED, 50 "met": AuthorityTypes.CURATED, 51 "mccordsmuseum": AuthorityTypes.CURATED, 52 "digitaltmuseum": AuthorityTypes.CURATED, 53 "clevelandmuseum": AuthorityTypes.CURATED, 54 "brooklynmuseum": AuthorityTypes.CURATED, 55 } 56 57 58 def get_authority_boost(source): 59 authority_boost = None 60 if source in authority_types: 61 authority_type = authority_types[source] 62 if authority_type in boost: 63 authority_boost = boost[authority_type] 64 else: 65 authority_boost = boost[AuthorityTypes.DEFAULT] 66 return authority_boost 67 [end of ingestion_server/ingestion_server/authority.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ingestion_server/ingestion_server/authority.py b/ingestion_server/ingestion_server/authority.py --- a/ingestion_server/ingestion_server/authority.py +++ b/ingestion_server/ingestion_server/authority.py @@ -25,6 +25,7 @@ class AuthorityTypes(Enum): CURATED = auto() + CULTURAL_INSTITUTION = auto() SOCIAL_MEDIA = auto() DEFAULT = auto() @@ -32,26 +33,29 @@ # We want to boost curated collections where each image has been vetted for # cultural significance. boost = { - AuthorityTypes.CURATED: 90, - AuthorityTypes.SOCIAL_MEDIA: 80, - AuthorityTypes.DEFAULT: 85, + AuthorityTypes.CURATED: 85, + AuthorityTypes.CULTURAL_INSTITUTION: 90, + AuthorityTypes.SOCIAL_MEDIA: 75, + AuthorityTypes.DEFAULT: 80, } + authority_types = { "flickr": AuthorityTypes.SOCIAL_MEDIA, "behance": AuthorityTypes.SOCIAL_MEDIA, "thingiverse": AuthorityTypes.SOCIAL_MEDIA, "sketchfab": AuthorityTypes.SOCIAL_MEDIA, "deviantart": AuthorityTypes.SOCIAL_MEDIA, - "thorvaldsensmuseum": AuthorityTypes.CURATED, - "svgsilh": AuthorityTypes.CURATED, - "smithsonian": AuthorityTypes.CURATED, - "rijksmuseum": AuthorityTypes.CURATED, - "museumsvictoria": AuthorityTypes.CURATED, - "met": AuthorityTypes.CURATED, - "mccordsmuseum": AuthorityTypes.CURATED, - "digitaltmuseum": AuthorityTypes.CURATED, - "clevelandmuseum": AuthorityTypes.CURATED, - "brooklynmuseum": AuthorityTypes.CURATED, + "thorvaldsensmuseum": AuthorityTypes.CULTURAL_INSTITUTION, + "svgsilh": AuthorityTypes.CULTURAL_INSTITUTION, + "smithsonian": AuthorityTypes.CULTURAL_INSTITUTION, + "rijksmuseum": AuthorityTypes.CULTURAL_INSTITUTION, + "museumsvictoria": AuthorityTypes.CULTURAL_INSTITUTION, + "met": AuthorityTypes.CULTURAL_INSTITUTION, + "mccordsmuseum": AuthorityTypes.CULTURAL_INSTITUTION, + "digitaltmuseum": AuthorityTypes.CULTURAL_INSTITUTION, + "clevelandmuseum": AuthorityTypes.CULTURAL_INSTITUTION, + "brooklynmuseum": AuthorityTypes.CULTURAL_INSTITUTION, + "stocksnap": AuthorityTypes.CURATED, }
{"golden_diff": "diff --git a/ingestion_server/ingestion_server/authority.py b/ingestion_server/ingestion_server/authority.py\n--- a/ingestion_server/ingestion_server/authority.py\n+++ b/ingestion_server/ingestion_server/authority.py\n@@ -25,6 +25,7 @@\n \n class AuthorityTypes(Enum):\n CURATED = auto()\n+ CULTURAL_INSTITUTION = auto()\n SOCIAL_MEDIA = auto()\n DEFAULT = auto()\n \n@@ -32,26 +33,29 @@\n # We want to boost curated collections where each image has been vetted for\n # cultural significance.\n boost = {\n- AuthorityTypes.CURATED: 90,\n- AuthorityTypes.SOCIAL_MEDIA: 80,\n- AuthorityTypes.DEFAULT: 85,\n+ AuthorityTypes.CURATED: 85,\n+ AuthorityTypes.CULTURAL_INSTITUTION: 90,\n+ AuthorityTypes.SOCIAL_MEDIA: 75,\n+ AuthorityTypes.DEFAULT: 80,\n }\n+\n authority_types = {\n \"flickr\": AuthorityTypes.SOCIAL_MEDIA,\n \"behance\": AuthorityTypes.SOCIAL_MEDIA,\n \"thingiverse\": AuthorityTypes.SOCIAL_MEDIA,\n \"sketchfab\": AuthorityTypes.SOCIAL_MEDIA,\n \"deviantart\": AuthorityTypes.SOCIAL_MEDIA,\n- \"thorvaldsensmuseum\": AuthorityTypes.CURATED,\n- \"svgsilh\": AuthorityTypes.CURATED,\n- \"smithsonian\": AuthorityTypes.CURATED,\n- \"rijksmuseum\": AuthorityTypes.CURATED,\n- \"museumsvictoria\": AuthorityTypes.CURATED,\n- \"met\": AuthorityTypes.CURATED,\n- \"mccordsmuseum\": AuthorityTypes.CURATED,\n- \"digitaltmuseum\": AuthorityTypes.CURATED,\n- \"clevelandmuseum\": AuthorityTypes.CURATED,\n- \"brooklynmuseum\": AuthorityTypes.CURATED,\n+ \"thorvaldsensmuseum\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"svgsilh\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"smithsonian\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"rijksmuseum\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"museumsvictoria\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"met\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"mccordsmuseum\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"digitaltmuseum\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"clevelandmuseum\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"brooklynmuseum\": AuthorityTypes.CULTURAL_INSTITUTION,\n+ \"stocksnap\": AuthorityTypes.CURATED,\n }\n", "issue": "[Feature] Add new Authority Type\n## Problem\r\n<!-- Describe a problem solved by this feature; or delete the section entirely. -->\r\n\r\nWe currently lack an authority type for curated image galleries: places like https://stocksnap.io where content is manually curated by the platform, but it isn't a site with social, user-uploaded content, or isn't a formal GLAM institution.\r\n\r\n## Description\r\n<!-- Describe the feature and how it solves the problem. -->\r\n\r\nOur current authorities:\r\n\r\nhttps://github.com/WordPress/openverse-api/blob/9d0d724651f18cc9f96931e01bea92b8032bd6a0/ingestion_server/ingestion_server/authority.py#L32-L36\r\n\r\nShould be modified to:\r\n\r\n\r\n```diff\r\nboost = {\r\n- AuthorityTypes.CURATED: 90,\r\n+ AuthorityTypes.CURATED: 87.5,\r\n+ AuthorityTypes.CULTURAL_INSTITUTIONS: 90,\r\n AuthorityTypes.SOCIAL_MEDIA: 80,\r\n AuthorityTypes.DEFAULT: 85\r\n}\r\n```\r\n\r\nWe'll also need to re-classify the existing providers classified as `CURATED` to `CULTURAL_INSTITUTIONS` and add a line for StockSnap here (we might also want to sort these alphabetically):\r\n\r\nhttps://github.com/WordPress/openverse-api/blob/9d0d724651f18cc9f96931e01bea92b8032bd6a0/ingestion_server/ingestion_server/authority.py#L37-L53\r\n\r\n\r\n\r\n## Alternatives\r\n<!-- Describe any alternative solutions or features you have considered. How is this feature better? 
-->\r\n\r\n## Additional context\r\n<!-- Add any other context about the feature here; or delete the section entirely. -->\r\n\r\n## Implementation\r\n<!-- Replace the [ ] with [x] to check the box. -->\r\n- [ ] \ud83d\ude4b I would be interested in implementing this feature.\r\n\n", "before_files": [{"content": "from enum import Enum, auto\n\n\n\"\"\"\nAuthority is a ranking from 0 to 100 (with 0 being least authoritative)\nindicating the pedigree of an image. Some examples of things that could impact\nauthority:\n- The reputation of the website that posted an image\n- The popularity of the uploader on a social media site in terms of number of\nfollowers\n- Whether the uploader has uploaded images that have previously been flagged for\ncopyright infringement.\n- etc\n\nThe authority can be set from the catalog layer through the meta_data field\nor through the ingestion layer. As of now, we are only factoring in the\nreputation of the website as a static hand-picked list based on experience\nand search result quality, with the intention to add more sophisticated and\ntailored measures of authority later on.\n\nAlso note that this is just one factor in rankings, and the magnitude of the\nboost can be adjusted at search-time.\n\"\"\"\n\n\nclass AuthorityTypes(Enum):\n CURATED = auto()\n SOCIAL_MEDIA = auto()\n DEFAULT = auto()\n\n\n# We want to boost curated collections where each image has been vetted for\n# cultural significance.\nboost = {\n AuthorityTypes.CURATED: 90,\n AuthorityTypes.SOCIAL_MEDIA: 80,\n AuthorityTypes.DEFAULT: 85,\n}\nauthority_types = {\n \"flickr\": AuthorityTypes.SOCIAL_MEDIA,\n \"behance\": AuthorityTypes.SOCIAL_MEDIA,\n \"thingiverse\": AuthorityTypes.SOCIAL_MEDIA,\n \"sketchfab\": AuthorityTypes.SOCIAL_MEDIA,\n \"deviantart\": AuthorityTypes.SOCIAL_MEDIA,\n \"thorvaldsensmuseum\": AuthorityTypes.CURATED,\n \"svgsilh\": AuthorityTypes.CURATED,\n \"smithsonian\": AuthorityTypes.CURATED,\n \"rijksmuseum\": AuthorityTypes.CURATED,\n \"museumsvictoria\": AuthorityTypes.CURATED,\n \"met\": AuthorityTypes.CURATED,\n \"mccordsmuseum\": AuthorityTypes.CURATED,\n \"digitaltmuseum\": AuthorityTypes.CURATED,\n \"clevelandmuseum\": AuthorityTypes.CURATED,\n \"brooklynmuseum\": AuthorityTypes.CURATED,\n}\n\n\ndef get_authority_boost(source):\n authority_boost = None\n if source in authority_types:\n authority_type = authority_types[source]\n if authority_type in boost:\n authority_boost = boost[authority_type]\n else:\n authority_boost = boost[AuthorityTypes.DEFAULT]\n return authority_boost\n", "path": "ingestion_server/ingestion_server/authority.py"}]}
1,637
615
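For reference, a minimal runnable sketch of the authority lookup once the golden diff above is applied. The enum members and boost values come from the patch itself; the source mapping is truncated to three representative entries, and the `"wikimedia"` source in the last assertion is a hypothetical name used only to show the fall-through behavior of the original `get_authority_boost`.

```python
from enum import Enum, auto


class AuthorityTypes(Enum):
    CURATED = auto()
    CULTURAL_INSTITUTION = auto()
    SOCIAL_MEDIA = auto()
    DEFAULT = auto()


# Boost values as set by the patch; only a few mapped sources are shown.
boost = {
    AuthorityTypes.CURATED: 85,
    AuthorityTypes.CULTURAL_INSTITUTION: 90,
    AuthorityTypes.SOCIAL_MEDIA: 75,
    AuthorityTypes.DEFAULT: 80,
}

authority_types = {
    "flickr": AuthorityTypes.SOCIAL_MEDIA,
    "met": AuthorityTypes.CULTURAL_INSTITUTION,
    "stocksnap": AuthorityTypes.CURATED,
}


def get_authority_boost(source):
    # Unknown sources return None, exactly as in the original function;
    # known sources whose type carries no boost fall back to DEFAULT.
    if source not in authority_types:
        return None
    return boost.get(authority_types[source], boost[AuthorityTypes.DEFAULT])


assert get_authority_boost("met") == 90        # cultural institution
assert get_authority_boost("stocksnap") == 85  # curated gallery
assert get_authority_boost("flickr") == 75     # social media
assert get_authority_boost("wikimedia") is None
```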
gh_patches_debug_14673
rasdani/github-patches
git_diff
xonsh__xonsh-427
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tab completing on empty line with prompt_toolkit kills terminal Arch Linux with xonsh 0.2.2 and using prompt toolkit. Hitting TAB on an empty line crashes the terminal (rxvt-unicode). I can see the traceback for a split second but then the terminal is gone. This doesn't happen using readline. </issue> <code> [start of xonsh/prompt_toolkit_key_bindings.py] 1 """Key bindings for prompt_toolkit xonsh shell.""" 2 import builtins 3 4 from prompt_toolkit.filters import Filter 5 from prompt_toolkit.keys import Keys 6 7 8 class TabShouldInsertIndentFilter(Filter): 9 """ 10 Filter that is intended to check if <Tab> should insert indent instead of 11 starting autocompletion. 12 It basically just checks if there are only whitespaces before the cursor - 13 if so indent should be inserted, otherwise autocompletion. 14 """ 15 def __call__(self, cli): 16 before_cursor = cli.current_buffer.document.current_line_before_cursor 17 18 return bool(not before_cursor or before_cursor.isspace()) 19 20 21 def load_xonsh_bindings(key_bindings_manager): 22 """ 23 Load custom key bindings. 24 """ 25 handle = key_bindings_manager.registry.add_binding 26 env = builtins.__xonsh_env__ 27 28 @handle(Keys.Tab, filter=TabShouldInsertIndentFilter()) 29 def _(event): 30 """ 31 If there are only whitespaces before current cursor position insert 32 indent instead of autocompleting. 33 """ 34 event.cli.current_buffer.insert_text(env['INDENT']) 35 36 37 [end of xonsh/prompt_toolkit_key_bindings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/prompt_toolkit_key_bindings.py b/xonsh/prompt_toolkit_key_bindings.py --- a/xonsh/prompt_toolkit_key_bindings.py +++ b/xonsh/prompt_toolkit_key_bindings.py @@ -15,7 +15,7 @@ def __call__(self, cli): before_cursor = cli.current_buffer.document.current_line_before_cursor - return bool(not before_cursor or before_cursor.isspace()) + return bool(before_cursor.isspace()) def load_xonsh_bindings(key_bindings_manager): @@ -31,6 +31,6 @@ If there are only whitespaces before current cursor position insert indent instead of autocompleting. """ - event.cli.current_buffer.insert_text(env['INDENT']) + event.cli.current_buffer.insert_text(env.get('INDENT'))
{"golden_diff": "diff --git a/xonsh/prompt_toolkit_key_bindings.py b/xonsh/prompt_toolkit_key_bindings.py\n--- a/xonsh/prompt_toolkit_key_bindings.py\n+++ b/xonsh/prompt_toolkit_key_bindings.py\n@@ -15,7 +15,7 @@\n def __call__(self, cli):\n before_cursor = cli.current_buffer.document.current_line_before_cursor\n \n- return bool(not before_cursor or before_cursor.isspace())\n+ return bool(before_cursor.isspace())\n \n \n def load_xonsh_bindings(key_bindings_manager):\n@@ -31,6 +31,6 @@\n If there are only whitespaces before current cursor position insert\n indent instead of autocompleting.\n \"\"\"\n- event.cli.current_buffer.insert_text(env['INDENT'])\n+ event.cli.current_buffer.insert_text(env.get('INDENT'))\n", "issue": "Tab completing on empty line with prompt_toolkit kills terminal\nArch Linux with xonsh 0.2.2 and using prompt toolkit. Hitting TAB on an empty line crashes the terminal (rxvt-unicode). I can see the traceback for a split second but then the terminal is gone. \n\nThis doesn't happen using readline.\n\n", "before_files": [{"content": "\"\"\"Key bindings for prompt_toolkit xonsh shell.\"\"\"\nimport builtins\n\nfrom prompt_toolkit.filters import Filter\nfrom prompt_toolkit.keys import Keys\n\n\nclass TabShouldInsertIndentFilter(Filter):\n \"\"\"\n Filter that is intended to check if <Tab> should insert indent instead of\n starting autocompletion.\n It basically just checks if there are only whitespaces before the cursor -\n if so indent should be inserted, otherwise autocompletion.\n \"\"\"\n def __call__(self, cli):\n before_cursor = cli.current_buffer.document.current_line_before_cursor\n\n return bool(not before_cursor or before_cursor.isspace())\n\n\ndef load_xonsh_bindings(key_bindings_manager):\n \"\"\"\n Load custom key bindings.\n \"\"\"\n handle = key_bindings_manager.registry.add_binding\n env = builtins.__xonsh_env__\n\n @handle(Keys.Tab, filter=TabShouldInsertIndentFilter())\n def _(event):\n \"\"\"\n If there are only whitespaces before current cursor position insert\n indent instead of autocompleting.\n \"\"\"\n event.cli.current_buffer.insert_text(env['INDENT'])\n\n\n", "path": "xonsh/prompt_toolkit_key_bindings.py"}]}
917
183
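The fix above hinges on a subtlety of `str.isspace()`: it returns `False` for the empty string, so with the patch applied, Tab on an empty line no longer matches the indent filter, while the companion change from `env['INDENT']` to `env.get('INDENT')` appears intended to make a missing `INDENT` setting non-fatal. A self-contained sketch of the predicate, with sample strings standing in for `document.current_line_before_cursor`:

```python
def should_insert_indent_old(before_cursor: str) -> bool:
    # Pre-patch predicate: matches the empty line as well.
    return bool(not before_cursor or before_cursor.isspace())


def should_insert_indent_new(before_cursor: str) -> bool:
    # Post-patch predicate: ''.isspace() is False, so an empty line
    # now falls through to normal autocompletion instead.
    return bool(before_cursor.isspace())


assert should_insert_indent_old("") is True       # empty line hit the indent path
assert should_insert_indent_new("") is False      # ...and no longer does
assert should_insert_indent_new("    ") is True   # leading whitespace still indents
assert should_insert_indent_new("ls -") is False  # mid-word Tab still completes
```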
gh_patches_debug_7426
rasdani/github-patches
git_diff
cltk__cltk-938
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add "Gorman Trees" corpus for Greek https://github.com/perseids-publications/gorman-trees = https://perseids-publications.github.io/gorman-trees/ ~500,000 tokens of parsed Ancient Greek. </issue> <code> [start of cltk/corpus/greek/corpora.py] 1 """Greek language corpora available for download or loading locally. 2 All remote corpora hosted by github on the cltk organization account, eg: 3 'http://github.com/cltk' + name 4 """ 5 6 GREEK_CORPORA = [ 7 {'name': 'greek_software_tlgu', 8 'origin': 'https://github.com/cltk/greek_software_tlgu.git', 9 'location': 'remote', 10 'type': 'software'}, 11 {'encoding': 'utf-8', 12 'markup': 'tei_xml', 13 'origin': 'https://github.com/cltk/greek_text_perseus.git', 14 'name': 'greek_text_perseus', 15 'location': 'remote', 16 'type': 'text'}, 17 {'encoding': 'latin-1', 18 'markup': 'beta_code', 19 'origin': None, 20 'name': 'phi7', 21 'location': 'local', 22 'type': 'text'}, 23 {'encoding': 'latin-1', 24 'markup': 'beta_code', 25 'name': 'tlg', 26 'origin': None, 27 'location': 'local', 28 'type': 'text'}, 29 {'encoding': 'utf-8', 30 'markup': 'plaintext', 31 'name': 'greek_proper_names_cltk', 32 'origin': 'https://github.com/cltk/greek_proper_names_cltk.git', 33 'location': 'remote', 34 'type': 'lexicon'}, 35 {'name': 'greek_models_cltk', 36 'origin': 'https://github.com/cltk/greek_models_cltk.git', 37 'location': 'remote', 38 'type': 'model'}, 39 {'encoding': 'utf-8', 40 'markup': 'xml', 41 'origin': 'https://github.com/cltk/greek_treebank_perseus.git', 42 'name': 'greek_treebank_perseus', 43 'location': 'remote', 44 'type': 'treebank'}, 45 {'encoding': 'xml', 46 'markup': 'plaintext', 47 'origin': 'https://github.com/cltk/greek_lexica_perseus.git', 48 'name': 'greek_lexica_perseus', 49 'location': 'remote', 50 'type': 'lexicon'}, 51 {'encoding': 'utf-8', 52 'markup': 'plaintext', 53 'origin': 'https://github.com/cltk/greek_training_set_sentence_cltk.git', 54 'name': 'greek_training_set_sentence_cltk', 55 'location': 'remote', 56 'type': 'training_set'}, 57 {'name': 'greek_word2vec_cltk', 58 'origin': 'https://github.com/cltk/greek_word2vec_cltk.git', 59 'location': 'remote', 60 'type': 'model'}, 61 {'name': 'greek_text_lacus_curtius', 62 'origin': 'https://github.com/cltk/greek_text_lacus_curtius.git', 63 'location': 'remote', 64 'type': 'text'}, 65 {'name': 'greek_text_first1kgreek', 66 'origin': 'https://github.com/cltk/First1KGreek', 67 'location': 'remote', 68 'type': 'text'}, 69 {'name': 'greek_text_tesserae', 70 'encoding': 'utf-8', 71 'markup': 'plaintext', #modified plaintext with Tesserae-style citations 72 'origin': 'https://github.com/cltk/greek_text_tesserae.git', 73 'location': 'remote', 74 'type': 'text'}, 75 ] 76 [end of cltk/corpus/greek/corpora.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cltk/corpus/greek/corpora.py b/cltk/corpus/greek/corpora.py --- a/cltk/corpus/greek/corpora.py +++ b/cltk/corpus/greek/corpora.py @@ -42,6 +42,12 @@ 'name': 'greek_treebank_perseus', 'location': 'remote', 'type': 'treebank'}, + {'encoding': 'utf-8', + 'markup': 'xml', + 'origin': 'https://github.com/vgorman1/Greek-Dependency-Trees.git', + 'name': 'greek_treebank_gorman', + 'location': 'remote', + 'type': 'treebank'}, {'encoding': 'xml', 'markup': 'plaintext', 'origin': 'https://github.com/cltk/greek_lexica_perseus.git',
{"golden_diff": "diff --git a/cltk/corpus/greek/corpora.py b/cltk/corpus/greek/corpora.py\n--- a/cltk/corpus/greek/corpora.py\n+++ b/cltk/corpus/greek/corpora.py\n@@ -42,6 +42,12 @@\n 'name': 'greek_treebank_perseus',\n 'location': 'remote',\n 'type': 'treebank'},\n+ {'encoding': 'utf-8',\n+ 'markup': 'xml',\n+ 'origin': 'https://github.com/vgorman1/Greek-Dependency-Trees.git',\n+ 'name': 'greek_treebank_gorman',\n+ 'location': 'remote',\n+ 'type': 'treebank'},\n {'encoding': 'xml',\n 'markup': 'plaintext',\n 'origin': 'https://github.com/cltk/greek_lexica_perseus.git',\n", "issue": "Add \"Gorman Trees\" corpus for Greek\nhttps://github.com/perseids-publications/gorman-trees = https://perseids-publications.github.io/gorman-trees/\r\n\r\n~500,000 tokens of parsed Ancient Greek.\n", "before_files": [{"content": "\"\"\"Greek language corpora available for download or loading locally.\nAll remote corpora hosted by github on the cltk organization account, eg:\n'http://github.com/cltk' + name\n\"\"\"\n\nGREEK_CORPORA = [\n {'name': 'greek_software_tlgu',\n 'origin': 'https://github.com/cltk/greek_software_tlgu.git',\n 'location': 'remote',\n 'type': 'software'},\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'origin': 'https://github.com/cltk/greek_text_perseus.git',\n 'name': 'greek_text_perseus',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'origin': None,\n 'name': 'phi7',\n 'location': 'local',\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'name': 'tlg',\n 'origin': None,\n 'location': 'local',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'greek_proper_names_cltk',\n 'origin': 'https://github.com/cltk/greek_proper_names_cltk.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'name': 'greek_models_cltk',\n 'origin': 'https://github.com/cltk/greek_models_cltk.git',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'origin': 'https://github.com/cltk/greek_treebank_perseus.git',\n 'name': 'greek_treebank_perseus',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'xml',\n 'markup': 'plaintext',\n 'origin': 'https://github.com/cltk/greek_lexica_perseus.git',\n 'name': 'greek_lexica_perseus',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'origin': 'https://github.com/cltk/greek_training_set_sentence_cltk.git',\n 'name': 'greek_training_set_sentence_cltk',\n 'location': 'remote',\n 'type': 'training_set'},\n {'name': 'greek_word2vec_cltk',\n 'origin': 'https://github.com/cltk/greek_word2vec_cltk.git',\n 'location': 'remote',\n 'type': 'model'},\n {'name': 'greek_text_lacus_curtius',\n 'origin': 'https://github.com/cltk/greek_text_lacus_curtius.git',\n 'location': 'remote',\n 'type': 'text'},\n {'name': 'greek_text_first1kgreek',\n 'origin': 'https://github.com/cltk/First1KGreek',\n 'location': 'remote',\n 'type': 'text'},\n {'name': 'greek_text_tesserae',\n 'encoding': 'utf-8',\n 'markup': 'plaintext', #modified plaintext with Tesserae-style citations\n 'origin': 'https://github.com/cltk/greek_text_tesserae.git',\n 'location': 'remote',\n 'type': 'text'},\n]\n", "path": "cltk/corpus/greek/corpora.py"}]}
1,500
202
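For context, the patch registers the new Gorman treebank by appending one more dictionary to `GREEK_CORPORA`. A condensed sketch of that entry, together with a hypothetical helper (not part of the patch) showing how such a registry can be filtered:

```python
# Existing entries elided; the dictionary below is the one added by the patch.
GREEK_CORPORA = [
    {
        "encoding": "utf-8",
        "markup": "xml",
        "origin": "https://github.com/vgorman1/Greek-Dependency-Trees.git",
        "name": "greek_treebank_gorman",
        "location": "remote",
        "type": "treebank",
    },
]


def remote_corpora_of_type(corpora, corpus_type):
    """Illustrative helper: names of remote corpora of a given type."""
    return [
        c["name"]
        for c in corpora
        if c["type"] == corpus_type and c["location"] == "remote"
    ]


assert remote_corpora_of_type(GREEK_CORPORA, "treebank") == ["greek_treebank_gorman"]
```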
gh_patches_debug_11190
rasdani/github-patches
git_diff
freedomofpress__securedrop-4467
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add list of supported languages to the metadata API Whether or not a SecureDrop is available in one of the [supported languages](https://docs.securedrop.org/en/latest/admin.html#configuring-localization-for-the-source-interface-and-the-journalist-interface) is public information enumerated at the bottom of the source interface, but it's not currently exposed in the metadata API. Returning the list of supported languages along with the other instance metadata would be useful, including for the envisioned source interface scanner integrated with securedrop.org. # User Stories As a translator, I want to know which languages are currently used by SecureDrop users, so I know if and where my translations have real world impact. As a SecureDrop support team member, I want to know at a glance whether news organizations have configured supported languages, so I can point out to them if/when translations relevant to them are available. As a SecureDrop.org visitor, I'd like to know if a SecureDrop instance is available in the language I speak, so that I know if I can navigate it with confidence, and that it is likely to accept submissions in my language. </issue> <code> [start of securedrop/source_app/api.py] 1 import json 2 import platform 3 4 from flask import Blueprint, make_response 5 6 import version 7 8 9 def make_blueprint(config): 10 view = Blueprint('api', __name__) 11 12 @view.route('/metadata') 13 def metadata(): 14 meta = {'gpg_fpr': config.JOURNALIST_KEY, 15 'sd_version': version.__version__, 16 'server_os': platform.linux_distribution()[1], 17 } 18 resp = make_response(json.dumps(meta)) 19 resp.headers['Content-Type'] = 'application/json' 20 return resp 21 22 return view 23 [end of securedrop/source_app/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/source_app/api.py b/securedrop/source_app/api.py --- a/securedrop/source_app/api.py +++ b/securedrop/source_app/api.py @@ -11,10 +11,12 @@ @view.route('/metadata') def metadata(): - meta = {'gpg_fpr': config.JOURNALIST_KEY, - 'sd_version': version.__version__, - 'server_os': platform.linux_distribution()[1], - } + meta = { + 'gpg_fpr': config.JOURNALIST_KEY, + 'sd_version': version.__version__, + 'server_os': platform.linux_distribution()[1], + 'supported_languages': config.SUPPORTED_LOCALES + } resp = make_response(json.dumps(meta)) resp.headers['Content-Type'] = 'application/json' return resp
{"golden_diff": "diff --git a/securedrop/source_app/api.py b/securedrop/source_app/api.py\n--- a/securedrop/source_app/api.py\n+++ b/securedrop/source_app/api.py\n@@ -11,10 +11,12 @@\n \n @view.route('/metadata')\n def metadata():\n- meta = {'gpg_fpr': config.JOURNALIST_KEY,\n- 'sd_version': version.__version__,\n- 'server_os': platform.linux_distribution()[1],\n- }\n+ meta = {\n+ 'gpg_fpr': config.JOURNALIST_KEY,\n+ 'sd_version': version.__version__,\n+ 'server_os': platform.linux_distribution()[1],\n+ 'supported_languages': config.SUPPORTED_LOCALES\n+ }\n resp = make_response(json.dumps(meta))\n resp.headers['Content-Type'] = 'application/json'\n return resp\n", "issue": "Add list of supported languages to the metadata API\nWhether or not a SecureDrop is available in one of the [supported languages](https://docs.securedrop.org/en/latest/admin.html#configuring-localization-for-the-source-interface-and-the-journalist-interface) is public information enumerated at the bottom of the source interface, but it's not currently exposed in the metadata API.\r\n\r\nReturning the list of supported languages along with the other instance metadata would be useful, including for the envisioned source interface scanner integrated with securedrop.org. \r\n\r\n# User Stories\r\n\r\nAs a translator, I want to know which languages are currently used by SecureDrop users, so I know if and where my translations have real world impact.\r\n\r\nAs a SecureDrop support team member, I want to know at a glance whether news organizations have configured supported languages, so I can point out to them if/when translations relevant to them are available.\r\n\r\nAs a SecureDrop.org visitor, I'd like to know if a SecureDrop instance is available in the language I speak, so that I know if I can navigate it with confidence, and that it is likely to accept submissions in my language.\n", "before_files": [{"content": "import json\nimport platform\n\nfrom flask import Blueprint, make_response\n\nimport version\n\n\ndef make_blueprint(config):\n view = Blueprint('api', __name__)\n\n @view.route('/metadata')\n def metadata():\n meta = {'gpg_fpr': config.JOURNALIST_KEY,\n 'sd_version': version.__version__,\n 'server_os': platform.linux_distribution()[1],\n }\n resp = make_response(json.dumps(meta))\n resp.headers['Content-Type'] = 'application/json'\n return resp\n\n return view\n", "path": "securedrop/source_app/api.py"}]}
921
192
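A minimal sketch of the patched `/metadata` endpoint, with the SecureDrop-specific pieces stubbed out: the `config` class here is a stand-in for the real config module, the fingerprint and locale list are placeholder values, and the hard-coded `sd_version`/`server_os` strings replace the `version.__version__` and `platform.linux_distribution()` lookups so the sketch stays runnable on current Python.

```python
import json

from flask import Blueprint, make_response


class config:  # stand-in for SecureDrop's real config module
    JOURNALIST_KEY = "<journalist GPG fingerprint>"
    SUPPORTED_LOCALES = ["de_DE", "es_ES", "fr_FR", "nb_NO"]  # example values


view = Blueprint("api", __name__)


@view.route("/metadata")
def metadata():
    meta = {
        "gpg_fpr": config.JOURNALIST_KEY,
        "sd_version": "0.13.0",  # placeholder for version.__version__
        "server_os": "16.04",    # real code: platform.linux_distribution()[1]
        "supported_languages": config.SUPPORTED_LOCALES,
    }
    resp = make_response(json.dumps(meta))
    resp.headers["Content-Type"] = "application/json"
    return resp
```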
gh_patches_debug_17916
rasdani/github-patches
git_diff
pex-tool__pex-556
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 1.4.7 docket of user-facing issues fixed: + #550: Pex 1.4.6 builds a failing pex </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '1.4.6' 5 6 # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems 7 # for pex code so we exclude that range. 8 SETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*' 9 10 WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32' 11 [end of pex/version.py] [start of setup.py] 1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 import os 5 6 from setuptools import setup 7 8 with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fp: 9 LONG_DESCRIPTION = fp.read() + '\n' 10 11 with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp: 12 LONG_DESCRIPTION += fp.read() 13 14 15 # This seems to be a fairly standard version file pattern. 16 # 17 # Populates the following variables: 18 # __version__ 19 # __setuptools_requirement 20 # __wheel_requirement 21 __version__ = '' 22 version_py_file = os.path.join(os.path.dirname(__file__), 'pex', 'version.py') 23 with open(version_py_file) as version_py: 24 exec(compile(version_py.read(), version_py_file, 'exec')) 25 26 27 setup( 28 name = 'pex', 29 version = __version__, 30 description = "The PEX packaging toolchain.", 31 long_description = LONG_DESCRIPTION, 32 url = 'https://github.com/pantsbuild/pex', 33 license = 'Apache License, Version 2.0', 34 zip_safe = True, 35 classifiers = [ 36 'Intended Audience :: Developers', 37 'License :: OSI Approved :: Apache Software License', 38 'Operating System :: Unix', 39 'Operating System :: POSIX :: Linux', 40 'Operating System :: MacOS :: MacOS X', 41 'Programming Language :: Python', 42 'Programming Language :: Python :: 2', 43 'Programming Language :: Python :: 2.7', 44 'Programming Language :: Python :: 3', 45 'Programming Language :: Python :: 3.3', 46 'Programming Language :: Python :: 3.4', 47 'Programming Language :: Python :: 3.5', 48 'Programming Language :: Python :: 3.6', 49 ], 50 packages = [ 51 'pex', 52 'pex.bin', 53 'pex.commands', 54 ], 55 install_requires = [ 56 SETUPTOOLS_REQUIREMENT, 57 WHEEL_REQUIREMENT, 58 ], 59 extras_require={ 60 # For improved subprocess robustness under python2.7. 61 'subprocess': ['subprocess32>=3.2.7'], 62 # For improved requirement resolution and fetching robustness. 63 'requests': ['requests>=2.8.14'], 64 # For improved requirement resolution and fetching performance. 65 'cachecontrol': ['CacheControl>=0.12.3'], 66 }, 67 tests_require = [ 68 'mock', 69 'twitter.common.contextutil>=0.3.1,<0.4.0', 70 'twitter.common.lang>=0.3.1,<0.4.0', 71 'twitter.common.testing>=0.3.1,<0.4.0', 72 'twitter.common.dirutil>=0.3.1,<0.4.0', 73 'pytest', 74 ], 75 entry_points = { 76 'distutils.commands': [ 77 'bdist_pex = pex.commands.bdist_pex:bdist_pex', 78 ], 79 'console_scripts': [ 80 'pex = pex.bin.pex:main', 81 ], 82 }, 83 ) 84 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,7 +1,7 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '1.4.6' +__version__ = '1.4.7' # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems # for pex code so we exclude that range. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -29,6 +29,7 @@ version = __version__, description = "The PEX packaging toolchain.", long_description = LONG_DESCRIPTION, + long_description_content_type="text/x-rst", url = 'https://github.com/pantsbuild/pex', license = 'Apache License, Version 2.0', zip_safe = True,
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,7 +1,7 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.4.6'\n+__version__ = '1.4.7'\n \n # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n # for pex code so we exclude that range.\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,6 +29,7 @@\n version = __version__,\n description = \"The PEX packaging toolchain.\",\n long_description = LONG_DESCRIPTION,\n+ long_description_content_type=\"text/x-rst\",\n url = 'https://github.com/pantsbuild/pex',\n license = 'Apache License, Version 2.0',\n zip_safe = True,\n", "issue": "Release 1.4.7\ndocket of user-facing issues fixed:\r\n+ #550: Pex 1.4.6 builds a failing pex\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.4.6'\n\n# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n# for pex code so we exclude that range.\nSETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'\n\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'\n", "path": "pex/version.py"}, {"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fp:\n LONG_DESCRIPTION = fp.read() + '\\n'\n\nwith open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:\n LONG_DESCRIPTION += fp.read()\n\n\n# This seems to be a fairly standard version file pattern.\n#\n# Populates the following variables:\n# __version__\n# __setuptools_requirement\n# __wheel_requirement\n__version__ = ''\nversion_py_file = os.path.join(os.path.dirname(__file__), 'pex', 'version.py')\nwith open(version_py_file) as version_py:\n exec(compile(version_py.read(), version_py_file, 'exec'))\n\n\nsetup(\n name = 'pex',\n version = __version__,\n description = \"The PEX packaging toolchain.\",\n long_description = LONG_DESCRIPTION,\n url = 'https://github.com/pantsbuild/pex',\n license = 'Apache License, Version 2.0',\n zip_safe = True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n packages = [\n 'pex',\n 'pex.bin',\n 'pex.commands',\n ],\n install_requires = [\n SETUPTOOLS_REQUIREMENT,\n WHEEL_REQUIREMENT,\n ],\n extras_require={\n # For improved subprocess robustness under python2.7.\n 'subprocess': ['subprocess32>=3.2.7'],\n # For improved requirement resolution and fetching robustness.\n 'requests': ['requests>=2.8.14'],\n # For improved requirement resolution and fetching performance.\n 'cachecontrol': ['CacheControl>=0.12.3'],\n },\n tests_require = [\n 'mock',\n 'twitter.common.contextutil>=0.3.1,<0.4.0',\n 
'twitter.common.lang>=0.3.1,<0.4.0',\n 'twitter.common.testing>=0.3.1,<0.4.0',\n 'twitter.common.dirutil>=0.3.1,<0.4.0',\n 'pytest',\n ],\n entry_points = {\n 'distutils.commands': [\n 'bdist_pex = pex.commands.bdist_pex:bdist_pex',\n ],\n 'console_scripts': [\n 'pex = pex.bin.pex:main',\n ],\n },\n)\n", "path": "setup.py"}]}
1,573
245
gh_patches_debug_2
rasdani/github-patches
git_diff
CTFd__CTFd-598
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Docker startup getting stuck on mysqladmin ping **Environment**: - CTFd Version/Commit: ctfd/ctfd:latest from Docker hub (17 days old) - Operating System: Amazon Linux AMI 2017.09.j x86_64 ECS HVM GP2 - Web Browser and Version: N/A **What happened?** Trying to setup CTFd with AWS ECS and RDS Aurora. If I don't set the DATABASE_URL env variable, it works fine and starts. If I do set the DATABASE_URL to mysql+pymysql://ctfd:<MYPASSWORD>@ctfd.<resource-id>i.eu-west-1.rds.amazonaws.com/ctfd I get stuck on docker-entrypoint.sh:7 `while ! mysqladmin ping -h db --silent; do` **What did you expect to happen?** That the ping should succeed and startup continue **How to reproduce your issue** Create an ECS task with ctfd/ctfd as image source, set env variable SECRET_KEY and DATABASE_URL. Start container. I have made sure the container can access the database by running `docker exec container-id mysql -h ctfd.<resource-id>.eu-west-1.rds.amazonaws.com -p<SECRET PASSWORD>` which works. **Any associated stack traces or error logs** Just stuck on "Waiting on MySQL" My question is basically: am I doing something wrong and should somehow make that "db" resolve to the database or is the script incorrect and should take the value of DATABASE_URL into account? </issue> <code> [start of wsgi.py] 1 from CTFd import create_app 2 3 app = create_app() 4 [end of wsgi.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wsgi.py b/wsgi.py deleted file mode 100644 --- a/wsgi.py +++ /dev/null @@ -1,3 +0,0 @@ -from CTFd import create_app - -app = create_app()
{"golden_diff": "diff --git a/wsgi.py b/wsgi.py\ndeleted file mode 100644\n--- a/wsgi.py\n+++ /dev/null\n@@ -1,3 +0,0 @@\n-from CTFd import create_app\n-\n-app = create_app()\n", "issue": "Docker startup getting stuck on mysqladmin ping\n**Environment**:\r\n\r\n - CTFd Version/Commit: ctfd/ctfd:latest from Docker hub (17 days old)\r\n - Operating System: Amazon Linux AMI 2017.09.j x86_64 ECS HVM GP2\r\n - Web Browser and Version: N/A\r\n\r\n**What happened?**\r\n\r\nTrying to setup CTFd with AWS ECS and RDS Aurora.\r\nIf I don't set the DATABASE_URL env variable, it works fine and starts.\r\nIf I do set the DATABASE_URL to mysql+pymysql://ctfd:<MYPASSWORD>@ctfd.<resource-id>i.eu-west-1.rds.amazonaws.com/ctfd I get stuck on docker-entrypoint.sh:7 `while ! mysqladmin ping -h db --silent; do`\r\n\r\n**What did you expect to happen?**\r\n\r\nThat the ping should succeed and startup continue\r\n\r\n**How to reproduce your issue**\r\n\r\nCreate an ECS task with ctfd/ctfd as image source, set env variable SECRET_KEY and DATABASE_URL. Start container.\r\n\r\nI have made sure the container can access the database by running `docker exec container-id mysql -h ctfd.<resource-id>.eu-west-1.rds.amazonaws.com -p<SECRET PASSWORD>` which works.\r\n\r\n**Any associated stack traces or error logs**\r\n\r\nJust stuck on \"Waiting on MySQL\"\r\n\r\n\r\nMy question is basically: am I doing something wrong and should somehow make that \"db\" resolve to the database or is the script incorrect and should take the value of DATABASE_URL into account?\n", "before_files": [{"content": "from CTFd import create_app\n\napp = create_app()\n", "path": "wsgi.py"}]}
880
59
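Note that the golden diff recorded for this problem simply deletes `wsgi.py`. The host-resolution question raised in the issue text — that `docker-entrypoint.sh` pings the hard-coded host `db` regardless of `DATABASE_URL` — is sketched below with a hypothetical Python helper; the real fix would live in the shell entrypoint, and the URL shown is an anonymized example.

```python
from urllib.parse import urlparse


def db_host(database_url: str, default: str = "db") -> str:
    """Hypothetical helper: pick the MySQL host to wait on at startup."""
    if not database_url:
        return default  # no external DB configured; ping the linked container
    return urlparse(database_url).hostname or default


url = "mysql+pymysql://ctfd:secret@ctfd.abc123.eu-west-1.rds.amazonaws.com/ctfd"
assert db_host(url) == "ctfd.abc123.eu-west-1.rds.amazonaws.com"
assert db_host("") == "db"
```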
gh_patches_debug_48613
rasdani/github-patches
git_diff
OpenEnergyPlatform__oeplatform-1475
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Scenario bundles: Output datasets render weirdly ## Description of the issue I added an output dataset for the WAM scenario for this factsheet: https://openenergy-platform.org/scenario-bundles/id/95a65aca-6915-b64a-cac7-3831c12885b4 ![grafik](https://github.com/OpenEnergyPlatform/oeplatform/assets/36844029/fa43133d-3cd7-4a22-80a0-384cbf6514e3) It reads wrongly and shows more than only the title of the dataset, i.e. it should only be rendered as: Rahmendaten für den Projektionsbericht 2023 (Datentabelle) - as it does for the WEM scenario (this was already existing before the new release). ## Steps to Reproduce 1. Add a dataset to a scenario 2. 3. ## Ideas of solution Describe possible ideas for solution and evaluate advantages and disadvantages. ## Context and Environment * Version used: * Operating system: * Environment setup and (python) version: ## Workflow checklist - [ ] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md) </issue> <code> [start of oeplatform/__init__.py] 1 __version__ = "0.16.1" 2 [end of oeplatform/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/oeplatform/__init__.py b/oeplatform/__init__.py --- a/oeplatform/__init__.py +++ b/oeplatform/__init__.py @@ -1 +1 @@ -__version__ = "0.16.1" +__version__ = "0.16.2"
{"golden_diff": "diff --git a/oeplatform/__init__.py b/oeplatform/__init__.py\n--- a/oeplatform/__init__.py\n+++ b/oeplatform/__init__.py\n@@ -1 +1 @@\n-__version__ = \"0.16.1\"\n+__version__ = \"0.16.2\"\n", "issue": "Scenario bundles: Output datasets render weirdly\n## Description of the issue\r\n\r\nI added an output dataset for the WAM scenario for this factsheet: https://openenergy-platform.org/scenario-bundles/id/95a65aca-6915-b64a-cac7-3831c12885b4\r\n\r\n![grafik](https://github.com/OpenEnergyPlatform/oeplatform/assets/36844029/fa43133d-3cd7-4a22-80a0-384cbf6514e3)\r\n\r\nIt reads wrongly and shows more than only the title of the dataset, i.e. it should only be rendered as: Rahmendaten f\u00fcr den Projektionsbericht 2023 (Datentabelle) - as it does for the WEM scenario (this was already existing before the new release). \r\n\r\n\r\n## Steps to Reproduce\r\n1. Add a dataset to a scenario\r\n2.\r\n3.\r\n\r\n## Ideas of solution\r\n\r\nDescribe possible ideas for solution and evaluate advantages and disadvantages.\r\n\r\n## Context and Environment\r\n* Version used: \r\n* Operating system: \r\n* Environment setup and (python) version: \r\n\r\n## Workflow checklist\r\n- [ ] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)\r\n\n", "before_files": [{"content": "__version__ = \"0.16.1\"\n", "path": "oeplatform/__init__.py"}]}
852
73